| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
python | cherrypy__cherrypy | cherrypy/process/wspbus.py | { "start": 5241, "end": 21676 } |
class ____(object):
"""Process state-machine and messenger for HTTP site deployment.
All listeners for a given channel are guaranteed to be called even
if others at the same channel fail. Each failure is logged, but
execution proceeds on to the next listener. The only way to stop all
processing from inside a listener is to raise SystemExit and stop
the whole server.
"""
states = states
state = states.STOPPED
execv = False
max_cloexec_files = max_files
def __init__(self):
"""Initialize pub/sub bus."""
self.execv = False
self.state = states.STOPPED
channels = 'start', 'stop', 'exit', 'graceful', 'log', 'main'
self.listeners = dict((channel, set()) for channel in channels)
self._priorities = {}
def subscribe(self, channel, callback=None, priority=None):
"""Add the given callback at the given channel (if not present).
If callback is None, return a partial suitable for decorating
the callback.
"""
if callback is None:
return functools.partial(
self.subscribe,
channel,
priority=priority,
)
ch_listeners = self.listeners.setdefault(channel, set())
ch_listeners.add(callback)
if priority is None:
priority = getattr(callback, 'priority', 50)
self._priorities[(channel, callback)] = priority
def unsubscribe(self, channel, callback):
"""Discard the given callback (if present)."""
listeners = self.listeners.get(channel)
if listeners and callback in listeners:
listeners.discard(callback)
del self._priorities[(channel, callback)]
def publish(self, channel, *args, **kwargs):
"""Return output of all subscribers for the given channel."""
if channel not in self.listeners:
return []
exc = ChannelFailures()
output = []
raw_items = (
(self._priorities[(channel, listener)], listener)
for listener in self.listeners[channel]
)
items = sorted(raw_items, key=operator.itemgetter(0))
for priority, listener in items:
try:
output.append(listener(*args, **kwargs))
except KeyboardInterrupt:
raise
except SystemExit:
e = sys.exc_info()[1]
# If we have previous errors ensure the exit code is non-zero
if exc and e.code == 0:
e.code = 1
raise
except Exception:
exc.handle_exception()
if channel == 'log':
# Assume any further messages to 'log' will fail.
pass
else:
self.log(
'Error in %r listener %r' % (channel, listener),
level=40,
traceback=True,
)
if exc:
raise exc
return output
def _clean_exit(self):
"""Assert that the Bus is not running in atexit handler callback."""
if self.state != states.EXITING:
warnings.warn(
'The main thread is exiting, but the Bus is in the %r state; '
'shutting it down automatically now. You must either call '
'bus.block() after start(), or call bus.exit() before the '
'main thread exits.' % self.state,
RuntimeWarning,
)
self.exit()
def start(self):
"""Start all services."""
atexit.register(self._clean_exit)
self.state = states.STARTING
self.log('Bus STARTING')
try:
self.publish('start')
self.state = states.STARTED
self.log('Bus STARTED')
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.log(
'Shutting down due to error in start listener:',
level=40,
traceback=True,
)
e_info = sys.exc_info()[1]
try:
self.exit()
except Exception:
# Any stop/exit errors will be logged inside publish().
pass
# Re-raise the original error
raise e_info
def exit(self):
"""Stop all services and prepare to exit the process."""
exitstate = self.state
EX_SOFTWARE = 70
try:
self.stop()
self.state = states.EXITING
self.log('Bus EXITING')
self.publish('exit')
# This isn't strictly necessary, but it's better than seeing
# "Waiting for child threads to terminate..." and then nothing.
self.log('Bus EXITED')
except Exception:
# This method is often called asynchronously (whether thread,
# signal handler, console handler, or atexit handler), so we
# can't just let exceptions propagate out unhandled.
# Assume it's been logged and just die.
os._exit(EX_SOFTWARE)
if exitstate == states.STARTING:
# exit() was called before start() finished, possibly due to
# Ctrl-C because a start listener got stuck. In this case,
# we could get stuck in a loop where Ctrl-C never exits the
# process, so we just call os._exit() here.
os._exit(EX_SOFTWARE)
def restart(self):
"""Restart the process (may close connections).
This method does not restart the process from the calling
thread; instead, it stops the bus and asks the main thread to
call execv.
"""
self.execv = True
self.exit()
def graceful(self):
"""Advise all services to reload."""
self.log('Bus graceful')
self.publish('graceful')
def block(self, interval=0.1):
"""Wait for the EXITING state, KeyboardInterrupt or SystemExit.
This function is intended to be called only by the main thread.
After waiting for the EXITING state, it also waits for all
threads to terminate, and then calls os.execv if self.execv is
True. This design allows another thread to call bus.restart, yet
have the main thread perform the actual execv call (required on
some platforms).
"""
try:
self.wait(states.EXITING, interval=interval, channel='main')
except (KeyboardInterrupt, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.log('Keyboard Interrupt: shutting down bus')
self.exit()
except SystemExit:
self.log('SystemExit raised: shutting down bus')
self.exit()
raise
# Waiting for ALL child threads to finish is necessary on OS X.
# See https://github.com/cherrypy/cherrypy/issues/581.
# It's also good to let them all shut down before allowing
# the main thread to call atexit handlers.
# See https://github.com/cherrypy/cherrypy/issues/751.
self.log('Waiting for child threads to terminate...')
for t in threading.enumerate():
# Validate that we're not trying to join the MainThread, which
# would cause a deadlock. That case exists when running as a
# Windows service, and whenever another thread executes
# cherrypy.engine.exit()
if (
t != threading.current_thread()
and not isinstance(t, threading._MainThread)
and
# Note that any dummy (external) threads are
# always daemonic.
not t.daemon
):
self.log('Waiting for thread %s.' % t.name)
t.join()
if self.execv:
self._do_execv()
def wait(self, state, interval=0.1, channel=None):
"""Poll for the given state(s) at intervals; publish to channel."""
states = set(always_iterable(state))
while self.state not in states:
time.sleep(interval)
self.publish(channel)
def _do_execv(self):
"""Re-execute the current process.
This must be called from the main thread, because certain
platforms (e.g. OS X) do not reliably allow execv to be called
from a child thread.
"""
try:
args = self._get_true_argv()
except NotImplementedError:
"""It's probably win32 or GAE."""
args = [sys.executable] + self._get_interpreter_argv() + sys.argv
self.log('Re-spawning %s' % ' '.join(args))
self._extend_pythonpath(os.environ)
if sys.platform[:4] == 'java':
from _systemrestart import SystemRestart
raise SystemRestart
else:
if sys.platform == 'win32':
args = ['"%s"' % arg for arg in args]
os.chdir(_startup_cwd)
if self.max_cloexec_files:
self._set_cloexec()
os.execv(sys.executable, args)
@staticmethod
def _get_interpreter_argv():
"""Retrieve current Python interpreter's arguments.
Return an empty list in frozen mode; otherwise reproduce the
interpreter's arguments via the built-in helper.
Frozen mode applies when the app has been packaged into a binary
executable (e.g. with py2exe). In that case the interpreter's
arguments are already baked into that executable.
:seealso: https://github.com/cherrypy/cherrypy/issues/1526
Ref: https://pythonhosted.org/PyInstaller/runtime-information.html
"""
return (
[]
if getattr(sys, 'frozen', False)
else subprocess._args_from_interpreter_flags()
)
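# For illustration (assumed, not from the source): if Python was started as
# `python -O -W error app.py`, subprocess._args_from_interpreter_flags()
# returns something like ['-O', '-Werror'], so the re-spawn command line
# preserves those interpreter flags.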
@staticmethod
def _get_true_argv():
"""Retrieve all real arguments of the python interpreter.
...even those not listed in ``sys.argv``
:seealso: http://stackoverflow.com/a/28338254/595220
:seealso: http://stackoverflow.com/a/6683222/595220
:seealso: http://stackoverflow.com/a/28414807/595220
"""
try:
char_p = ctypes.c_wchar_p
argv = ctypes.POINTER(char_p)()
argc = ctypes.c_int()
ctypes.pythonapi.Py_GetArgcArgv(
ctypes.byref(argc),
ctypes.byref(argv),
)
_argv = argv[: argc.value]
# The code below tries to handle special cases correctly.
# The argument of `-c`, once consumed by Python itself, shows up as `-c`
# as well; the same applies to `-m`. This snippet tries to survive
# at least the case with `-m`
# Ref: https://github.com/cherrypy/cherrypy/issues/1545
# Ref: python/cpython@418baf9
argv_len, is_command, is_module = len(_argv), False, False
try:
m_ind = _argv.index('-m')
if m_ind < argv_len - 1 and _argv[m_ind + 1] in ('-c', '-m'):
"""
In some older Python versions `-m`'s argument may be
substituted with `-c`, not `-m`
"""
is_module = True
except (IndexError, ValueError):
m_ind = None
try:
c_ind = _argv.index('-c')
if c_ind < argv_len - 1 and _argv[c_ind + 1] == '-c':
is_command = True
except (IndexError, ValueError):
c_ind = None
if is_module:
"""It's containing `-m -m` sequence of arguments."""
if is_command and c_ind < m_ind:
"""There's `-c -c` before `-m`"""
raise RuntimeError(
"Cannot reconstruct command from '-c'. Ref: "
'https://github.com/cherrypy/cherrypy/issues/1545',
)
# Survive module argument here
original_module = sys.argv[0]
if not os.access(original_module, os.R_OK):
"""There's no such module exist."""
raise AttributeError(
"{} doesn't seem to be a module "
'accessible by current user'.format(original_module),
)
del _argv[m_ind : m_ind + 2] # remove `-m -m`
# ... and substitute it with the original module path:
_argv.insert(m_ind, original_module)
elif is_command:
"""It's containing just `-c -c` sequence of arguments."""
raise RuntimeError(
"Cannot reconstruct command from '-c'. "
'Ref: https://github.com/cherrypy/cherrypy/issues/1545',
)
except AttributeError:
"""It looks Py_GetArgcArgv's completely absent in some environments
It is known, that there's no Py_GetArgcArgv in MS Windows and
``ctypes`` module is completely absent in Google AppEngine
:seealso: https://github.com/cherrypy/cherrypy/issues/1506
:seealso: https://github.com/cherrypy/cherrypy/issues/1512
:ref: http://bit.ly/2gK6bXK
"""
raise NotImplementedError
else:
return _argv
@staticmethod
def _extend_pythonpath(env):
"""Prepend current working dir to PATH environment variable if needed.
If sys.path[0] is an empty string, the interpreter was likely
invoked with -m and the effective path is about to change on re-
exec. Add the current directory to $PYTHONPATH to ensure that
the new process sees the same path.
This issue cannot be addressed in the general case because
Python cannot reliably reconstruct the original command line (
http://bugs.python.org/issue14208).
(This idea filched from tornado.autoreload)
"""
path_prefix = '.' + os.pathsep
existing_path = env.get('PYTHONPATH', '')
needs_patch = sys.path[0] == '' and not existing_path.startswith(
path_prefix,
)
if needs_patch:
env['PYTHONPATH'] = path_prefix + existing_path
def _set_cloexec(self):
"""Set the CLOEXEC flag on all open files (except stdin/out/err).
If self.max_cloexec_files is an integer (the default), then on
platforms which support it, it represents the max open files
setting for the operating system. This function will be called
just before the process is restarted via os.execv() to prevent
open files from persisting into the new process.
Set self.max_cloexec_files to 0 to disable this behavior.
"""
for fd in range(3, self.max_cloexec_files): # skip stdin/out/err
try:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError:
continue
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def stop(self):
"""Stop all services."""
self.state = states.STOPPING
self.log('Bus STOPPING')
self.publish('stop')
self.state = states.STOPPED
self.log('Bus STOPPED')
def start_with_callback(self, func, args=None, kwargs=None):
"""Start 'func' in a new thread T, then start self (and return T)."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
args = (func,) + args
def _callback(func, *a, **kw):
self.wait(states.STARTED)
func(*a, **kw)
t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
t.name = 'Bus Callback ' + t.name
t.start()
self.start()
return t
def log(self, msg='', level=20, traceback=False):
"""Log the given message.
Append the last traceback if requested.
"""
if traceback:
msg += '\n' + ''.join(_traceback.format_exception(*sys.exc_info()))
self.publish('log', msg, level)
bus = Bus()
| Bus |
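A minimal usage sketch for the pub/sub bus in the row above (the masked class is Bus, per the target column). It assumes CherryPy is installed so that cherrypy.process.wspbus exposes the same API shown in the source cell; the channel name and priority are taken from that code.

from cherrypy.process import wspbus

bus = wspbus.Bus()

def on_start():
    # Runs when the 'start' channel is published; lower priority runs earlier.
    print('start listener called')

bus.subscribe('start', on_start, priority=10)

# publish() returns the outputs of all listeners on the channel.
print(bus.publish('start'))

bus.start()   # publishes 'start' again and moves state to STARTED
bus.exit()    # stops services, publishing 'stop' and then 'exit'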
python | python-poetry__poetry | src/poetry/installation/chef.py | { "start": 587, "end": 3665 } |
class ____:
def __init__(
self, artifact_cache: ArtifactCache, env: Env, pool: RepositoryPool
) -> None:
self._env = env
self._pool = pool
self._artifact_cache = artifact_cache
def prepare(
self,
archive: Path,
output_dir: Path | None = None,
*,
editable: bool = False,
config_settings: Mapping[str, str | Sequence[str]] | None = None,
) -> Path:
if not self._should_prepare(archive):
return archive
if archive.is_dir():
destination = output_dir or Path(tempfile.mkdtemp(prefix="poetry-chef-"))
return self._prepare(
archive,
destination=destination,
editable=editable,
config_settings=config_settings,
)
return self._prepare_sdist(
archive, destination=output_dir, config_settings=config_settings
)
def _prepare(
self,
directory: Path,
destination: Path,
*,
editable: bool = False,
config_settings: Mapping[str, str | Sequence[str]] | None = None,
) -> Path:
distribution: DistributionType = "editable" if editable else "wheel"
with isolated_builder(
source=directory,
distribution=distribution,
python_executable=self._env.python,
pool=self._pool,
) as builder:
return Path(
builder.build(
distribution,
destination.as_posix(),
config_settings=config_settings,
)
)
def _prepare_sdist(
self,
archive: Path,
destination: Path | None = None,
config_settings: Mapping[str, str | Sequence[str]] | None = None,
) -> Path:
from poetry.core.packages.utils.link import Link
suffix = archive.suffix
zip = suffix == ".zip"
with temporary_directory() as tmp_dir:
archive_dir = Path(tmp_dir)
extractall(source=archive, dest=archive_dir, zip=zip)
elements = list(archive_dir.glob("*"))
if len(elements) == 1 and elements[0].is_dir():
sdist_dir = elements[0]
else:
sdist_dir = archive_dir / archive.name.rstrip(suffix)
if not sdist_dir.is_dir():
sdist_dir = archive_dir
if destination is None:
destination = self._artifact_cache.get_cache_directory_for_link(
Link(archive.as_uri())
)
destination.mkdir(parents=True, exist_ok=True)
return self._prepare(
sdist_dir,
destination,
config_settings=config_settings,
)
def _should_prepare(self, archive: Path) -> bool:
return archive.is_dir() or not self._is_wheel(archive)
@classmethod
def _is_wheel(cls, archive: Path) -> bool:
return archive.suffix == ".whl"
| Chef |
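The prepare() method above dispatches on the archive type: a local source directory or a non-wheel archive (an sdist) is built in an isolated builder, while an existing wheel is returned untouched. The sketch below restates that dispatch rule as a standalone helper; it is illustrative only and not part of Poetry's API.

from pathlib import Path

def needs_build(archive: Path) -> bool:
    # Mirrors _should_prepare in the row above: directories (local sources)
    # and any non-wheel archive (e.g. an sdist .tar.gz or .zip) must be
    # built; a ready-made .whl is used as-is.
    return archive.is_dir() or archive.suffix != ".whl"

assert needs_build(Path("example-1.0.0.tar.gz"))
assert not needs_build(Path("example-1.0.0-py3-none-any.whl"))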
python | kubernetes-client__python | kubernetes/client/exceptions.py | { "start": 2099, "end": 2623 } |
class ____(OpenApiException, KeyError):
def __init__(self, msg, path_to_item=None):
"""
Args:
msg (str): the exception message
Keyword Args:
path_to_item (None/list): the path to the exception in the
received_data dict
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiKeyError, self).__init__(full_msg)
| ApiKeyError |
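A small sketch of how the exception above composes its message. It assumes the kubernetes package is installed (render_path lives in the same generated exceptions module); the sample path is made up for illustration.

from kubernetes.client.exceptions import ApiKeyError

try:
    raise ApiKeyError("missing key", path_to_item=["spec", "containers", 0])
except KeyError as exc:
    # ApiKeyError subclasses both OpenApiException and KeyError, so it can be
    # caught as a plain KeyError; path_to_item is kept on the instance and the
    # rendered path is appended to the message ("missing key at ...").
    print(exc.path_to_item)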
python | pandas-dev__pandas | pandas/core/indexes/multi.py | { "start": 4886, "end": 156898 } |
class ____(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
dtypes
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_level_values
get_indexer
get_loc
get_locs
get_loc_level
drop
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`__
for more.
Examples
--------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
>>> pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
"""
_hidden_attrs = Index._hidden_attrs | frozenset()
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
_names: list[Hashable | None] = []
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
sortorder: int | None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
copy: bool = False,
name=None,
verify_integrity: bool = True,
) -> Self:
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must be the same.")
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
result = object.__new__(cls)
result._cache = {}
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
new_codes = result._verify_integrity()
result._codes = new_codes
result._reset_identity()
result._references = None
return result
def _validate_codes(self, level: Index, code: np.ndarray) -> np.ndarray:
"""
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
code : np.ndarray
Code to reassign.
level : Index
Level to check for missing values (NaN, NaT, None).
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
def _verify_integrity(
self,
codes: list | None = None,
levels: list | None = None,
levels_to_verify: list[int] | range | None = None,
) -> FrozenList:
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
levels_to_verify : optional list
Specifies the levels to verify.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
Returns
-------
new codes where code value = -1 if it corresponds to a
NaN level.
"""
# NOTE: Currently does not check, among other things, that the cached
# nlevels matches or that sortorder matches the actual sort order.
codes = codes or self.codes
levels = levels or self.levels
if levels_to_verify is None:
levels_to_verify = range(len(levels))
if len(levels) != len(codes):
raise ValueError(
"Length of levels and codes must match. NOTE: "
"this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i in levels_to_verify:
level = levels[i]
level_codes = codes[i]
if len(level_codes) != codes_length:
raise ValueError(
f"Unequal code lengths: {[len(code_) for code_ in codes]}"
)
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError(
f"On level {i}, code max ({level_codes.max()}) >= length of "
f"level ({len(level)}). NOTE: this index is in an "
"inconsistent state"
)
if len(level_codes) and level_codes.min() < -1:
raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1")
if not level.is_unique:
raise ValueError(
f"Level values must be unique: {list(level)} on level {i}"
)
if self.sortorder is not None:
if self.sortorder > _lexsort_depth(self.codes, self.nlevels):
raise ValueError(
"Value for sortorder must be inferior or equal to actual "
f"lexsort_depth: sortorder {self.sortorder} "
f"with lexsort_depth {_lexsort_depth(self.codes, self.nlevels)}"
)
result_codes = []
for i in range(len(levels)):
if i in levels_to_verify:
result_codes.append(self._validate_codes(levels[i], codes[i]))
else:
result_codes.append(codes[i])
new_codes = FrozenList(result_codes)
return new_codes
@classmethod
def from_arrays(
cls,
arrays,
sortorder: int | None = None,
names: Sequence[Hashable] | Hashable | lib.NoDefault = lib.no_default,
) -> MultiIndex:
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
>>> pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
if is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
codes, levels = factorize_from_iterables(arrays)
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
return cls(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
@classmethod
@names_compat
def from_tuples(
cls,
tuples: Iterable[tuple[Hashable, ...]],
sortorder: int | None = None,
names: Sequence[Hashable] | Hashable | None = None,
) -> MultiIndex:
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, "red"), (1, "blue"), (2, "red"), (2, "blue")]
>>> pd.MultiIndex.from_tuples(tuples, names=("number", "color"))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
if is_iterator(tuples):
tuples = list(tuples)
tuples = cast(Collection[tuple[Hashable, ...]], tuples)
# handling the empty tuple cases
if len(tuples) and all(isinstance(e, tuple) and not e for e in tuples):
codes = [np.zeros(len(tuples))]
levels = [Index(com.asarray_tuplesafe(tuples, dtype=np.dtype("object")))]
return cls(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
arrays: list[Sequence[Hashable]]
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
# error: Argument 1 to "len" has incompatible type "Hashable";
# expected "Sized"
arrays = [[]] * len(names) # type: ignore[arg-type]
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = np.asarray(tuples._values)
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrs = zip_longest(*tuples, fillvalue=np.nan)
arrays = cast(list[Sequence[Hashable]], arrs)
return cls.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(
cls,
iterables: Sequence[Iterable[Hashable]],
sortorder: int | None = None,
names: Sequence[Hashable] | Hashable | lib.NoDefault = lib.no_default,
) -> MultiIndex:
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ["green", "purple"]
>>> pd.MultiIndex.from_product([numbers, colors], names=["number", "color"])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
if is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
# codes are all ndarrays, so cartesian_product is lossless
codes = cartesian_product(codes)
return cls(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(
cls,
df: DataFrame,
sortorder: int | None = None,
names: Sequence[Hashable] | Hashable | None = None,
) -> MultiIndex:
"""
Make a MultiIndex from a DataFrame.
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame(
... [["HI", "Temp"], ["HI", "Precip"], ["NJ", "Temp"], ["NJ", "Precip"]],
... columns=["a", "b"],
... )
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=["state", "observation"])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items(), strict=True)
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@cache_readonly
def _values(self) -> np.ndarray:
# We override here, since our parent uses _data, which we don't use.
values = []
for i in range(self.nlevels):
index = self.levels[i]
codes = self.codes[i]
vals = index
if isinstance(vals.dtype, CategoricalDtype):
vals = cast("CategoricalIndex", vals)
vals = vals._data._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or lib.is_np_dtype(
vals.dtype, "mM"
):
vals = vals.astype(object)
array_vals = np.asarray(vals)
array_vals = algos.take_nd(array_vals, codes, fill_value=index._na_value)
values.append(array_vals)
arr = lib.fast_zip(values)
return arr
@property
def values(self) -> np.ndarray:
return self._values
@property
def array(self):
"""
Raises a ValueError for `MultiIndex` because there's no single
array backing a MultiIndex.
Raises
------
ValueError
"""
raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
@cache_readonly
def dtypes(self) -> Series:
"""
Return the dtypes as a Series for the underlying MultiIndex.
See Also
--------
Index.dtype : Return the dtype object of the underlying data.
Series.dtypes : Return the data type of the underlying Series.
Examples
--------
>>> idx = pd.MultiIndex.from_product(
... [(0, 1, 2), ("green", "purple")], names=["number", "color"]
... )
>>> idx
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
>>> idx.dtypes
number int64
color object
dtype: object
"""
from pandas import Series
names = com.fill_missing_names(self.names)
return Series([level.dtype for level in self.levels], index=Index(names))
def __len__(self) -> int:
return len(self.codes[0])
@property
def size(self) -> int:
"""
Return the number of elements in the underlying data.
"""
# override Index.size to avoid materializing _values
return len(self)
# --------------------------------------------------------------------
# Levels Methods
@cache_readonly
def levels(self) -> FrozenList:
"""
Levels of the MultiIndex.
Levels refer to the different hierarchical levels or layers in a MultiIndex.
In a MultiIndex, each level represents a distinct dimension or category of
the index.
To access the levels, you can use the levels attribute of the MultiIndex,
which returns a tuple of Index objects. Each Index object represents a
level in the MultiIndex and contains the unique values found in that
specific level.
If a MultiIndex is created with levels A, B, C, and the DataFrame using
it filters out all rows of the level C, MultiIndex.levels will still
return A, B, C.
See Also
--------
MultiIndex.codes : The codes of the levels in the MultiIndex.
MultiIndex.get_level_values : Return vector of label values for requested
level.
Examples
--------
>>> index = pd.MultiIndex.from_product(
... [["mammal"], ("goat", "human", "cat", "dog")],
... names=["Category", "Animals"],
... )
>>> leg_num = pd.DataFrame(data=(4, 2, 4, 4), index=index, columns=["Legs"])
>>> leg_num
Legs
Category Animals
mammal goat 4
human 2
cat 4
dog 4
>>> leg_num.index.levels
FrozenList([['mammal'], ['cat', 'dog', 'goat', 'human']])
MultiIndex levels will not change even if the DataFrame using the MultiIndex
does not contain all them anymore.
See how "human" is not in the DataFrame, but it is still in levels:
>>> large_leg_num = leg_num[leg_num.Legs > 2]
>>> large_leg_num
Legs
Category Animals
mammal goat 4
cat 4
dog 4
>>> large_leg_num.index.levels
FrozenList([['mammal'], ['cat', 'dog', 'goat', 'human']])
"""
# Use cache_readonly to ensure that self.get_locs doesn't repeatedly
# create new IndexEngine
# https://github.com/pandas-dev/pandas/issues/31648
result = [
x._rename(name=name)
for x, name in zip(self._levels, self._names, strict=True)
]
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
return FrozenList(result)
def _set_levels(
self,
levels,
*,
level=None,
copy: bool = False,
validate: bool = True,
verify_integrity: bool = False,
) -> None:
# This is NOT part of the levels property because it should be
# externally not allowed to set levels. User beware if you change
# _levels directly
if validate:
if len(levels) == 0:
raise ValueError("Must set non-zero number of levels.")
if level is None and len(levels) != self.nlevels:
raise ValueError("Length of levels must match number of levels.")
if level is not None and len(levels) != len(level):
raise ValueError("Length of levels must match length of level.")
if level is None:
new_levels = FrozenList(
ensure_index(lev, copy=copy)._view() for lev in levels
)
level_numbers: range | list[int] = range(len(new_levels))
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_levels_list = list(self._levels)
for lev_num, lev in zip(level_numbers, levels, strict=True):
new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view()
new_levels = FrozenList(new_levels_list)
if verify_integrity:
new_codes = self._verify_integrity(
levels=new_levels, levels_to_verify=level_numbers
)
self._codes = new_codes
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._reset_cache()
def set_levels(
self, levels, *, level=None, verify_integrity: bool = True
) -> MultiIndex:
"""
Set new levels on MultiIndex. Defaults to returning new index.
The `set_levels` method provides a flexible way to change the levels of a
`MultiIndex`. This is particularly useful when you need to update the
index structure of your DataFrame without altering the data. The method
returns a new `MultiIndex` unless the operation is performed in-place,
ensuring that the original index remains unchanged unless explicitly
modified.
The method checks the integrity of the new levels against the existing
codes by default, but this can be disabled if you are confident that
your levels are consistent with the underlying data. This can be useful
when you want to perform optimizations or make specific adjustments to
the index levels that do not strictly adhere to the original structure.
Parameters
----------
levels : sequence or list of sequence
New level(s) to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
MultiIndex
A new `MultiIndex` with the updated levels.
See Also
--------
MultiIndex.set_codes : Set new codes on the existing `MultiIndex`.
MultiIndex.remove_unused_levels : Create new MultiIndex from current that
removes unused levels.
Index.set_names : Set Index or MultiIndex name.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [
... (1, "one"),
... (1, "two"),
... (2, "one"),
... (2, "two"),
... (3, "one"),
... (3, "two"),
... ],
... names=["foo", "bar"],
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two'),
(3, 'one'),
(3, 'two')],
names=['foo', 'bar'])
>>> idx.set_levels([["a", "b", "c"], [1, 2]])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels(["a", "b", "c"], level=0)
MultiIndex([('a', 'one'),
('a', 'two'),
('b', 'one'),
('b', 'two'),
('c', 'one'),
('c', 'two')],
names=['foo', 'bar'])
>>> idx.set_levels(["a", "b"], level="bar")
MultiIndex([(1, 'a'),
(1, 'b'),
(2, 'a'),
(2, 'b'),
(3, 'a'),
(3, 'b')],
names=['foo', 'bar'])
If any of the levels passed to ``set_levels()`` exceeds the
existing length, all of the values from that argument will
be stored in the MultiIndex levels, though the values will
be truncated in the MultiIndex output.
>>> idx.set_levels([["a", "b", "c"], [1, 2, 3, 4]], level=[0, 1])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels([["a", "b", "c"], [1, 2, 3, 4]], level=[0, 1]).levels
FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
"""
if isinstance(levels, Index):
pass
elif is_array_like(levels):
levels = Index(levels)
elif is_list_like(levels):
levels = list(levels)
level, levels = _require_listlike(level, levels, "Levels")
idx = self._view()
idx._reset_identity()
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
return idx
@property
def nlevels(self) -> int:
"""
Integer number of levels in this MultiIndex.
See Also
--------
MultiIndex.levels : Get the levels of the MultiIndex.
MultiIndex.codes : Get the codes of the MultiIndex.
MultiIndex.from_arrays : Convert arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a"], ["b"], ["c"]])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.nlevels
3
"""
return len(self._levels)
@property
def levshape(self) -> Shape:
"""
A tuple representing the length of each level in the MultiIndex.
In a `MultiIndex`, each level can contain multiple unique values. The
`levshape` property provides a quick way to assess the size of each
level by returning a tuple where each entry represents the number of
unique values in that specific level. This is particularly useful in
scenarios where you need to understand the structure and distribution
of your index levels, such as when working with multidimensional data.
See Also
--------
MultiIndex.shape : Return a tuple of the shape of the MultiIndex.
MultiIndex.levels : Returns the levels of the MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a"], ["b"], ["c"]])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.levshape
(1, 1, 1)
"""
return tuple(len(x) for x in self.levels)
# --------------------------------------------------------------------
# Codes Methods
@property
def codes(self) -> FrozenList:
"""
Codes of the MultiIndex.
Codes are the position of the index value in the list of level values
for each level.
Returns
-------
tuple of numpy.ndarray
The codes of the MultiIndex. Each array in the tuple corresponds
to a level in the MultiIndex.
See Also
--------
MultiIndex.set_codes : Set new codes on MultiIndex.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
>>> mi = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
>>> mi.codes
FrozenList([[0, 0, 1, 1], [1, 0, 1, 0]])
"""
return self._codes
def _set_codes(
self,
codes,
*,
level=None,
copy: bool = False,
validate: bool = True,
verify_integrity: bool = False,
) -> None:
if validate:
if level is None and len(codes) != self.nlevels:
raise ValueError("Length of codes must match number of levels")
if level is not None and len(codes) != len(level):
raise ValueError("Length of codes must match length of levels.")
level_numbers: list[int] | range
if level is None:
new_codes = FrozenList(
_coerce_indexer_frozen(level_codes, lev, copy=copy).view()
for lev, level_codes in zip(self._levels, codes, strict=True)
)
level_numbers = range(len(new_codes))
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_codes_list = list(self._codes)
for lev_num, level_codes in zip(level_numbers, codes, strict=True):
lev = self.levels[lev_num]
new_codes_list[lev_num] = _coerce_indexer_frozen(
level_codes, lev, copy=copy
)
new_codes = FrozenList(new_codes_list)
if verify_integrity:
new_codes = self._verify_integrity(
codes=new_codes, levels_to_verify=level_numbers
)
self._codes = new_codes
self._reset_cache()
def set_codes(
self, codes, *, level=None, verify_integrity: bool = True
) -> MultiIndex:
"""
Set new codes on MultiIndex. Defaults to returning new index.
Parameters
----------
codes : sequence or list of sequence
New codes to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
MultiIndex
A new ``MultiIndex`` with the updated codes.
See Also
--------
MultiIndex.set_levels : Set new levels on MultiIndex.
MultiIndex.codes : Get the codes of the levels in the MultiIndex.
MultiIndex.levels : Get the levels of the MultiIndex.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"]
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([1, 0, 1, 0], level=0)
MultiIndex([(2, 'one'),
(1, 'two'),
(2, 'one'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([0, 0, 1, 1], level="bar")
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
"""
level, codes = _require_listlike(level, codes, "Codes")
idx = self._view()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
return idx
# --------------------------------------------------------------------
# Index Internals
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes:
# NaN values are shifted to 1, and values missing while calculating
# the indexer are shifted to 0
sizes = np.ceil(
np.log2(
[len(level) + libindex.multiindex_nulls_shift for level in self.levels]
)
)
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]])
# Downcast the type if possible, to prevent upcasting when shifting codes:
offsets = offsets.astype(np.min_scalar_type(int(offsets[0])))
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
if lev_bits[0] > 32:
# The levels would overflow a 32 bit uint - use uint64
return MultiIndexUInt64Engine(self.levels, self.codes, offsets)
if lev_bits[0] > 16:
# The levels would overflow a 16 bit uint - use uint32
return MultiIndexUInt32Engine(self.levels, self.codes, offsets)
if lev_bits[0] > 8:
# The levels would overflow a 8 bit uint - use uint16
return MultiIndexUInt16Engine(self.levels, self.codes, offsets)
# The levels fit in an 8 bit uint - use uint8
return MultiIndexUInt8Engine(self.levels, self.codes, offsets)
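# Worked illustration, assuming a nulls shift of 2 (the actual value comes
# from libindex.multiindex_nulls_shift): with two levels of sizes 3 and 1000,
# the per-level bit counts are ceil(log2(5)) = 3 and ceil(log2(1002)) = 10.
# Cumulating from the right gives lev_bits = [13, 10], hence offsets = [10, 0],
# so the packed key for row i is roughly (codes[0][i] << 10) | codes[1][i].
# Since lev_bits[0] = 13 is > 8 and <= 16, the uint16 engine is selected above.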
# Return type "Callable[..., MultiIndex]" of "_constructor" incompatible with return
# type "Type[MultiIndex]" in supertype "Index"
@property
def _constructor(self) -> Callable[..., MultiIndex]: # type: ignore[override]
return type(self).from_tuples
@doc(Index._shallow_copy)
def _shallow_copy(self, values: np.ndarray, name=lib.no_default) -> MultiIndex:
names = name if name is not lib.no_default else self.names
return type(self).from_tuples(values, sortorder=None, names=names)
def _view(self) -> MultiIndex:
result = type(self)(
levels=self.levels,
codes=self.codes,
sortorder=self.sortorder,
names=self.names,
verify_integrity=False,
)
result._cache = self._cache.copy()
result._reset_cache("levels") # GH32669
return result
# --------------------------------------------------------------------
# error: Signature of "copy" incompatible with supertype "Index"
def copy( # type: ignore[override]
self,
names=None,
deep: bool = False,
name=None,
) -> Self:
"""
Make a copy of this object. Names can be passed and will be set on
the new copy.
The `copy` method provides a mechanism to create a duplicate of an
existing MultiIndex object. This is particularly useful in scenarios where
modifications are required on an index, but the original MultiIndex should
remain unchanged. By specifying the `deep` parameter, users can control
whether the copy should be a deep or shallow copy, providing flexibility
depending on the size and complexity of the MultiIndex.
Parameters
----------
names : sequence, optional
Names to set on the new MultiIndex object.
deep : bool, default False
If False, the new object will be a shallow copy. If True, a deep copy
will be attempted. Deep copying can be potentially expensive for large
MultiIndex objects.
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
Returns
-------
MultiIndex
A new MultiIndex object with the specified modifications.
See Also
--------
MultiIndex.from_arrays : Convert arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Convert DataFrame to MultiIndex.
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a"], ["b"], ["c"]])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.copy()
MultiIndex([('a', 'b', 'c')],
)
"""
names = self._validate_names(name=name, names=names, deep=deep)
keep_id = not deep
levels, codes = None, None
if deep:
from copy import deepcopy
levels = deepcopy(self.levels)
codes = deepcopy(self.codes)
levels = levels if levels is not None else self.levels
codes = codes if codes is not None else self.codes
new_index = type(self)(
levels=levels,
codes=codes,
sortorder=self.sortorder,
names=names,
verify_integrity=False,
)
new_index._cache = self._cache.copy()
new_index._reset_cache("levels") # GH32669
if keep_id:
new_index._id = self._id
return new_index
def __array__(self, dtype=None, copy=None) -> np.ndarray:
"""the array interface, return my values"""
if copy is False:
# self.values is always a newly construct array, so raise.
raise ValueError(
"Unable to avoid copy while creating an array as requested."
)
if copy is True:
# explicit np.array call to ensure a copy is made and unique objects
# are returned, because self.values is cached
return np.array(self.values, dtype=dtype)
return self.values
def view(self, cls=None) -> Self:
"""this is defined as a copy with the same identity"""
result = self.copy()
result._id = self._id
return result
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError, ValueError):
return False
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
@cache_readonly
def _is_memory_usage_qualified(self) -> bool:
"""return a boolean if we need a qualified .info display"""
def f(dtype) -> bool:
return is_object_dtype(dtype) or (
is_string_dtype(dtype) and dtype.storage == "python"
)
return any(f(level.dtype) for level in self.levels)
# Cannot determine type of "memory_usage"
@doc(Index.memory_usage) # type: ignore[has-type]
def memory_usage(self, deep: bool = False) -> int:
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self) -> int:
"""return the number of bytes in the underlying data"""
return self._nbytes(False)
def _nbytes(self, deep: bool = False) -> int:
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
*this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable, only if it's already cached
if "_engine" in self._cache:
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
"""
formatter_funcs = (level._formatter_func for level in self.levels)
return tuple(func(val) for func, val in zip(formatter_funcs, tup, strict=True))
def _get_values_for_csv(
self, *, na_rep: str = "nan", **kwargs
) -> npt.NDArray[np.object_]:
new_levels = []
new_codes = []
# go through the levels and format them
for level, level_codes in zip(self.levels, self.codes, strict=True):
level_strs = level._get_values_for_csv(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = level_codes == -1
if mask.any():
nan_index = len(level_strs)
# numpy 1.21 deprecated implicit string casting
level_strs = level_strs.astype(str)
level_strs = np.append(level_strs, na_rep)
assert not level_codes.flags.writeable # i.e. copy is needed
level_codes = level_codes.copy() # make writeable
level_codes[mask] = nan_index
new_levels.append(level_strs)
new_codes.append(level_codes)
if len(new_levels) == 1:
# a single-level multi-index
return Index(new_levels[0].take(new_codes[0]))._get_values_for_csv()
else:
# reconstruct the multi-index
mi = MultiIndex(
levels=new_levels,
codes=new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
return mi._values
def _format_multi(
self,
*,
include_names: bool,
sparsify: bool | None | lib.NoDefault,
formatter: Callable | None = None,
) -> list:
if len(self) == 0:
return []
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes, strict=True):
na = _get_na_rep(lev.dtype)
if len(lev) > 0:
taken = formatted = lev.take(level_codes)
formatted = taken._format_flat(include_name=False, formatter=formatter)
# we have some NA
mask = level_codes == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [
pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
for x in algos.take_nd(lev._values, level_codes)
]
stringified_levels.append(formatted)
result_levels = []
for lev, lev_name in zip(stringified_levels, self.names, strict=True):
level = []
if include_names:
level.append(
pprint_thing(lev_name, escape_chars=("\t", "\r", "\n"))
if lev_name is not None
else ""
)
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel: Literal[""] | bool | lib.NoDefault = ""
# GH3547 use value of sparsify as sentinel if it's "Falsey"
assert isinstance(sparsify, bool) or sparsify is lib.no_default
if sparsify is lib.no_default:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = sparsify_labels(
result_levels, start=int(include_names), sentinel=sentinel
)
return result_levels
# --------------------------------------------------------------------
# Names Methods
def _get_names(self) -> FrozenList:
return FrozenList(self._names)
def _set_names(self, names, *, level=None) -> None:
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
names = list(names)
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = (self._get_level_number(lev) for lev in level)
# set the name
for lev, name in zip(level, names, strict=True):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
self._names[lev] = name
# If .levels has been accessed, the .name of each level in our cache
# will be stale.
self._reset_cache("levels")
names = property(
fset=_set_names,
fget=_get_names,
doc="""
Names of levels in MultiIndex.
This attribute provides access to the names of the levels in a `MultiIndex`.
The names are stored as a `FrozenList`, which is an immutable list-like
container. Each name corresponds to a level in the `MultiIndex`, and can be
used to identify or manipulate the levels individually.
See Also
--------
MultiIndex.set_names : Set Index or MultiIndex name.
MultiIndex.rename : Rename specific levels in a MultiIndex.
Index.names : Get names on index.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']
... )
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.names
FrozenList(['x', 'y', 'z'])
""",
)
# --------------------------------------------------------------------
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
def _get_level_number(self, level) -> int:
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(
f"The name {level} occurs multiple times, use a level number"
)
try:
level = self.names.index(level)
except ValueError as err:
if not is_integer(level):
raise KeyError(f"Level {level} not found") from err
if level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"{orig_level} is not a valid level number"
) from err
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"not {level + 1}"
) from err
return level
@cache_readonly
def is_monotonic_increasing(self) -> bool:
"""
Return a boolean if the values are equal or increasing.
"""
if any(-1 in code for code in self.codes):
return False
if all(level.is_monotonic_increasing for level in self.levels):
# If each level is sorted, we can operate on the codes directly. GH27495
return libalgos.is_lexsorted(
[x.astype("int64", copy=False) for x in self.codes]
)
# reversed() because lexsort() wants the most significant key last.
values = [
self._get_level_values(i)._values for i in reversed(range(len(self.levels)))
]
try:
# error: Argument 1 to "lexsort" has incompatible type
# "List[Union[ExtensionArray, ndarray[Any, Any]]]";
# expected "Union[_SupportsArray[dtype[Any]],
# _NestedSequence[_SupportsArray[dtype[Any]]], bool,
# int, float, complex, str, bytes, _NestedSequence[Union
# [bool, int, float, complex, str, bytes]]]"
sort_order = np.lexsort(values) # type: ignore[arg-type]
return Index(sort_order).is_monotonic_increasing
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self._values).is_monotonic_increasing
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
Return a boolean if the values are equal or decreasing.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@doc(Index.duplicated)
def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
shape = tuple(len(lev) for lev in self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
return duplicated(ids, keep)
# error: Cannot override final attribute "_duplicated"
# (previously declared in base class "IndexOpsMixin")
_duplicated = duplicated # type: ignore[misc]
def fillna(self, value):
"""
fillna is not implemented for MultiIndex
"""
raise NotImplementedError("fillna is not defined for MultiIndex")
@doc(Index.dropna)
def dropna(self, how: AnyAll = "any") -> MultiIndex:
nans = [level_codes == -1 for level_codes in self.codes]
if how == "any":
indexer = np.any(nans, axis=0)
elif how == "all":
indexer = np.all(nans, axis=0)
else:
raise ValueError(f"invalid how option: {how}")
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.set_codes(codes=new_codes)
def _get_level_values(self, level: int, unique: bool = False) -> Index:
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int
unique : bool, default False
if True, drop duplicated values
Returns
-------
Index
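        Examples
        --------
        A small illustration (arrays chosen for demonstration):
        >>> mi = pd.MultiIndex.from_arrays([[10, 10, 20], ["x", "y", "z"]])
        >>> mi._get_level_values(0)
        Index([10, 10, 20], dtype='int64')
        >>> mi._get_level_values(0, unique=True)
        Index([10, 20], dtype='int64')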
"""
lev = self.levels[level]
level_codes = self.codes[level]
name = self._names[level]
if unique:
level_codes = algos.unique(level_codes)
filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)
return lev._shallow_copy(filled, name=name)
def get_level_values(self, level) -> Index:
"""
Return vector of label values for requested level.
Length of returned vector is equal to the length of the index.
The `get_level_values` method is a crucial utility for extracting
specific level values from a `MultiIndex`. This function is particularly
useful when working with multi-level data, allowing you to isolate
and manipulate individual levels without having to deal with the
complexity of the entire `MultiIndex` structure. It seamlessly handles
both integer and string-based level access, providing flexibility in
how you can interact with the data. Additionally, this method ensures
that the returned `Index` maintains the integrity of the original data,
even when missing values are present, by appropriately casting the
result to a suitable data type.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
See Also
--------
MultiIndex : A multi-level, or hierarchical, index object for pandas objects.
Index : Immutable sequence used for indexing and alignment.
MultiIndex.remove_unused_levels : Create new MultiIndex from current that
removes unused levels.
Notes
-----
If the level contains missing values, the result may be casted to
``float`` with missing values specified as ``NaN``. This is because
the level is converted to a regular ``Index``.
Examples
--------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list("abc"), list("def")))
>>> mi.names = ["level_1", "level_2"]
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values("level_2")
Index(['d', 'e', 'f'], dtype='object', name='level_2')
If a level contains missing values, the return type of the level
may be cast to ``float``.
>>> pd.MultiIndex.from_arrays([[1, None, 2], [3, 4, 5]]).dtypes
level_0 int64
level_1 int64
dtype: object
>>> pd.MultiIndex.from_arrays([[1, None, 2], [3, 4, 5]]).get_level_values(0)
Index([1.0, nan, 2.0], dtype='float64')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@doc(Index.unique)
def unique(self, level=None):
if level is None:
return self.drop_duplicates()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def to_frame(
self,
index: bool = True,
name=lib.no_default,
allow_duplicates: bool = False,
) -> DataFrame:
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
        allow_duplicates : bool, optional, default False
Allow duplicate column labels to be created.
Returns
-------
DataFrame
DataFrame representation of the MultiIndex, with levels as columns.
See Also
--------
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous
tabular data.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a", "b"], ["c", "d"]])
>>> mi
MultiIndex([('a', 'c'),
('b', 'd')],
)
>>> df = mi.to_frame()
>>> df
0 1
a c a c
b d b d
>>> df = mi.to_frame(index=False)
>>> df
0 1
0 a c
1 b d
>>> df = mi.to_frame(name=["x", "y"])
>>> df
x y
a c a c
b d b d
"""
from pandas import DataFrame
if name is not lib.no_default:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
"'name' should have same length as number of levels on index."
)
idx_names = name
else:
idx_names = self._get_level_names()
if not allow_duplicates and len(set(idx_names)) != len(idx_names):
raise ValueError(
"Cannot create duplicate column labels if allow_duplicates is False"
)
# Guarantee resulting column order - PY36+ dict maintains insertion order
result = DataFrame(
{level: self._get_level_values(level) for level in range(len(self.levels))},
copy=False,
)
result.columns = idx_names
if index:
result.index = self
return result
# error: Return type "Index" of "to_flat_index" incompatible with return type
# "MultiIndex" in supertype "Index"
def to_flat_index(self) -> Index: # type: ignore[override]
"""
Convert a MultiIndex to an Index of Tuples containing the level values.
Returns
-------
pd.Index
Index with the MultiIndex data represented in Tuples.
See Also
--------
MultiIndex.from_tuples : Convert flat index back to MultiIndex.
Notes
-----
This method will simply return the caller if called by anything other
than a MultiIndex.
Examples
--------
>>> index = pd.MultiIndex.from_product(
... [["foo", "bar"], ["baz", "qux"]], names=["a", "b"]
... )
>>> index.to_flat_index()
Index([('foo', 'baz'), ('foo', 'qux'),
('bar', 'baz'), ('bar', 'qux')],
dtype='object')
"""
return Index(self._values, tupleize_cols=False)
def _is_lexsorted(self) -> bool:
"""
Return True if the codes are lexicographically sorted.
Returns
-------
bool
Examples
--------
In the below examples, the first level of the MultiIndex is sorted because
a<b<c, so there is no need to look at the next level.
>>> pd.MultiIndex.from_arrays(
... [["a", "b", "c"], ["d", "e", "f"]]
... )._is_lexsorted()
True
>>> pd.MultiIndex.from_arrays(
... [["a", "b", "c"], ["d", "f", "e"]]
... )._is_lexsorted()
True
In case there is a tie, the lexicographical sorting looks
at the next level of the MultiIndex.
>>> pd.MultiIndex.from_arrays([[0, 1, 1], ["a", "b", "c"]])._is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([[0, 1, 1], ["a", "c", "b"]])._is_lexsorted()
False
>>> pd.MultiIndex.from_arrays(
... [["a", "a", "b", "b"], ["aa", "bb", "aa", "bb"]]
... )._is_lexsorted()
True
>>> pd.MultiIndex.from_arrays(
... [["a", "a", "b", "b"], ["bb", "aa", "aa", "bb"]]
... )._is_lexsorted()
False
"""
return self._lexsort_depth == self.nlevels
@cache_readonly
def _lexsort_depth(self) -> int:
"""
Compute and return the lexsort_depth, the number of levels of the
MultiIndex that are sorted lexically
Returns
-------
int
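        Examples
        --------
        A small illustration (arrays chosen for demonstration): only the first
        level is lexsorted, so the depth is 1.
        >>> mi = pd.MultiIndex.from_arrays([["a", "a", "b"], ["z", "y", "x"]])
        >>> mi._lexsort_depth
        1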
"""
if self.sortorder is not None:
return self.sortorder
return _lexsort_depth(self.codes, self.nlevels)
def _sort_levels_monotonic(self, raise_if_incomparable: bool = False) -> MultiIndex:
"""
This is an *internal* function.
Create a new MultiIndex from the current to monotonically sorted
items IN the levels. This does not actually make the entire MultiIndex
monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex(
... levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
... )
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.sort_values()
MultiIndex([('a', 'aa'),
('a', 'bb'),
('b', 'aa'),
('b', 'bb')],
)
"""
if self._is_lexsorted() and self.is_monotonic_increasing:
return self
new_levels = []
new_codes = []
for lev, level_codes in zip(self.levels, self.codes, strict=True):
if not lev.is_monotonic_increasing:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
if raise_if_incomparable:
raise
else:
lev = lev.take(indexer)
# indexer to reorder the level codes
indexer = ensure_platform_int(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
level_codes = algos.take_nd(ri, level_codes, fill_value=-1)
new_levels.append(lev)
new_codes.append(level_codes)
return MultiIndex(
new_levels,
new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def remove_unused_levels(self) -> MultiIndex:
"""
Create new MultiIndex from current that removes unused levels.
Unused level(s) means levels that are not expressed in the
labels. The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will
also be .equals() to the original.
The `remove_unused_levels` method is useful in cases where you have a
MultiIndex with hierarchical levels, but some of these levels are no
longer needed due to filtering or subsetting operations. By removing
the unused levels, the resulting MultiIndex becomes more compact and
efficient, which can improve performance in subsequent operations.
Returns
-------
MultiIndex
A new MultiIndex with unused levels removed.
See Also
--------
MultiIndex.droplevel : Remove specified levels from a MultiIndex.
MultiIndex.reorder_levels : Rearrange levels of a MultiIndex.
MultiIndex.set_levels : Set new levels on a MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_product([range(2), list("ab")])
>>> mi
MultiIndex([(0, 'a'),
(0, 'b'),
(1, 'a'),
(1, 'b')],
)
>>> mi[2:]
MultiIndex([(1, 'a'),
(1, 'b')],
)
The 0 from the first level is not represented
and can be removed
>>> mi2 = mi[2:].remove_unused_levels()
>>> mi2.levels
FrozenList([[1], ['a', 'b']])
"""
new_levels = []
new_codes = []
changed = False
for lev, level_codes in zip(self.levels, self.codes, strict=True):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
if lev.isna().any() and len(uniques) == len(lev):
break
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "level_codes" when all items
# are found:
uniques = algos.unique(level_codes)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# codes get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
code_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
code_mapping[uniques] = np.arange(len(uniques)) - has_na
level_codes = code_mapping[level_codes]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_codes.append(level_codes)
result = self.view()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_codes(new_codes, validate=False)
return result
# --------------------------------------------------------------------
# Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
d = {
"levels": list(self.levels),
"codes": list(self.codes),
"sortorder": self.sortorder,
"names": list(self.names),
}
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
if is_scalar(key):
key = com.cast_scalar_indexer(key)
retval = []
for lev, level_codes in zip(self.levels, self.codes, strict=True):
if level_codes[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[level_codes[key]])
return tuple(retval)
else:
# in general cannot be sure whether the result will be sorted
sortorder = None
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
sortorder = self.sortorder
elif isinstance(key, slice):
if key.step is None or key.step > 0:
sortorder = self.sortorder
elif isinstance(key, Index):
key = np.asarray(key)
new_codes = [level_codes[key] for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
sortorder = None
if slobj.step is None or slobj.step > 0:
sortorder = self.sortorder
new_codes = [level_codes[slobj] for level_codes in self.codes]
return type(self)(
levels=self.levels,
codes=new_codes,
names=self._names,
sortorder=sortorder,
verify_integrity=False,
)
def take(
self: MultiIndex,
indices,
axis: Axis = 0,
allow_fill: bool = True,
fill_value=None,
**kwargs,
) -> MultiIndex:
"""
Return a new MultiIndex of the values selected by the indices.
For internal compatibility with numpy arrays.
Parameters
----------
indices : array-like
Indices to be taken.
axis : {0 or 'index'}, optional
The axis over which to select values, always 0 or 'index'.
allow_fill : bool, default True
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
              missing values. These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
fill_value : scalar, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If Index doesn't hold NA, raise ValueError.
**kwargs
Required for compatibility with numpy.
Returns
-------
Index
An index formed of elements at the given indices. Will be the same
type as self, except for RangeIndex.
See Also
--------
numpy.ndarray.take: Return an array formed from the
elements of a at the given indices.
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([["a", "b", "c"], [1, 2, 3]])
>>> idx
MultiIndex([('a', 1),
('b', 2),
('c', 3)],
)
>>> idx.take([2, 2, 1, 0])
MultiIndex([('c', 3),
('c', 3),
('b', 2),
('a', 1)],
)
"""
nv.validate_take((), kwargs)
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
if indices.ndim == 1 and lib.is_range_indexer(indices, len(self)):
return self.copy()
na_value = -1
taken = [lab.take(indices) for lab in self.codes]
if allow_fill:
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
return MultiIndex(
levels=self.levels, codes=taken, names=self.names, verify_integrity=False
)
def append(self, other):
"""
Append a collection of Index options together.
The `append` method is used to combine multiple `Index` objects into a single
`Index`. This is particularly useful when dealing with multi-level indexing
(MultiIndex) where you might need to concatenate different levels of indices.
The method handles the alignment of the levels and codes of the indices being
appended to ensure consistency in the resulting `MultiIndex`.
Parameters
----------
other : Index or list/tuple of indices
Index or list/tuple of Index objects to be appended.
Returns
-------
Index
The combined index.
See Also
--------
MultiIndex: A multi-level, or hierarchical, index object for pandas objects.
Index.append : Append a collection of Index options together.
concat : Concatenate pandas objects along a particular axis.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a"], ["b"]])
>>> mi
MultiIndex([('a', 'b')],
)
>>> mi.append(mi)
MultiIndex([('a', 'b'), ('a', 'b')],
)
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all(
(isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
):
codes = []
levels = []
names = []
for i in range(self.nlevels):
level_values = self.levels[i]
for mi in other:
level_values = level_values.union(mi.levels[i])
level_codes = [
recode_for_categories(
mi.codes[i], mi.levels[i], level_values, copy=False
)
for mi in ([self, *other])
]
level_name = self.names[i]
if any(mi.names[i] != level_name for mi in other):
level_name = None
codes.append(np.concatenate(level_codes))
levels.append(level_values)
names.append(level_name)
return MultiIndex(
codes=codes, levels=levels, names=names, verify_integrity=False
)
to_concat = (self._values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
# We only get here if other contains at least one index with tuples,
# setting names to None automatically
return MultiIndex.from_tuples(new_tuples)
except (TypeError, IndexError):
return Index(new_tuples)
def argsort(
self, *args, na_position: NaPosition = "last", **kwargs
) -> npt.NDArray[np.intp]:
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
np.ndarray[np.intp]
Integer indices that would sort the index if used as
an indexer.
See Also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.argsort : Similar method for Index.
Examples
--------
>>> midx = pd.MultiIndex.from_arrays([[3, 2], ["e", "c"]])
>>> midx
MultiIndex([(3, 'e'), (2, 'c')])
>>> order = midx.argsort()
>>> order
array([1, 0])
>>> midx[order]
MultiIndex([(2, 'c'),
(3, 'e')],
)
>>> midx = pd.MultiIndex.from_arrays([[2, 2], [np.nan, 0]])
>>> midx.argsort(na_position="first")
array([0, 1])
>>> midx.argsort()
array([1, 0])
"""
target = self._sort_levels_monotonic(raise_if_incomparable=True)
keys = [lev.codes for lev in target._get_codes_for_sorting()]
return lexsort_indexer(keys, na_position=na_position, codes_given=True)
def repeat(self, repeats: int, axis=None) -> MultiIndex:
"""
Repeat elements of a MultiIndex.
Returns a new MultiIndex where each element of the current MultiIndex
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
MultiIndex.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
MultiIndex
Newly created MultiIndex with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([["a", "b", "c"], [1, 2, 3]])
>>> idx
MultiIndex([('a', 1),
('b', 2),
('c', 3)],
)
>>> idx.repeat(2)
MultiIndex([('a', 1),
('a', 1),
('b', 2),
('b', 2),
('c', 3),
('c', 3)],
)
>>> idx.repeat([1, 2, 3])
MultiIndex([('a', 1),
('b', 2),
('b', 2),
('c', 3),
('c', 3),
('c', 3)],
)
"""
nv.validate_repeat((), {"axis": axis})
# error: Incompatible types in assignment (expression has type "ndarray",
# variable has type "int")
repeats = ensure_platform_int(repeats) # type: ignore[assignment]
return MultiIndex(
levels=self.levels,
codes=[
level_codes.view(np.ndarray).astype(np.intp, copy=False).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
# error: Signature of "drop" incompatible with supertype "Index"
def drop( # type: ignore[override]
self,
codes,
level: Index | np.ndarray | Iterable[Hashable] | None = None,
errors: IgnoreRaise = "raise",
) -> MultiIndex:
"""
Make a new :class:`pandas.MultiIndex` with the passed list of codes deleted.
This method allows for the removal of specified labels from a MultiIndex.
The labels to be removed can be provided as a list of tuples if no level
is specified, or as a list of labels from a specific level if the level
parameter is provided. This can be useful for refining the structure of a
MultiIndex to fit specific requirements.
Parameters
----------
codes : array-like
Must be a list of tuples when ``level`` is not specified.
level : int or level name, default None
Level from which the labels will be dropped.
errors : str, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
MultiIndex
A new MultiIndex with the specified labels removed.
See Also
--------
MultiIndex.remove_unused_levels : Create new MultiIndex from current that
removes unused levels.
MultiIndex.reorder_levels : Rearrange levels using input order.
MultiIndex.rename : Rename levels in a MultiIndex.
Examples
--------
>>> idx = pd.MultiIndex.from_product(
... [(0, 1, 2), ("green", "purple")], names=["number", "color"]
... )
>>> idx
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
>>> idx.drop([(1, "green"), (2, "purple")])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'purple'),
(2, 'green')],
names=['number', 'color'])
We can also drop from a specific level.
>>> idx.drop("green", level="color")
MultiIndex([(0, 'purple'),
(1, 'purple'),
(2, 'purple')],
names=['number', 'color'])
>>> idx.drop([1, 2], level=0)
MultiIndex([(0, 'green'),
(0, 'purple')],
names=['number', 'color'])
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=np.dtype("object"))
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
step = loc.step if loc.step is not None else 1
inds.extend(range(loc.start, loc.stop, step))
elif com.is_bool_indexer(loc):
if get_option("performance_warnings") and self._lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=find_stack_level(),
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
def _drop_from_level(
self, codes, level, errors: IgnoreRaise = "raise"
) -> MultiIndex:
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
# If nan should be dropped it will equal -1 here. We have to check which values
# are not nan and equal -1, this means they are missing in the index
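        # Illustrative sketch (labels assumed for demonstration): for a level
        # ["a", "b"] and codes=["b", "c"], get_indexer returns [1, -1]; since
        # "c" is not NaN, its -1 is recoded to -2 below and later reported as
        # not found.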
nan_codes = isna(codes)
values[(np.equal(nan_codes, False)) & (values == -1)] = -2
if index.shape[0] == self.shape[0]:
values[np.equal(nan_codes, True)] = -2
not_found = codes[values == -2]
if len(not_found) != 0 and errors != "ignore":
raise KeyError(f"labels {not_found} not found in level")
mask = ~algos.isin(self.codes[i], values)
return self[mask]
def swaplevel(self, i=-2, j=-1) -> MultiIndex:
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(
... levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
... )
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.swaplevel(0, 1)
MultiIndex([('bb', 'a'),
('aa', 'a'),
('bb', 'b'),
('aa', 'b')],
)
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def reorder_levels(self, order) -> MultiIndex:
"""
Rearrange levels using input order. May not drop or duplicate levels.
`reorder_levels` is useful when you need to change the order of levels in
a MultiIndex, such as when reordering levels for hierarchical indexing. It
maintains the integrity of the MultiIndex, ensuring that all existing levels
are present and no levels are duplicated. This method is helpful for aligning
the index structure with other data structures or for optimizing the order
for specific data operations.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
Returns
-------
MultiIndex
A new MultiIndex with levels rearranged according to the specified order.
See Also
--------
MultiIndex.swaplevel : Swap two levels of the MultiIndex.
MultiIndex.set_names : Set names for the MultiIndex levels.
DataFrame.reorder_levels : Reorder levels in a DataFrame with a MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=["x", "y"])
>>> mi
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.reorder_levels(order=[1, 0])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
>>> mi.reorder_levels(order=["y", "x"])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
"""
order = [self._get_level_number(i) for i in order]
result = self._reorder_ilevels(order)
return result
def _reorder_ilevels(self, order) -> MultiIndex:
if len(order) != self.nlevels:
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}), "
f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def _recode_for_new_levels(
self, new_levels, *, copy: bool
) -> Generator[np.ndarray]:
if len(new_levels) > self.nlevels:
raise AssertionError(
f"Length of new_levels ({len(new_levels)}) "
f"must be <= self.nlevels ({self.nlevels})"
)
for i in range(len(new_levels)):
yield recode_for_categories(
self.codes[i], self.levels[i], new_levels[i], copy=copy
)
def _get_codes_for_sorting(self) -> list[Categorical]:
"""
        We categorize our codes using the available categories (all, not just
        observed), excluding any missing ones (-1). This is in preparation for
        sorting, where we need to disambiguate that -1 is not a valid value.
"""
def cats(level_codes: np.ndarray) -> np.ndarray:
return np.arange(
level_codes.max() + 1 if len(level_codes) else 0,
dtype=level_codes.dtype,
)
return [
Categorical.from_codes(level_codes, cats(level_codes), True, validate=False)
for level_codes in self.codes
]
def sortlevel(
self,
level: IndexLabel = 0,
ascending: bool | list[bool] = True,
sort_remaining: bool = True,
na_position: str = "first",
) -> tuple[MultiIndex, npt.NDArray[np.intp]]:
"""
Sort MultiIndex at the requested level.
This method is useful when dealing with MultiIndex objects, allowing for
sorting at a specific level of the index. The function preserves the
relative ordering of data within the same level while sorting
the overall MultiIndex. The method provides flexibility with the `ascending`
parameter to define the sort order and with the `sort_remaining` parameter to
control whether the remaining levels should also be sorted. Sorting a
MultiIndex can be crucial when performing operations that require ordered
indices, such as grouping or merging datasets. The `na_position` argument is
important in handling missing values consistently across different levels.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level.
If list-like must be names or ints of levels.
ascending : bool, default True
False to sort in descending order.
Can also be a list to specify a directed ordering.
sort_remaining : bool, default True
If True, sorts by the remaining levels after sorting by the specified
`level`.
na_position : {'first' or 'last'}, default 'first'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
.. versionadded:: 2.1.0
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray[np.intp]
Indices of output values in original index.
See Also
--------
MultiIndex : A multi-level, or hierarchical, index object for pandas objects.
Index.sort_values : Sort Index values.
DataFrame.sort_index : Sort DataFrame by the index.
Series.sort_index : Sort Series by the index.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]])
>>> mi
MultiIndex([(0, 2),
(0, 1)],
)
>>> mi.sortlevel()
(MultiIndex([(0, 1),
(0, 2)],
), array([1, 0]))
>>> mi.sortlevel(sort_remaining=False)
(MultiIndex([(0, 2),
(0, 1)],
), array([0, 1]))
>>> mi.sortlevel(1)
(MultiIndex([(0, 1),
(0, 2)],
), array([1, 0]))
>>> mi.sortlevel(1, ascending=False)
(MultiIndex([(0, 2),
(0, 1)],
), array([0, 1]))
"""
if not is_list_like(level):
level = [level]
# error: Item "Hashable" of "Union[Hashable, Sequence[Hashable]]" has
# no attribute "__iter__" (not iterable)
level = [
self._get_level_number(lev)
for lev in level # type: ignore[union-attr]
]
sortorder = None
codes = [self.codes[lev] for lev in level]
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
elif sort_remaining:
codes.extend(
[self.codes[lev] for lev in range(len(self.levels)) if lev not in level]
)
else:
sortorder = level[0]
indexer = lexsort_indexer(
codes, orders=ascending, na_position=na_position, codes_given=True
)
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(
codes=new_codes,
levels=self.levels,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
return new_index, indexer
def _wrap_reindex_result(self, target, indexer, preserve_names: bool):
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
try:
target = MultiIndex.from_tuples(target)
except TypeError:
# not all tuples, see test_constructor_dict_multiindex_reindex_flat
return target
target = self._maybe_preserve_names(target, preserve_names)
return target
def _maybe_preserve_names(self, target: IndexT, preserve_names: bool) -> IndexT:
if (
preserve_names
and target.nlevels == self.nlevels
and target.names != self.names
):
target = target.copy(deep=False)
target.names = self.names
return target
# --------------------------------------------------------------------
# Indexing Methods
def _check_indexing_error(self, key) -> None:
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
@cache_readonly
def _should_fallback_to_positional(self) -> bool:
"""
Should integer key(s) be treated as positional?
"""
# GH#33355
return self.levels[0]._should_fallback_to_positional
def _get_indexer_strict(
self, key, axis_name: str
) -> tuple[Index, npt.NDArray[np.intp]]:
keyarr = key
if not isinstance(keyarr, Index):
keyarr = com.asarray_tuplesafe(keyarr)
if len(keyarr) and not isinstance(keyarr[0], tuple):
indexer = self._get_indexer_level_0(keyarr)
self._raise_if_missing(key, indexer, axis_name)
return self[indexer], indexer
return super()._get_indexer_strict(key, axis_name)
def _raise_if_missing(self, key, indexer, axis_name: str) -> None:
keyarr = key
if not isinstance(key, Index):
keyarr = com.asarray_tuplesafe(key)
if len(keyarr) and not isinstance(keyarr[0], tuple):
# i.e. same condition for special case in MultiIndex._get_indexer_strict
mask = indexer == -1
if mask.any():
check = self.levels[0].get_indexer(keyarr)
cmask = check == -1
if cmask.any():
raise KeyError(f"{keyarr[cmask]} not in index")
# We get here when levels still contain values which are not
# actually in Index anymore
raise KeyError(f"{keyarr} not in index")
else:
return super()._raise_if_missing(key, indexer, axis_name)
def _get_indexer_level_0(self, target) -> npt.NDArray[np.intp]:
"""
Optimized equivalent to `self.get_level_values(0).get_indexer_for(target)`.
"""
lev = self.levels[0]
codes = self._codes[0]
cat = Categorical.from_codes(codes=codes, categories=lev, validate=False)
ci = Index(cat)
return ci.get_indexer_for(target)
def get_slice_bound(
self,
label: Hashable | Sequence[Hashable],
side: Literal["left", "right"],
) -> int:
"""
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
        Returns leftmost (one-past-the-rightmost if ``side == 'right'``) position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list("abbc"), list("gefd")])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound("b", side="left")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(("b", "f"), side="right")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]:
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [list("abbd"), list("deff")], names=["A", "B"]
... )
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start="b")
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start="b", end=("b", "f"))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step)
def _partial_tup_index(self, tup: tuple, side: Literal["left", "right"] = "left"):
if len(tup) > self._lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self._lexsort_depth})"
)
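        # Illustrative sketch (values assumed for demonstration): for
        # tup == ("b", "x") on a lexsorted index, the first iteration narrows
        # [start, end) to the block of rows whose first-level label is "b",
        # and the last iteration returns the requested bound within that block
        # via searchsorted on the level codes.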
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes, strict=True)
for k, (lab, lev, level_codes) in enumerate(zipped):
section = level_codes[start:end]
loc: npt.NDArray[np.intp] | np.intp | int
if lab not in lev and not isna(lab):
# short circuit
try:
loc = algos.searchsorted(lev, lab, side=side)
except TypeError as err:
# non-comparable e.g. test_slice_locs_with_type_mismatch
raise TypeError(f"Level type mismatch: {lab}") from err
if not is_integer(loc):
# non-comparable level, e.g. test_groupby_example
raise TypeError(f"Level type mismatch: {lab}")
if side == "right" and loc >= 0:
loc -= 1
return start + algos.searchsorted(section, loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if isinstance(idx, slice) and k < n - 1:
# Get start and end value from slice, necessary when a non-integer
# interval is given as input GH#37707
start = idx.start
end = idx.stop
elif k < n - 1:
# error: Incompatible types in assignment (expression has type
# "Union[ndarray[Any, dtype[signedinteger[Any]]]
end = start + algos.searchsorted( # type: ignore[assignment]
section, idx, side="right"
)
# error: Incompatible types in assignment (expression has type
# "Union[ndarray[Any, dtype[signedinteger[Any]]]
start = start + algos.searchsorted( # type: ignore[assignment]
section, idx, side="left"
)
elif isinstance(idx, slice):
idx = idx.start
return start + algos.searchsorted(section, idx, side=side)
else:
return start + algos.searchsorted(section, idx, side=side)
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
"""
        If key is an NA value, the location is unified as -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
            If key is an NA value, loc is -1.
            Otherwise, the location of key in the index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
"""
if is_scalar(key) and isna(key):
# TODO: need is_valid_na_for_dtype(key, level_index.dtype)
return -1
else:
return level_index.get_loc(key)
def get_loc(self, key):
"""
        Get location for a label or a tuple of labels.
        The location is returned as an integer/slice or boolean mask.
This method returns the integer location, slice object, or boolean mask
corresponding to the specified key, which can be a single label or a tuple
of labels. The key represents a position in the MultiIndex, and the location
indicates where the key is found within the index.
Parameters
----------
key : label or tuple of labels (one for each level)
A label or tuple of labels that correspond to the levels of the MultiIndex.
The key must match the structure of the MultiIndex.
Returns
-------
int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Notes
-----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list("abb"), list("def")])
>>> mi.get_loc("b")
slice(1, 3, None)
>>> mi.get_loc(("b", "e"))
1
"""
self._check_indexing_error(key)
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype="bool")
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError(
f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
)
if keylen == self.nlevels and self.is_unique:
# TODO: what if we have an IntervalIndex level?
# i.e. do we need _index_as_unique on that level?
try:
return self._engine.get_loc(key)
except KeyError as err:
raise KeyError(key) from err
except TypeError:
# e.g. test_partial_slicing_with_multiindex partial string slicing
loc, _ = self.get_loc_level(key, range(self.nlevels))
return loc
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
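        # Illustrative sketch (key assumed for demonstration): with
        # _lexsort_depth == 1 and key == ("b", "z"), lead_key == ("b",) yields
        # a contiguous [start, stop) via slice_locs, and follow_key == ("z",)
        # is matched by scanning the level codes inside that slice.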
i = self._lexsort_depth
lead_key, follow_key = key[:i], key[i:]
if not lead_key:
start = 0
stop = len(self)
else:
try:
start, stop = self.slice_locs(lead_key, lead_key)
except TypeError as err:
# e.g. test_groupby_example key = ((0, 0, 1, 2), "new_col")
# when self has 5 integer levels
raise KeyError(key) from err
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
if get_option("performance_warnings"):
warnings.warn(
"indexing past lexsort depth may impact performance.",
PerformanceWarning,
stacklevel=find_stack_level(),
)
loc = np.arange(start, stop, dtype=np.intp)
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
self.levels[i], k
)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
def get_loc_level(self, key, level: IndexLabel = 0, drop_level: bool = True):
"""
Get location and sliced index for requested label(s)/level(s).
The `get_loc_level` method is a more advanced form of `get_loc`, allowing
users to specify not just a label or sequence of labels, but also the level(s)
in which to search. This method is useful when you need to isolate particular
sections of a MultiIndex, either for further analysis or for slicing and
dicing the data. The method provides flexibility in terms of maintaining
or dropping levels from the resulting index based on the `drop_level`
parameter.
Parameters
----------
key : label or sequence of labels
The label(s) for which to get the location.
level : int/level name or list thereof, optional
The level(s) in the MultiIndex to consider. If not provided, defaults
to the first level.
drop_level : bool, default True
If ``False``, the resulting index will not drop any level.
Returns
-------
tuple
A 2-tuple where the elements :
Element 0: int, slice object or boolean array.
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list("abb"), list("def")], names=["A", "B"])
>>> mi.get_loc_level("b")
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level("e", level="B")
(array([False, True, False]), Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(["b", "e"])
(1, None)
"""
if not isinstance(level, (range, list, tuple)):
level = self._get_level_number(level)
else:
level = [self._get_level_number(lev) for lev in level]
loc, mi = self._get_loc_level(key, level=level)
if not drop_level:
if lib.is_integer(loc):
# Slice index must be an integer or None
mi = self[loc : loc + 1]
else:
mi = self[loc]
return loc, mi
def _get_loc_level(self, key, level: int | list[int] = 0):
"""
get_loc_level but with `level` known to be positional, not name-based.
"""
# different name to distinguish from maybe_droplevels
def maybe_mi_droplevels(indexer, levels):
"""
If level does not exist or all levels were dropped, the exception
has to be handled outside.
"""
new_index = self[indexer]
for i in sorted(levels, reverse=True):
new_index = new_index._drop_level_numbers([i])
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError(
"Key for location must have same length as number of levels"
)
result = None
for lev, k in zip(level, key, strict=True):
loc, new_index = self._get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
try:
# FIXME: we should be only dropping levels on which we are
# scalar-indexing
mi = maybe_mi_droplevels(result, level)
except ValueError:
# droplevel failed because we tried to drop all levels,
# i.e. len(level) == self.nlevels
mi = self[result]
return result, mi
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
# Check if this tuple is a single key in our first level
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_mi_droplevels(indexer, [0])
return indexer, new_index
except (TypeError, InvalidIndexError):
pass
if not any(isinstance(k, slice) for k in key):
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
try:
return (self._engine.get_loc(key), None)
except KeyError as err:
raise KeyError(key) from err
except TypeError:
# e.g. partial string indexing
# test_partial_string_timestamp_multiindex
pass
# partial selection
indexer = self.get_loc(key)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
if len(ilevels) == self.nlevels:
if is_integer(indexer):
# we are dropping all levels
return indexer, None
# TODO: in some cases we still need to drop some levels,
# e.g. test_multiindex_perf_warn
# test_partial_string_timestamp_multiindex
ilevels = [
i
for i in range(len(key))
if (
not isinstance(key[i], str)
or not self.levels[i]._supports_partial_string_indexing
)
and key[i] != slice(None, None)
]
if len(ilevels) == self.nlevels:
# TODO: why?
ilevels = []
return indexer, maybe_mi_droplevels(indexer, ilevels)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
loc_level = self._get_level_indexer(k, level=i)
if isinstance(loc_level, slice):
if com.is_null_slice(loc_level) or com.is_full_slice(
loc_level, len(self)
):
# everything
continue
# e.g. test_xs_IndexSlice_argument_not_implemented
k_index = np.zeros(len(self), dtype=bool)
k_index[loc_level] = True
else:
k_index = loc_level
elif com.is_null_slice(k):
# taking everything, does not affect `indexer` below
continue
else:
# FIXME: this message can be inaccurate, e.g.
# test_series_varied_multiindex_alignment
raise TypeError(f"Expected label or tuple of labels, got {key}")
if indexer is None:
indexer = k_index
else:
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
return indexer, maybe_mi_droplevels(indexer, ilevels)
else:
indexer = self._get_level_indexer(key, level=level)
if (
isinstance(key, str)
and self.levels[level]._supports_partial_string_indexing
):
# check to see if we did an exact lookup vs sliced
check = self.levels[level].get_loc(key)
if not is_integer(check):
# e.g. test_partial_string_timestamp_multiindex
return indexer, self[indexer]
try:
result_index = maybe_mi_droplevels(indexer, [level])
except ValueError:
result_index = self[indexer]
return indexer, result_index
def _get_level_indexer(
self, key, level: int = 0, indexer: npt.NDArray[np.bool_] | None = None
):
# `level` kwarg is _always_ positional, never name
# return a boolean array or slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
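        # Illustrative sketch (labels assumed for demonstration): for a
        # lexsorted first level ["a", "b", "c"] and key == "b", the scalar
        # branch at the bottom returns slice(start, end) covering the
        # contiguous block of "b" codes; unsorted or deeper levels fall back
        # to a boolean mask instead.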
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# Compute a bool indexer to identify the positions to take.
# If we have an existing indexer, we only need to examine the
# subset of positions where the existing indexer is True.
if indexer is not None:
# we only need to look at the subset of codes where the
# existing indexer equals True
codes = codes[indexer]
if step is None or step == 1:
new_indexer = (codes >= start) & (codes < stop)
else:
r = np.arange(start, stop, step, dtype=codes.dtype)
new_indexer = algos.isin(codes, r)
if indexer is None:
return new_indexer
indexer = indexer.copy()
indexer[indexer] = new_indexer
return indexer
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
step = key.step
is_negative_step = step is not None and step < 0
try:
if key.start is not None:
start = level_index.get_loc(key.start)
elif is_negative_step:
start = len(level_index) - 1
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
elif is_negative_step:
stop = 0
elif isinstance(start, slice):
stop = len(level_index)
else:
stop = len(level_index) - 1
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(key.start, key.stop, key.step)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self._lexsort_depth == 0 or step is not None:
                # need right-searching semantics here, like when we are
                # using a slice, so adjust the stop by 1 (so we include stop)
stop = (stop - 1) if is_negative_step else (stop + 1)
return convert_indexer(start, stop, step)
else:
# sorted, so can return slice object -> view
i = algos.searchsorted(level_codes, start, side="left")
j = algos.searchsorted(level_codes, stop, side="right")
return slice(i, j, step)
else:
idx = self._get_loc_single_level_index(level_index, key)
if level > 0 or self._lexsort_depth == 0:
# Desired level is not sorted
if isinstance(idx, slice):
# test_get_loc_partial_timestamp_multiindex
locs = (level_codes >= idx.start) & (level_codes < idx.stop)
return locs
locs = np.asarray(level_codes == idx, dtype=bool)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
if isinstance(idx, slice):
# e.g. test_partial_string_timestamp_multiindex
start = algos.searchsorted(level_codes, idx.start, side="left")
# NB: "left" here bc of slice semantics
end = algos.searchsorted(level_codes, idx.stop, side="left")
else:
start = algos.searchsorted(level_codes, idx, side="left")
end = algos.searchsorted(level_codes, idx, side="right")
if start == end:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(start, end)
def get_locs(self, seq) -> npt.NDArray[np.intp]:
"""
Get location for a sequence of labels.
Parameters
----------
seq : label, slice, list, mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
numpy.ndarray
NumPy array of integers suitable for passing to iloc.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list("abb"), list("def")])
>>> mi.get_locs("b") # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ["e", "f"]]) # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice("e", "f")]) # doctest: +SKIP
array([2], dtype=int64)
"""
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self._lexsort_depth:
raise UnsortedIndexError(
"MultiIndex slicing requires the index to be lexsorted: slicing "
f"on levels {true_slices}, lexsort depth {self._lexsort_depth}"
)
if any(x is Ellipsis for x in seq):
raise NotImplementedError(
"MultiIndex does not support indexing with Ellipsis"
)
n = len(self)
def _to_bool_indexer(indexer) -> npt.NDArray[np.bool_]:
if isinstance(indexer, slice):
new_indexer = np.zeros(n, dtype=np.bool_)
new_indexer[indexer] = True
return new_indexer
return indexer
# a bool indexer for the positions we want to take
indexer: npt.NDArray[np.bool_] | None = None
for i, k in enumerate(seq):
lvl_indexer: npt.NDArray[np.bool_] | slice | None = None
if com.is_bool_indexer(k):
if len(k) != n:
raise ValueError(
"cannot index with a boolean indexer that "
"is not the same length as the index"
)
if isinstance(k, (ABCSeries, Index)):
k = k._values
lvl_indexer = np.asarray(k)
if indexer is None:
lvl_indexer = lvl_indexer.copy()
elif is_list_like(k):
# a collection of labels to include from this level (these are or'd)
# GH#27591 check if this is a single tuple key in the level
try:
lvl_indexer = self._get_level_indexer(k, level=i, indexer=indexer)
except (InvalidIndexError, TypeError, KeyError) as err:
# InvalidIndexError e.g. non-hashable, fall back to treating
# this as a sequence of labels
# KeyError it can be ambiguous if this is a label or sequence
# of labels
# github.com/pandas-dev/pandas/issues/39424#issuecomment-871626708
for x in k:
if not is_hashable(x):
# e.g. slice
raise err
# GH 39424: Ignore not founds
# GH 42351: No longer ignore not founds & enforced in 2.0
# TODO: how to handle IntervalIndex level? (no test cases)
item_indexer = self._get_level_indexer(
x, level=i, indexer=indexer
)
if lvl_indexer is None:
lvl_indexer = _to_bool_indexer(item_indexer)
elif isinstance(item_indexer, slice):
lvl_indexer[item_indexer] = True # type: ignore[index]
else:
lvl_indexer |= item_indexer
if lvl_indexer is None:
# no matches we are done
# test_loc_getitem_duplicates_multiindex_empty_indexer
return np.array([], dtype=np.intp)
elif com.is_null_slice(k):
# empty slice
if indexer is None and i == len(seq) - 1:
return np.arange(n, dtype=np.intp)
continue
else:
# a slice or a single label
lvl_indexer = self._get_level_indexer(k, level=i, indexer=indexer)
# update indexer
lvl_indexer = _to_bool_indexer(lvl_indexer)
if indexer is None:
indexer = lvl_indexer
else:
indexer &= lvl_indexer
if not np.any(indexer) and np.any(lvl_indexer):
raise KeyError(seq)
# empty indexer
if indexer is None:
return np.array([], dtype=np.intp)
pos_indexer = indexer.nonzero()[0]
return self._reorder_indexer(seq, pos_indexer)
# --------------------------------------------------------------------
def _reorder_indexer(
self,
seq: tuple[Scalar | Iterable | AnyArrayLike, ...],
indexer: npt.NDArray[np.intp],
) -> npt.NDArray[np.intp]:
"""
Reorder an indexer of a MultiIndex (self) so that the labels are in the
same order as given in seq
Parameters
----------
seq : label/slice/list/mask or a sequence of such
indexer: a position indexer of self
Returns
-------
indexer : a sorted position indexer of self ordered as seq
"""
# check if sorting is necessary
need_sort = False
for i, k in enumerate(seq):
if com.is_null_slice(k) or com.is_bool_indexer(k) or is_scalar(k):
pass
elif is_list_like(k):
if len(k) <= 1: # type: ignore[arg-type]
pass
elif self._is_lexsorted():
# If the index is lexsorted and the list_like label
# in seq are sorted then we do not need to sort
k_codes = self.levels[i].get_indexer(k)
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
need_sort = (k_codes[:-1] > k_codes[1:]).any()
else:
need_sort = True
elif isinstance(k, slice):
if self._is_lexsorted():
need_sort = k.step is not None and k.step < 0
else:
need_sort = True
else:
need_sort = True
if need_sort:
break
if not need_sort:
return indexer
n = len(self)
keys: tuple[np.ndarray, ...] = ()
# For each level of the sequence in seq, map the level codes with the
# order they appears in a list-like sequence
# This mapping is then use to reorder the indexer
for i, k in enumerate(seq):
if is_scalar(k):
# GH#34603 we want to treat a scalar the same as an all equal list
k = [k]
if com.is_bool_indexer(k):
new_order = np.arange(n)[indexer]
elif is_list_like(k):
# Generate a map with all level codes as sorted initially
if not isinstance(k, (np.ndarray, ExtensionArray, Index, ABCSeries)):
k = sanitize_array(k, None)
k = algos.unique(k)
key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
self.levels[i]
)
# Set order as given in the indexer list
level_indexer = self.levels[i].get_indexer(k)
level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
key_order_map[level_indexer] = np.arange(len(level_indexer))
new_order = key_order_map[self.codes[i][indexer]]
elif isinstance(k, slice) and k.step is not None and k.step < 0:
# flip order for negative step
new_order = np.arange(n - 1, -1, -1)[indexer]
elif isinstance(k, slice) and k.start is None and k.stop is None:
# slice(None) should not determine order GH#31330
new_order = np.ones((n,), dtype=np.intp)[indexer]
else:
                # For all other cases, use the same order as the level
new_order = np.arange(n)[indexer]
keys = (new_order,) + keys
# Find the reordering using lexsort on the keys mapping
ind = np.lexsort(keys)
return indexer[ind]
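    # A worked example of the reordering above (illustrative, assuming a small
    # product index): with mi = MultiIndex.from_product([["a", "b"], [1, 2]]) and
    # seq = (["b", "a"], [2, 1]), the per-level key-order maps send "b" -> 0,
    # "a" -> 1 and 2 -> 0, 1 -> 1; np.lexsort treats the last key (the first
    # level) as primary, so the returned positions are [3, 2, 1, 0], i.e.
    # ("b", 2), ("b", 1), ("a", 2), ("a", 1), matching the order requested in seq.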
def truncate(self, before=None, after=None) -> MultiIndex:
"""
Slice index between two labels / tuples, return new MultiIndex.
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start.
after : label or tuple, can be partial. Default None
None defaults to end.
Returns
-------
MultiIndex
The truncated MultiIndex.
See Also
--------
DataFrame.truncate : Truncate a DataFrame before and after some index values.
Series.truncate : Truncate a Series before and after some index values.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a", "b", "c"], ["x", "y", "z"]])
>>> mi
MultiIndex([('a', 'x'), ('b', 'y'), ('c', 'z')],
)
>>> mi.truncate(before="a", after="b")
MultiIndex([('a', 'x'), ('b', 'y')],
)
"""
if after and before and after < before:
raise ValueError("after < before")
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(
levels=new_levels,
codes=new_codes,
names=self._names,
verify_integrity=False,
)
def equals(self, other: object) -> bool:
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if len(self) != len(other):
return False
if not isinstance(other, MultiIndex):
# d-level MultiIndex can equal d-tuple Index
if not self._should_compare(other):
# object Index or Categorical[object] may contain tuples
return False
return array_equivalent(self._values, other._values)
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
other_codes = other.codes[i]
self_mask = self_codes == -1
other_mask = other_codes == -1
if not np.array_equal(self_mask, other_mask):
return False
self_level = self.levels[i]
other_level = other.levels[i]
new_codes = recode_for_categories(
other_codes, other_level, self_level, copy=False
)
if not np.array_equal(self_codes, new_codes):
return False
if not self_level[:0].equals(other_level[:0]):
# e.g. Int64 != int64
return False
return True
def equal_levels(self, other: MultiIndex) -> bool:
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
# --------------------------------------------------------------------
# Set Methods
def _union(self, other, sort) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
if other.has_duplicates:
# This is only necessary if other has dupes,
# otherwise difference is faster
result = super(MultiIndex, self.rename(result_names))._union(
other.rename(result_names), sort
)
if isinstance(result, MultiIndex):
return result
return MultiIndex.from_arrays(
zip(*result, strict=True), sortorder=None, names=result_names
)
else:
right_missing = other.difference(self, sort=False)
if len(right_missing):
result = self.append(right_missing)
else:
result = self._get_reconciled_name_object(other)
if sort is not False:
try:
result = result.sort_values()
except TypeError:
if sort is True:
raise
warnings.warn(
"The values in the array are unorderable. "
"Pass `sort=False` to suppress this warning.",
RuntimeWarning,
stacklevel=find_stack_level(),
)
return result
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
return is_object_dtype(dtype)
def _get_reconciled_name_object(self, other) -> MultiIndex:
"""
If the result of a set operation will be self,
return self, unless the names change, in which
case make a shallow copy of self.
"""
names = self._maybe_match_names(other)
if self.names != names:
return self.rename(names)
return self
def _maybe_match_names(self, other):
"""
        Try to find common names to attach to the result of an operation between
        self and other. Return a consensus list of names if they match at least
        partly, or a list of None if the names are completely different.
"""
if len(self.names) != len(other.names):
return [None] * len(self.names)
names = []
for a_name, b_name in zip(self.names, other.names, strict=True):
if a_name == b_name:
names.append(a_name)
else:
# TODO: what if they both have np.nan for their names?
names.append(None)
return names
def _wrap_intersection_result(self, other, result) -> MultiIndex:
_, result_names = self._convert_can_do_setop(other)
return result.set_names(result_names)
def _wrap_difference_result(self, other, result: MultiIndex) -> MultiIndex:
_, result_names = self._convert_can_do_setop(other)
if len(result) == 0:
return result.remove_unused_levels().set_names(result_names)
else:
return result.set_names(result_names)
def _convert_can_do_setop(self, other):
result_names = self.names
if not isinstance(other, Index):
if len(other) == 0:
return self[:0], self.names
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other, names=self.names)
except (ValueError, TypeError) as err:
# ValueError raised by tuples_to_object_array if we
# have non-object dtype
raise TypeError(msg) from err
else:
result_names = get_unanimous_names(self, other)
return other, result_names
# --------------------------------------------------------------------
@doc(Index.astype)
def astype(self, dtype, copy: bool = True):
dtype = pandas_dtype(dtype)
if isinstance(dtype, CategoricalDtype):
msg = "> 1 ndim Categorical are not supported at this time"
raise NotImplementedError(msg)
if not is_object_dtype(dtype):
raise TypeError(
"Setting a MultiIndex dtype to anything other than object "
"is not supported"
)
if copy is True:
return self._view()
return self
def _validate_fill_value(self, item):
if isinstance(item, MultiIndex):
# GH#43212
if item.nlevels != self.nlevels:
raise ValueError("Item must have length equal to number of levels.")
return item._values
elif not isinstance(item, tuple):
# Pad the key with empty strings if lower levels of the key
# aren't specified:
item = (item,) + ("",) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError("Item must have length equal to number of levels.")
return item
def putmask(self, mask, value: MultiIndex) -> MultiIndex:
"""
Return a new MultiIndex of the values set with the mask.
Parameters
----------
mask : array like
value : MultiIndex
Must either be the same length as self or length one
Returns
-------
MultiIndex
"""
mask, noop = validate_putmask(self, mask)
if noop:
return self.copy()
if len(mask) == len(value):
subset = value[mask].remove_unused_levels()
else:
subset = value.remove_unused_levels()
new_levels = []
new_codes = []
for i, (value_level, level, level_codes) in enumerate(
zip(subset.levels, self.levels, self.codes, strict=True)
):
new_level = level.union(value_level, sort=False)
value_codes = new_level.get_indexer_for(subset.get_level_values(i))
new_code = ensure_int64(level_codes)
new_code[mask] = value_codes
new_levels.append(new_level)
new_codes.append(new_code)
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
def insert(self, loc: int, item) -> MultiIndex:
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
item = self._validate_fill_value(item)
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes, strict=True):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other codes
lev_loc = len(level)
level = level.insert(lev_loc, k)
if isna(level[lev_loc]): # GH 59003, 60388
lev_loc = -1
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
def delete(self, loc) -> MultiIndex:
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
verify_integrity=False,
)
@doc(Index.isin)
def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
if isinstance(values, Generator):
values = list(values)
if level is None:
if len(values) == 0:
return np.zeros((len(self),), dtype=np.bool_)
if not isinstance(values, MultiIndex):
values = MultiIndex.from_tuples(values)
return values.unique().get_indexer_for(self) != -1
else:
num = self._get_level_number(level)
levs = self.get_level_values(num)
if levs.size == 0:
return np.zeros(len(levs), dtype=np.bool_)
return levs.isin(values)
# error: Incompatible types in assignment (expression has type overloaded function,
# base class "Index" defined the type as "Callable[[Index, Any, bool], Any]")
rename = Index.set_names # type: ignore[assignment]
# ---------------------------------------------------------------
# Arithmetic/Numeric Methods - Disabled
__add__ = make_invalid_op("__add__")
__radd__ = make_invalid_op("__radd__")
__iadd__ = make_invalid_op("__iadd__")
__sub__ = make_invalid_op("__sub__")
__rsub__ = make_invalid_op("__rsub__")
__isub__ = make_invalid_op("__isub__")
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
# Unary methods disabled
__neg__ = make_invalid_op("__neg__")
__pos__ = make_invalid_op("__pos__")
__abs__ = make_invalid_op("__abs__")
__invert__ = make_invalid_op("__invert__")
def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int:
"""Count depth (up to a maximum of `nlevels`) with which codes are lexsorted."""
int64_codes = [ensure_int64(level_codes) for level_codes in codes]
for k in range(nlevels, 0, -1):
if libalgos.is_lexsorted(int64_codes[:k]):
return k
return 0
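# A minimal pure-NumPy sketch of the same idea (the helper name is hypothetical
# and not part of pandas; it reuses this module's `np` import): codes are
# "lexsorted to depth k" when the rows, compared as tuples of their first k
# code arrays, are already non-decreasing.
def _lexsort_depth_sketch(codes: list[np.ndarray]) -> int:
    if not codes:
        return 0
    stacked = np.column_stack(codes)  # shape: (n_rows, n_levels)
    for k in range(stacked.shape[1], 0, -1):
        rows = [tuple(row) for row in stacked[:, :k]]
        if all(a <= b for a, b in zip(rows, rows[1:])):
            return k
    return 0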
def sparsify_labels(label_list, start: int = 0, sentinel: object = ""):
pivoted = list(zip(*label_list, strict=True))
k = len(label_list)
result = pivoted[: start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1 :]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur, strict=True)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur) # type: ignore[arg-type]
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur) # type: ignore[arg-type]
break
prev = cur
return list(zip(*result, strict=True))
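# Roughly, sparsify_labels blanks a label when it repeats the entry directly
# above it and every outer level repeats as well; for example,
# sparsify_labels([["a", "a", "b"], ["x", "y", "z"]]) should give
# [("a", "", "b"), ("x", "y", "z")]: the second "a" becomes the sentinel, while
# the innermost level is always printed in full.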
def _get_na_rep(dtype: DtypeObj) -> str:
if isinstance(dtype, ExtensionDtype):
return f"{dtype.na_value}"
else:
dtype_type = dtype.type
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype_type, "NaN")
def maybe_droplevels(index: Index, key) -> Index:
"""
Attempt to drop level or levels from the given index.
Parameters
----------
index: Index
key : scalar or tuple
Returns
-------
Index
"""
# drop levels
original_index = index
if isinstance(key, tuple):
# Caller is responsible for ensuring the key is not an entry in the first
# level of the MultiIndex.
for _ in key:
try:
index = index._drop_level_numbers([0])
except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
index = index._drop_level_numbers([0])
except ValueError:
pass
return index
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
"""
Coerce the array-like indexer to the smallest integer dtype that can encode all
of the given categories.
Parameters
----------
array_like : array-like
categories : array-like
copy : bool
Returns
-------
np.ndarray
Non-writeable.
"""
array_like = coerce_indexer_dtype(array_like, categories)
if copy:
array_like = array_like.copy()
array_like.flags.writeable = False
return array_like
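# For example, a small category set (fewer than roughly 127 values) typically
# yields int8 codes here, and marking the result non-writeable keeps the cached
# codes from being mutated in place.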
def _require_listlike(level, arr, arrname: str):
"""
Ensure that level is either None or listlike, and arr is list-of-listlike.
"""
if level is not None and not is_list_like(level):
if not is_list_like(arr):
raise TypeError(f"{arrname} must be list-like")
if len(arr) > 0 and is_list_like(arr[0]):
raise TypeError(f"{arrname} must be list-like")
level = [level]
arr = [arr]
elif level is None or is_list_like(level):
if not is_list_like(arr) or not is_list_like(arr[0]):
raise TypeError(f"{arrname} must be list of lists-like")
return level, arr
def cartesian_product(X: list[np.ndarray]) -> list[np.ndarray]:
"""
Numpy version of itertools.product.
Sometimes faster (for large inputs)...
Parameters
----------
X : list-like of list-likes
Returns
-------
product : list of ndarrays
Examples
--------
>>> cartesian_product([list("ABC"), [1, 2]])
[array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='<U1'), array([1, 2, 1, 2, 1, 2])]
See Also
--------
itertools.product : Cartesian product of input iterables. Equivalent to
nested for-loops.
"""
msg = "Input must be a list-like of list-likes"
if not is_list_like(X):
raise TypeError(msg)
for x in X:
if not is_list_like(x):
raise TypeError(msg)
if len(X) == 0:
return []
lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
cumprodX = np.cumprod(lenX)
if np.any(cumprodX < 0):
raise ValueError("Product space too large to allocate arrays!")
a = np.roll(cumprodX, 1)
a[0] = 1
if cumprodX[-1] != 0:
b = cumprodX[-1] / cumprodX
else:
# if any factor is empty, the cartesian product is empty
b = np.zeros_like(cumprodX)
return [
np.tile(
np.repeat(x, b[i]),
np.prod(a[i]),
)
for i, x in enumerate(X)
]
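# The repeat/tile arithmetic above reproduces itertools.product ordering: for
# X = [["A", "B", "C"], [1, 2]], lenX = [3, 2] and cumprodX = [3, 6], so
# b = [2, 1] (each element of the first list is repeated twice) and a = [1, 3]
# (the second list is tiled three times), which gives the
# ['A', 'A', 'B', 'B', 'C', 'C'] / [1, 2, 1, 2, 1, 2] pair shown in the docstring.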
|
MultiIndex
|
python
|
huggingface__transformers
|
src/transformers/models/vits/modeling_vits.py
|
{
"start": 37303,
"end": 44530
}
|
class ____(nn.Module):
"""Multi-headed attention with relative positional representation."""
def __init__(self, config: VitsConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.dropout = config.attention_dropout
self.window_size = config.window_size
self.head_dim = self.embed_dim // self.num_heads
self.scaling = self.head_dim**-0.5
if (self.head_dim * self.num_heads) != self.embed_dim:
raise ValueError(
f"hidden_size must be divisible by num_attention_heads (got `hidden_size`: {self.embed_dim}"
f" and `num_attention_heads`: {self.num_heads})."
)
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_bias)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_bias)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_bias)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_bias)
if self.window_size:
self.emb_rel_k = nn.Parameter(torch.randn(1, self.window_size * 2 + 1, self.head_dim) * self.scaling)
self.emb_rel_v = nn.Parameter(torch.randn(1, self.window_size * 2 + 1, self.head_dim) * self.scaling)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if self.window_size is not None:
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, src_len)
relative_logits = torch.matmul(query_states, key_relative_embeddings.transpose(-2, -1))
rel_pos_bias = self._relative_position_to_absolute_position(relative_logits)
attn_weights += rel_pos_bias
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
if self.window_size is not None:
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, src_len)
relative_weights = self._absolute_position_to_relative_position(attn_probs)
rel_pos_bias = torch.matmul(relative_weights, value_relative_embeddings)
attn_output += rel_pos_bias
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
# partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
def _get_relative_embeddings(self, relative_embeddings, length):
pad_length = max(length - (self.window_size + 1), 0)
if pad_length > 0:
relative_embeddings = nn.functional.pad(relative_embeddings, [0, 0, pad_length, pad_length, 0, 0])
slice_start_position = max((self.window_size + 1) - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
return relative_embeddings[:, slice_start_position:slice_end_position]
def _relative_position_to_absolute_position(self, x):
batch_heads, length, _ = x.size()
# Concat columns of pad to shift from relative to absolute indexing.
x = nn.functional.pad(x, [0, 1, 0, 0, 0, 0])
        # Concat extra elements so as to add up to shape (len+1, 2*len-1).
x_flat = x.view([batch_heads, length * 2 * length])
x_flat = nn.functional.pad(x_flat, [0, length - 1, 0, 0])
# Reshape and slice out the padded elements.
x_final = x_flat.view([batch_heads, length + 1, 2 * length - 1])
x_final = x_final[:, :length, length - 1 :]
return x_final
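    # Shape walkthrough (illustrative): the input is (batch_heads, length,
    # 2*length - 1); padding one column, flattening, padding length - 1 extra
    # elements and re-viewing as (length + 1, 2*length - 1) shifts each row by
    # one additional slot, so after slicing [:, :length, length - 1:] the result
    # is (batch_heads, length, length) with, roughly,
    # out[:, i, j] == x[:, i, (j - i) + length - 1], i.e. the logit stored for
    # relative offset j - i.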
def _absolute_position_to_relative_position(self, x):
batch_heads, length, _ = x.size()
# Pad along column
x = nn.functional.pad(x, [0, length - 1, 0, 0, 0, 0])
x_flat = x.view([batch_heads, length * (2 * length - 1)])
# Add 0's in the beginning that will skew the elements after reshape
x_flat = nn.functional.pad(x_flat, [length, 0, 0, 0])
x_final = x_flat.view([batch_heads, length, 2 * length])[:, :, 1:]
return x_final
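    # This performs the inverse bookkeeping of the helper above: starting from
    # absolute weights of shape (batch_heads, length, length), the padding and
    # reshaping drop each weight into its relative-offset bucket, so the
    # (batch_heads, length, 2*length - 1) output satisfies, roughly,
    # out[:, i, k] == x[:, i, i + k - (length - 1)] where that column exists,
    # and is zero elsewhere.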
|
VitsAttention
|
python
|
lepture__authlib
|
authlib/jose/errors.py
|
{
"start": 709,
"end": 958
}
|
class ____(JoseError):
error = "invalid_crit_header_parameter_name"
def __init__(self, name):
description = f"Invalid Header Parameter Name: {name}"
super().__init__(description=description)
|
InvalidCritHeaderParameterNameError
|
python
|
astropy__astropy
|
astropy/visualization/interval.py
|
{
"start": 6051,
"end": 6813
}
|
class ____(AsymmetricPercentileInterval):
"""
Interval based on a keeping a specified fraction of pixels.
Parameters
----------
percentile : float
        The percentage of pixels to keep (between 0 and 100). The same
        fraction of pixels is eliminated from both ends.
    n_samples : int, optional
        Maximum number of values to use. If this is specified, and there
        are more values in the dataset than this, then values are randomly
        sampled from the array (with replacement).
"""
def __init__(self, percentile, n_samples=None):
lower_percentile = (100 - percentile) * 0.5
upper_percentile = 100 - lower_percentile
super().__init__(lower_percentile, upper_percentile, n_samples=n_samples)
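    # For example, percentile=95.0 becomes an AsymmetricPercentileInterval with
    # lower_percentile=2.5 and upper_percentile=97.5, i.e. 2.5% of the pixels
    # are clipped from each end.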
|
PercentileInterval
|
python
|
modin-project__modin
|
modin/pandas/base.py
|
{
"start": 6281,
"end": 161151
}
|
class ____(QueryCompilerCaster, ClassLogger):
"""
Implement most of the common code that exists in DataFrame/Series.
Since both objects share the same underlying representation, and the algorithms
are the same, we use this object to define the general behavior of those objects
and then use those objects to define the output type.
"""
# Pandas class that we pretend to be; usually it has the same name as our class
# but lives in "pandas" namespace.
_pandas_class = pandas.core.generic.NDFrame
_query_compiler: BaseQueryCompiler
_siblings: list[BasePandasDataset]
_extensions: EXTENSION_DICT_TYPE = EXTENSION_DICT_TYPE(dict)
_pinned: bool = False
@cached_property
def _is_dataframe(self) -> bool:
"""
Tell whether this is a dataframe.
Ideally, other methods of BasePandasDataset shouldn't care whether this
is a dataframe or a series, but sometimes we need to know. This method
is better than hasattr(self, "columns"), which for series will call
self.__getattr__("columns"), which requires materializing the index.
Returns
-------
bool : Whether this is a dataframe.
"""
return issubclass(self._pandas_class, pandas.DataFrame)
@abc.abstractmethod
def _create_or_update_from_compiler(
self, new_query_compiler: BaseQueryCompiler, inplace: bool = False
) -> Self | None:
"""
Return or update a ``DataFrame`` or ``Series`` with given `new_query_compiler`.
Parameters
----------
new_query_compiler : BaseQueryCompiler
QueryCompiler to use to manage the data.
inplace : bool, default: False
Whether or not to perform update or creation inplace.
Returns
-------
DataFrame, Series or None
None if update was done, ``DataFrame`` or ``Series`` otherwise.
"""
pass
def _add_sibling(self, sibling: BasePandasDataset) -> None:
"""
Add a DataFrame or Series object to the list of siblings.
Siblings are objects that share the same query compiler. This function is called
when a shallow copy is made.
Parameters
----------
sibling : BasePandasDataset
Dataset to add to siblings list.
"""
sibling._siblings = self._siblings + [self]
self._siblings += [sibling]
for sib in self._siblings:
sib._siblings += [sibling]
def _build_repr_df(
self, num_rows: int, num_cols: int
) -> pandas.DataFrame | pandas.Series:
"""
Build pandas DataFrame for string representation.
Parameters
----------
num_rows : int
Number of rows to show in string representation. If number of
rows in this dataset is greater than `num_rows` then half of
`num_rows` rows from the beginning and half of `num_rows` rows
from the end are shown.
num_cols : int
Number of columns to show in string representation. If number of
columns in this dataset is greater than `num_cols` then half of
`num_cols` columns from the beginning and half of `num_cols`
columns from the end are shown.
Returns
-------
pandas.DataFrame or pandas.Series
A pandas dataset with `num_rows` or fewer rows and `num_cols` or fewer columns.
"""
# Fast track for empty dataframe.
if len(self) == 0 or (
self._is_dataframe and self._query_compiler.get_axis_len(1) == 0
):
return pandas.DataFrame(
index=self.index,
columns=self.columns if self._is_dataframe else None,
)
row_indexer = _get_repr_axis_label_indexer(self.index, num_rows)
if self._is_dataframe:
indexer = row_indexer, _get_repr_axis_label_indexer(self.columns, num_cols)
else:
indexer = row_indexer
return self.iloc[indexer]._query_compiler.to_pandas()
def _update_inplace(self, new_query_compiler: BaseQueryCompiler) -> None:
"""
Update the current DataFrame inplace.
Parameters
----------
new_query_compiler : BaseQueryCompiler
The new QueryCompiler to use to manage the data.
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
for sib in self._siblings:
sib._query_compiler = new_query_compiler
old_query_compiler.free()
def _validate_other(
self,
other,
axis,
dtype_check=False,
compare_index=False,
):
"""
Help to check validity of other in inter-df operations.
Parameters
----------
other : modin.pandas.BasePandasDataset
Another dataset to validate against `self`.
axis : {None, 0, 1}
            Specifies the axis along which to do validation. When `1` or `None`
            is specified, validation is done along `index`; when `0` is specified,
            validation is done along `columns` of the `other` frame.
dtype_check : bool, default: False
Validates that both frames have compatible dtypes.
compare_index : bool, default: False
Compare Index if True.
Returns
-------
BaseQueryCompiler or Any
Other frame if it is determined to be valid.
Raises
------
ValueError
If `other` is `Series` and its length is different from
length of `self` `axis`.
TypeError
If any validation checks fail.
"""
if isinstance(other, BasePandasDataset):
return other._query_compiler
if not is_list_like(other):
# We skip dtype checking if the other is a scalar. Note that pandas
# is_scalar can be misleading as it is False for almost all objects,
# even when those objects should be treated as scalars. See e.g.
# https://github.com/modin-project/modin/issues/5236. Therefore, we
# detect scalars by checking that `other` is neither a list-like nor
# another BasePandasDataset.
return other
axis = self._get_axis_number(axis) if axis is not None else 1
result = other
if axis == 0:
if len(other) != len(self._query_compiler.index):
raise ValueError(
f"Unable to coerce to Series, length must be {len(self._query_compiler.index)}: "
+ f"given {len(other)}"
)
else:
if len(other) != len(self._query_compiler.columns):
raise ValueError(
f"Unable to coerce to Series, length must be {len(self._query_compiler.columns)}: "
+ f"given {len(other)}"
)
if hasattr(other, "dtype"):
other_dtypes = [other.dtype] * len(other)
elif is_dict_like(other):
other_dtypes = [
other[label] if pandas.isna(other[label]) else type(other[label])
for label in self._get_axis(axis)
# The binary operation is applied for intersection of axis labels
# and dictionary keys. So filtering out extra keys.
if label in other
]
else:
other_dtypes = [x if pandas.isna(x) else type(x) for x in other]
if compare_index:
if not self.index.equals(other.index):
raise TypeError("Cannot perform operation with non-equal index")
# Do dtype checking.
if dtype_check:
self_dtypes = self._get_dtypes()
if is_dict_like(other):
# The binary operation is applied for the intersection of axis labels
# and dictionary keys. So filtering `self_dtypes` to match the `other`
# dictionary.
self_dtypes = [
dtype
for label, dtype in zip(self._get_axis(axis), self._get_dtypes())
if label in other
]
# TODO(https://github.com/modin-project/modin/issues/5239):
# this spuriously rejects other that is a list including some
# custom type that can be added to self's elements.
for self_dtype, other_dtype in zip(self_dtypes, other_dtypes):
if not (
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (is_numeric_dtype(self_dtype) and pandas.isna(other_dtype))
or (is_object_dtype(self_dtype) and is_object_dtype(other_dtype))
or (
lib.is_np_dtype(self_dtype, "mM")
                    and lib.is_np_dtype(other_dtype, "mM")
)
or is_dtype_equal(self_dtype, other_dtype)
):
raise TypeError("Cannot do operation with improper dtypes")
return result
def _validate_function(self, func, on_invalid=None) -> None:
"""
Check the validity of the function which is intended to be applied to the frame.
Parameters
----------
func : object
on_invalid : callable(str, cls), optional
Function to call in case invalid `func` is met, `on_invalid` takes an error
message and an exception type as arguments. If not specified raise an
appropriate exception.
            **Note:** This parameter is a hack to conform to pandas error types.
"""
def error_raiser(msg, exception=Exception):
raise exception(msg)
if on_invalid is None:
on_invalid = error_raiser
if isinstance(func, dict):
[self._validate_function(fn, on_invalid) for fn in func.values()]
return
# We also could validate this, but it may be quite expensive for lazy-frames
# if not all(idx in self._get_axis(axis) for idx in func.keys()):
# error_raiser("Invalid dict keys", KeyError)
if not is_list_like(func):
func = [func]
for fn in func:
if isinstance(fn, str):
if not (hasattr(self, fn) or hasattr(np, fn)):
on_invalid(
f"'{fn}' is not a valid function for '{type(self).__name__}' object",
AttributeError,
)
elif not callable(fn):
on_invalid(
f"One of the passed functions has an invalid type: {type(fn)}: {fn}, "
+ "only callable or string is acceptable.",
TypeError,
)
def _binary_op(self, op, other, **kwargs) -> Self:
"""
Do binary operation between two datasets.
Parameters
----------
op : str
Name of binary operation.
other : modin.pandas.BasePandasDataset
Second operand of binary operation.
**kwargs : dict
Additional parameters to binary operation.
Returns
-------
modin.pandas.BasePandasDataset
Result of binary operation.
"""
# _axis indicates the operator will use the default axis
if kwargs.pop("_axis", None) is None:
if kwargs.get("axis", None) is not None:
kwargs["axis"] = axis = self._get_axis_number(kwargs.get("axis", None))
else:
kwargs["axis"] = axis = 1
else:
axis = 0
if kwargs.get("level", None) is not None:
# Broadcast is an internally used argument
kwargs.pop("broadcast", None)
return self._default_to_pandas(
getattr(self._pandas_class, op), other, **kwargs
)
other = self._validate_other(other, axis, dtype_check=True)
exclude_list = [
"__add__",
"__radd__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
]
if op in exclude_list:
kwargs.pop("axis")
# Series logical operations take an additional fill_value argument that DF does not
series_specialize_list = [
"eq",
"ge",
"gt",
"le",
"lt",
"ne",
]
if not self._is_dataframe and op in series_specialize_list:
op = "series_" + op
new_query_compiler = getattr(self._query_compiler, op)(other, **kwargs)
return self._create_or_update_from_compiler(new_query_compiler)
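    # Flow example (illustrative): df.add(other, axis=0, level=None,
    # fill_value=None) resolves axis to 0, validates that `other` is a scalar or
    # matches the length of the index, and then forwards the same keyword
    # arguments to the query compiler's "add"; dunder ops such as __add__ drop
    # the axis keyword entirely.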
def _default_to_pandas(self, op, *args, reason: str = None, **kwargs):
"""
Convert dataset to pandas type and call a pandas function on it.
Parameters
----------
op : str
Name of pandas function.
*args : list
Additional positional arguments to be passed to `op`.
        reason : str, optional
            Reason for defaulting to pandas, forwarded to the default-to-pandas
            warning message.
**kwargs : dict
Additional keywords arguments to be passed to `op`.
Returns
-------
object
Result of operation.
"""
empty_self_str = "" if not self.empty else " for empty DataFrame"
self._query_compiler._maybe_warn_on_default(
message="`{}.{}`{}".format(
type(self).__name__,
op if isinstance(op, str) else op.__name__,
empty_self_str,
),
reason=reason,
)
args = try_cast_to_pandas(args)
kwargs = try_cast_to_pandas(kwargs)
pandas_obj = self._to_pandas()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
if callable(op):
result = op(pandas_obj, *args, **kwargs)
elif isinstance(op, str):
# The inner `getattr` is ensuring that we are treating this object (whether
# it is a DataFrame, Series, etc.) as a pandas object. The outer `getattr`
# will get the operation (`op`) from the pandas version of the class and run
# it on the object after we have converted it to pandas.
attr = getattr(self._pandas_class, op)
if isinstance(attr, property):
result = getattr(pandas_obj, op)
else:
result = attr(pandas_obj, *args, **kwargs)
else:
ErrorMessage.catch_bugs_and_request_email(
failure_condition=True,
extra_log="{} is an unsupported operation".format(op),
)
if isinstance(result, pandas.DataFrame):
from .dataframe import DataFrame
return DataFrame(result)
elif isinstance(result, pandas.Series):
from .series import Series
return Series(result)
# inplace
elif result is None:
return self._create_or_update_from_compiler(
getattr(pd, type(pandas_obj).__name__)(pandas_obj)._query_compiler,
inplace=True,
)
else:
try:
if (
isinstance(result, (list, tuple))
and len(result) == 2
and isinstance(result[0], pandas.DataFrame)
):
# Some operations split the DataFrame into two (e.g. align). We need to wrap
# both of the returned results
if isinstance(result[1], pandas.DataFrame):
second = self.__constructor__(result[1])
else:
second = result[1]
return self.__constructor__(result[0]), second
else:
return result
except TypeError:
return result
@classmethod
def _get_axis_number(cls, axis) -> int:
"""
Convert axis name or number to axis index.
Parameters
----------
axis : int, str or pandas._libs.lib.NoDefault
Axis name ('index' or 'columns') or number to be converted to axis index.
Returns
-------
int
0 or 1 - axis index in the array of axes stored in the dataframe.
"""
if axis is lib.no_default:
axis = None
return cls._pandas_class._get_axis_number(axis) if axis is not None else 0
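    # For example, on a DataFrame-backed object _get_axis_number("index") and
    # _get_axis_number(0) both return 0, _get_axis_number("columns") returns 1,
    # and both None and pandas' no_default sentinel fall back to 0.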
@cached_property
def __constructor__(self) -> type[Self]:
"""
Construct DataFrame or Series object depending on self type.
Returns
-------
modin.pandas.BasePandasDataset
Constructed object.
"""
return type(self)
def abs(self) -> Self: # noqa: RT01, D200
"""
Return a `BasePandasDataset` with absolute numeric value of each element.
"""
self._validate_dtypes(numeric_only=True)
return self.__constructor__(query_compiler=self._query_compiler.abs())
def _set_index(self, new_index) -> None:
"""
Set the index for this DataFrame.
Parameters
----------
new_index : pandas.Index
The new index to set this.
"""
self._query_compiler.index = new_index
def _get_index(self) -> pandas.Index:
"""
Get the index for this DataFrame.
Returns
-------
pandas.Index
The union of all indexes across the partitions.
"""
return self._query_compiler.index
index: pandas.Index = property(_get_index, _set_index)
def _get_axis(self, axis) -> pandas.Index:
"""
Return index labels of the specified axis.
Parameters
----------
axis : {0, 1}
Axis to return labels on.
            0 is for index, 1 is for columns.
Returns
-------
pandas.Index
"""
return self.index if axis == 0 else self.columns
def add(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Return addition of `BasePandasDataset` and `other`, element-wise (binary operator `add`).
"""
return self._binary_op(
"add", other, axis=axis, level=level, fill_value=fill_value
)
def aggregate(
self, func=None, axis=0, *args, **kwargs
) -> DataFrame | Series | Scalar: # noqa: PR01, RT01, D200
"""
Aggregate using one or more operations over the specified axis.
"""
axis = self._get_axis_number(axis)
result = None
if axis == 0:
result = self._aggregate(func, _axis=axis, *args, **kwargs)
# TODO: handle case when axis == 1
if result is None:
kwargs.pop("is_transform", None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
agg: DataFrame | Series | Scalar = aggregate
def _aggregate(self, func, *args, **kwargs):
"""
Aggregate using one or more operations over index axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data.
*args : list
Positional arguments to pass to func.
**kwargs : dict
Keyword arguments to pass to func.
Returns
-------
scalar or BasePandasDataset
See Also
--------
aggregate : Aggregate along any axis.
"""
_axis = kwargs.pop("_axis", 0)
kwargs.pop("_level", None)
if isinstance(func, str):
kwargs.pop("is_transform", None)
return self._string_function(func, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif func is None or isinstance(func, dict):
return self._default_to_pandas("agg", func, *args, **kwargs)
kwargs.pop("is_transform", None)
return self.apply(func, axis=_axis, args=args, **kwargs)
def _string_function(self, func, *args, **kwargs):
"""
Execute a function identified by its string name.
Parameters
----------
func : str
Function name to call on `self`.
*args : list
Positional arguments to pass to func.
**kwargs : dict
Keyword arguments to pass to func.
Returns
-------
object
Function result.
"""
assert isinstance(func, str)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, func, None)
if f is not None:
return self._default_to_pandas("agg", func, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(func))
def _get_dtypes(self) -> list:
"""
Get dtypes as list.
Returns
-------
list
Either a one-element list that contains `dtype` if object denotes a Series
or a list that contains `dtypes` if object denotes a DataFrame.
"""
if hasattr(self, "dtype"):
return [self.dtype]
else:
return list(self.dtypes)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=None,
fill_value=None,
method=lib.no_default,
limit=lib.no_default,
fill_axis=lib.no_default,
broadcast_axis=lib.no_default,
) -> tuple[Self, Self]: # noqa: PR01, RT01, D200
"""
Align two objects on their axes with the specified join method.
"""
if (
method is not lib.no_default
or limit is not lib.no_default
or fill_axis is not lib.no_default
):
warnings.warn(
"The 'method', 'limit', and 'fill_axis' keywords in "
+ f"{type(self).__name__}.align are deprecated and will be removed "
+ "in a future version. Call fillna directly on the returned objects "
+ "instead.",
FutureWarning,
)
if fill_axis is lib.no_default:
fill_axis = 0
if method is lib.no_default:
method = None
if limit is lib.no_default:
limit = None
if broadcast_axis is not lib.no_default:
msg = (
f"The 'broadcast_axis' keyword in {type(self).__name__}.align is "
+ "deprecated and will be removed in a future version."
)
if broadcast_axis is not None:
if self.ndim == 1 and other.ndim == 2:
msg += (
" Use left = DataFrame({col: left for col in right.columns}, "
+ "index=right.index) before calling `left.align(right)` instead."
)
elif self.ndim == 2 and other.ndim == 1:
msg += (
" Use right = DataFrame({col: right for col in left.columns}, "
+ "index=left.index) before calling `left.align(right)` instead"
)
warnings.warn(msg, FutureWarning)
else:
broadcast_axis = None
left, right = self._query_compiler.align(
other._query_compiler,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
return self.__constructor__(query_compiler=left), self.__constructor__(
query_compiler=right
)
@abc.abstractmethod
def _reduce_dimension(self, query_compiler: BaseQueryCompiler) -> Series | Scalar:
"""
Reduce the dimension of data from the `query_compiler`.
Parameters
----------
query_compiler : BaseQueryCompiler
Query compiler to retrieve the data.
Returns
-------
Series | Scalar
"""
pass
def all(
self, axis=0, bool_only=False, skipna=True, **kwargs
) -> Self: # noqa: PR01, RT01, D200
"""
Return whether all elements are True, potentially over an axis.
"""
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
if axis is not None:
axis = self._get_axis_number(axis)
if bool_only and axis == 0:
if hasattr(self, "dtype"):
raise NotImplementedError(
"{}.{} does not implement numeric_only.".format(
type(self).__name__, "all"
)
)
data_for_compute = self[self.columns[self.dtypes == np.bool_]]
return data_for_compute.all(
axis=axis, bool_only=False, skipna=skipna, **kwargs
)
return self._reduce_dimension(
self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, **kwargs
)
)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
# Reduce to a scalar if axis is None.
result = self._reduce_dimension(
# FIXME: Judging by pandas docs `**kwargs` serves only compatibility
# purpose and does not affect the result, we shouldn't pass them to the query compiler.
self._query_compiler.all(
axis=0,
bool_only=bool_only,
skipna=skipna,
**kwargs,
)
)
if isinstance(result, BasePandasDataset):
return result.all(
axis=axis, bool_only=bool_only, skipna=skipna, **kwargs
)
return result
def any(
self, *, axis=0, bool_only=False, skipna=True, **kwargs
) -> Self: # noqa: PR01, RT01, D200
"""
Return whether any element is True, potentially over an axis.
"""
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
if axis is not None:
axis = self._get_axis_number(axis)
if bool_only and axis == 0:
if hasattr(self, "dtype"):
raise NotImplementedError(
"{}.{} does not implement numeric_only.".format(
                            type(self).__name__, "any"
)
)
data_for_compute = self[self.columns[self.dtypes == np.bool_]]
return data_for_compute.any(
axis=axis, bool_only=False, skipna=skipna, **kwargs
)
return self._reduce_dimension(
self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, **kwargs
)
)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
# Reduce to a scalar if axis is None.
result = self._reduce_dimension(
self._query_compiler.any(
axis=0,
bool_only=bool_only,
skipna=skipna,
**kwargs,
)
)
if isinstance(result, BasePandasDataset):
return result.any(
axis=axis, bool_only=bool_only, skipna=skipna, **kwargs
)
return result
def apply(
self,
func,
axis,
raw,
result_type,
args,
**kwds,
) -> BaseQueryCompiler: # noqa: PR01, RT01, D200
"""
Apply a function along an axis of the `BasePandasDataset`.
"""
def error_raiser(msg, exception):
"""Convert passed exception to the same type as pandas do and raise it."""
            # HACK: to conform to pandas error types by replacing all of the
            # TypeErrors with AssertionErrors
exception = exception if exception is not TypeError else AssertionError
raise exception(msg)
self._validate_function(func, on_invalid=error_raiser)
axis = self._get_axis_number(axis)
if isinstance(func, str):
            # if axis != 1 the function can be bound to the Series, which doesn't
            # support the axis parameter
if axis == 1:
kwds["axis"] = axis
result = self._string_function(func, *args, **kwds)
if isinstance(result, BasePandasDataset):
return result._query_compiler
return result
elif isinstance(func, dict):
if self._query_compiler.get_axis_len(1) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
query_compiler = self._query_compiler.apply(
func,
axis,
args=args,
raw=raw,
result_type=result_type,
**kwds,
)
return query_compiler
def asfreq(
self, freq, method=None, how=None, normalize=False, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Convert time series to specified frequency.
"""
return self.__constructor__(
query_compiler=self._query_compiler.asfreq(
freq=freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
)
def asof(self, where, subset=None) -> Self: # noqa: PR01, RT01, D200
"""
Return the last row(s) without any NaNs before `where`.
"""
scalar = not is_list_like(where)
if isinstance(where, pandas.Index):
# Prevent accidental mutation of original:
where = where.copy()
else:
if scalar:
where = [where]
where = pandas.Index(where)
if subset is None:
data = self
else:
# Only relevant for DataFrames:
data = self[subset]
no_na_index = data.dropna().index
new_index = pandas.Index([no_na_index.asof(i) for i in where])
result = self.reindex(new_index)
result.index = where
if scalar:
# Need to return a Series:
result = result.squeeze()
return result
def astype(
self, dtype, copy=None, errors="raise"
) -> Self: # noqa: PR01, RT01, D200
"""
Cast a Modin object to a specified dtype `dtype`.
"""
if copy is None:
copy = True
# dtype can be a series, a dict, or a scalar. If it's series,
# convert it to a dict before passing it to the query compiler.
if isinstance(dtype, (pd.Series, pandas.Series)):
if not dtype.index.is_unique:
raise ValueError("cannot reindex on an axis with duplicate labels")
dtype = {column: dtype for column, dtype in dtype.items()}
# If we got a series or dict originally, dtype is a dict now. Its keys
# must be column names.
if isinstance(dtype, dict):
# avoid materializing columns in lazy mode. the query compiler
# will handle errors where dtype dict includes keys that are not
# in columns.
if (
not self._query_compiler.lazy_column_labels
and not set(dtype.keys()).issubset(set(self._query_compiler.columns))
and errors == "raise"
):
raise KeyError(
"Only a column name can be used for the key in "
+ "a dtype mappings argument."
)
if not copy:
# If the new types match the old ones, then copying can be avoided
if self._query_compiler.frame_has_materialized_dtypes:
frame_dtypes = self._query_compiler.dtypes
if isinstance(dtype, dict):
for col in dtype:
if dtype[col] != frame_dtypes[col]:
copy = True
break
else:
if not (frame_dtypes == dtype).all():
copy = True
else:
copy = True
if copy:
new_query_compiler = self._query_compiler.astype(dtype, errors=errors)
return self._create_or_update_from_compiler(new_query_compiler)
return self
@property
def at(self, axis=None) -> _LocIndexer: # noqa: PR01, RT01, D200
"""
Get a single value for a row/column label pair.
"""
from .indexing import _LocIndexer
return _LocIndexer(self)
def at_time(self, time, asof=False, axis=None) -> Self: # noqa: PR01, RT01, D200
"""
Select values at particular time of day (e.g., 9:30AM).
"""
if asof:
# pandas raises NotImplementedError for asof=True, so we do, too.
raise NotImplementedError("'asof' argument is not supported")
return self.between_time(
start_time=time, end_time=time, inclusive="both", axis=axis
)
@_inherit_docstrings(
pandas.DataFrame.between_time, apilink="pandas.DataFrame.between_time"
)
def between_time(
self,
start_time,
end_time,
inclusive="both",
axis=None,
) -> Self: # noqa: PR01, RT01, D200
return self._create_or_update_from_compiler(
self._query_compiler.between_time(
start_time=pandas.core.tools.times.to_time(start_time),
end_time=pandas.core.tools.times.to_time(end_time),
inclusive=inclusive,
axis=self._get_axis_number(axis),
)
)
def _deprecate_downcast(self, downcast, method_name: str): # noqa: GL08
if downcast is not lib.no_default:
warnings.warn(
f"The 'downcast' keyword in {method_name} is deprecated and "
+ "will be removed in a future version. Use "
+ "res.infer_objects(copy=False) to infer non-object dtype, or "
+ "pd.to_numeric with the 'downcast' keyword to downcast numeric "
+ "results.",
FutureWarning,
)
else:
downcast = None
return downcast
def bfill(
self,
*,
axis=None,
inplace=False,
limit=None,
limit_area=None,
downcast=lib.no_default,
) -> Self: # noqa: PR01, RT01, D200
"""
Synonym for `DataFrame.fillna` with ``method='bfill'``.
"""
if limit_area is not None:
return self._default_to_pandas(
"bfill",
reason="'limit_area' parameter isn't supported",
axis=axis,
inplace=inplace,
limit=limit,
limit_area=limit_area,
downcast=downcast,
)
downcast = self._deprecate_downcast(downcast, "bfill")
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", ".*fillna with 'method' is deprecated", category=FutureWarning
)
return self.fillna(
method="bfill",
axis=axis,
limit=limit,
downcast=downcast,
inplace=inplace,
)
def backfill(
self, *, axis=None, inplace=False, limit=None, downcast=lib.no_default
) -> Self: # noqa: PR01, RT01, D200
"""
Synonym for `DataFrame.bfill`.
"""
warnings.warn(
"DataFrame.backfill/Series.backfill is deprecated. Use DataFrame.bfill/Series.bfill instead",
FutureWarning,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
return self.bfill(
axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def bool(self) -> bool: # noqa: RT01, D200
"""
Return the bool of a single element `BasePandasDataset`.
"""
warnings.warn(
f"{type(self).__name__}.bool is now deprecated and will be removed "
+ "in future version of pandas",
FutureWarning,
)
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError(
"""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all()."""
)
else:
return self._to_pandas().bool()
def clip(
self, lower=None, upper=None, *, axis=None, inplace=False, **kwargs
) -> Self: # noqa: PR01, RT01, D200
"""
Trim values at input threshold(s).
"""
# validate inputs
if axis is not None:
axis = self._get_axis_number(axis)
self._validate_dtypes(numeric_only=True)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = numpy_compat.function.validate_clip_with_axis(axis, (), kwargs)
# any np.nan bounds are treated as None
if lower is not None and np.any(np.isnan(lower)):
lower = None
if upper is not None and np.any(np.isnan(upper)):
upper = None
if is_list_like(lower) or is_list_like(upper):
lower = self._validate_other(lower, axis)
upper = self._validate_other(upper, axis)
# FIXME: Judging by pandas docs `*args` and `**kwargs` serves only compatibility
# purpose and does not affect the result, we shouldn't pass them to the query compiler.
new_query_compiler = self._query_compiler.clip(
lower=lower, upper=upper, axis=axis, **kwargs
)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
def combine(
self, other, func, fill_value=None, **kwargs
) -> Self: # noqa: PR01, RT01, D200
"""
Perform combination of `BasePandasDataset`-s according to `func`.
"""
return self._binary_op(
"combine", other, _axis=0, func=func, fill_value=fill_value, **kwargs
)
def combine_first(self, other) -> Self: # noqa: PR01, RT01, D200
"""
Update null elements with value in the same location in `other`.
"""
return self._binary_op("combine_first", other, _axis=0)
def copy(self, deep=True) -> Self: # noqa: PR01, RT01, D200
"""
Make a copy of the object's metadata.
"""
if deep:
return self.__constructor__(query_compiler=self._query_compiler.copy())
new_obj = self.__constructor__(query_compiler=self._query_compiler)
self._add_sibling(new_obj)
return new_obj
def count(
self, axis=0, numeric_only=False
) -> Series | Scalar: # noqa: PR01, RT01, D200
"""
Count non-NA cells for `BasePandasDataset`.
"""
axis = self._get_axis_number(axis)
# select_dtypes is only implemented on DataFrames, but the numeric_only
# flag will always be set to false by the Series frontend
frame = self.select_dtypes([np.number, np.bool_]) if numeric_only else self
return frame._reduce_dimension(
frame._query_compiler.count(axis=axis, numeric_only=numeric_only)
)
def cummax(
self, axis=None, skipna=True, *args, **kwargs
) -> Self: # noqa: PR01, RT01, D200
"""
Return cumulative maximum over a `BasePandasDataset` axis.
"""
axis = self._get_axis_number(axis)
if axis == 1:
self._validate_dtypes(numeric_only=True)
return self.__constructor__(
# FIXME: Judging by pandas docs `*args` and `**kwargs` serves only compatibility
# purpose and does not affect the result, we shouldn't pass them to the query compiler.
query_compiler=self._query_compiler.cummax(
fold_axis=axis, axis=axis, skipna=skipna, **kwargs
)
)
def cummin(
self, axis=None, skipna=True, *args, **kwargs
) -> Self: # noqa: PR01, RT01, D200
"""
Return cumulative minimum over a `BasePandasDataset` axis.
"""
axis = self._get_axis_number(axis)
if axis == 1:
self._validate_dtypes(numeric_only=True)
return self.__constructor__(
# FIXME: Judging by pandas docs `*args` and `**kwargs` serves only compatibility
# purpose and does not affect the result, we shouldn't pass them to the query compiler.
query_compiler=self._query_compiler.cummin(
fold_axis=axis, axis=axis, skipna=skipna, **kwargs
)
)
def cumprod(
self, axis=None, skipna=True, *args, **kwargs
) -> Self: # noqa: PR01, RT01, D200
"""
Return cumulative product over a `BasePandasDataset` axis.
"""
axis = self._get_axis_number(axis)
self._validate_dtypes(numeric_only=True)
return self.__constructor__(
# FIXME: Judging by pandas docs `**kwargs` serves only compatibility
# purpose and does not affect the result, we shouldn't pass them to the query compiler.
query_compiler=self._query_compiler.cumprod(
fold_axis=axis, axis=axis, skipna=skipna, **kwargs
)
)
def cumsum(
self, axis=None, skipna=True, *args, **kwargs
) -> Self: # noqa: PR01, RT01, D200
"""
Return cumulative sum over a `BasePandasDataset` axis.
"""
axis = self._get_axis_number(axis)
self._validate_dtypes(numeric_only=True)
return self.__constructor__(
# FIXME: Judging by pandas docs `*args` and `**kwargs` serves only compatibility
# purpose and does not affect the result, we shouldn't pass them to the query compiler.
query_compiler=self._query_compiler.cumsum(
fold_axis=axis, axis=axis, skipna=skipna, **kwargs
)
)
def describe(
self,
percentiles=None,
include=None,
exclude=None,
) -> Self: # noqa: PR01, RT01, D200
"""
Generate descriptive statistics.
"""
# copied from pandas.core.describe.describe_ndframe
percentiles = _refine_percentiles(percentiles)
data = self
if self._is_dataframe:
# include/exclude arguments are ignored for Series
if (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
default_include: list[npt.DTypeLike] = [np.number]
default_include.append("datetime")
data = self.select_dtypes(include=default_include)
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(
include=include,
exclude=exclude,
)
if data.empty:
                # Match pandas error from concatenating empty list of series descriptions.
raise ValueError("No objects to concatenate")
return self.__constructor__(
query_compiler=data._query_compiler.describe(percentiles=percentiles)
)
def diff(self, periods=1, axis=0) -> Self: # noqa: PR01, RT01, D200
"""
First discrete difference of element.
"""
# Attempting to match pandas error behavior here
if not isinstance(periods, int):
raise ValueError(f"periods must be an int. got {type(periods)} instead")
# Attempting to match pandas error behavior here
for dtype in self._get_dtypes():
if not (is_numeric_dtype(dtype) or lib.is_np_dtype(dtype, "mM")):
raise TypeError(f"unsupported operand type for -: got {dtype}")
axis = self._get_axis_number(axis)
return self.__constructor__(
query_compiler=self._query_compiler.diff(axis=axis, periods=periods)
)
def drop(
self,
labels=None,
*,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
) -> Self: # noqa: PR01, RT01, D200
"""
Drop specified labels from `BasePandasDataset`.
"""
# TODO implement level
if level is not None:
return self._default_to_pandas(
"drop",
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = pandas.DataFrame._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes = {"index": index}
if self.ndim == 2:
axes["columns"] = columns
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
for axis in ["index", "columns"]:
if axis not in axes:
axes[axis] = None
elif axes[axis] is not None:
if not is_list_like(axes[axis]):
axes[axis] = [axes[axis]]
# In case of lazy execution we should bypass these error checking components
# because they can force the materialization of the row or column labels.
if (axis == "index" and self._query_compiler.lazy_row_labels) or (
axis == "columns" and self._query_compiler.lazy_column_labels
):
continue
if errors == "raise":
non_existent = pandas.Index(axes[axis]).difference(
getattr(self, axis)
)
if len(non_existent):
raise KeyError(f"labels {non_existent} not contained in axis")
else:
axes[axis] = [
obj for obj in axes[axis] if obj in getattr(self, axis)
]
# If the length is zero, we will just do nothing
if not len(axes[axis]):
axes[axis] = None
new_query_compiler = self._query_compiler.drop(
index=axes["index"], columns=axes["columns"], errors=errors
)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
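    # For example, df.drop(columns="a") is normalized above into
    # axes == {"index": None, "columns": ["a"]} (after the label-existence check
    # when errors="raise") before being handed to the query compiler.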
def dropna(
self,
*,
axis: Axis = 0,
how: str | lib.NoDefault = lib.no_default,
thresh: int | lib.NoDefault = lib.no_default,
subset: IndexLabel = None,
inplace: bool = False,
ignore_index: bool = False,
) -> Self: # noqa: PR01, RT01, D200
"""
Remove missing values.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
if how is not None and how not in ["any", "all", lib.no_default]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
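            # ``subset`` labels live on the axis opposite to the one being
            # dropped along (columns when axis=0, index when axis=1), so
            # validate them against that opposite axis.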
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
if ignore_index:
new_query_compiler.index = pandas.RangeIndex(
stop=len(new_query_compiler.index)
)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
def droplevel(self, level, axis=0) -> Self: # noqa: PR01, RT01, D200
"""
Return `BasePandasDataset` with requested index / column level(s) removed.
"""
axis = self._get_axis_number(axis)
result = self.copy()
if axis == 0:
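            # Dropping index level(s) is emulated by resetting the index,
            # dropping the corresponding column(s), and re-setting the
            # remaining levels as the index.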
index_columns = result.index.names.copy()
if is_integer(level):
level = index_columns[level]
elif is_list_like(level):
level = [
index_columns[lev] if is_integer(lev) else lev for lev in level
]
if is_list_like(level):
for lev in level:
index_columns.remove(lev)
else:
index_columns.remove(level)
if len(result.columns.names) > 1:
# In this case, we are dealing with a MultiIndex column, so we need to
# be careful when dropping the additional index column.
if is_list_like(level):
drop_labels = [(lev, "") for lev in level]
else:
drop_labels = [(level, "")]
result = result.reset_index().drop(columns=drop_labels)
else:
result = result.reset_index().drop(columns=level)
result = result.set_index(index_columns)
else:
result.columns = self.columns.droplevel(level)
return result
def drop_duplicates(
self, keep="first", inplace=False, **kwargs
) -> Self: # noqa: PR01, RT01, D200
"""
Return `BasePandasDataset` with duplicate rows removed.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
ignore_index = kwargs.get("ignore_index", False)
subset = kwargs.get("subset", None)
if subset is not None:
if is_list_like(subset):
if not isinstance(subset, list):
subset = list(subset)
else:
subset = [subset]
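            # Match pandas by raising ``KeyError`` for any subset labels that
            # are not present in the columns.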
if len(diff := pandas.Index(subset).difference(self.columns)) > 0:
raise KeyError(diff)
result_qc = self._query_compiler.unique(
keep=keep, ignore_index=ignore_index, subset=subset
)
result = self.__constructor__(query_compiler=result_qc)
if inplace:
self._update_inplace(result._query_compiler)
else:
return result
def eq(self, other, axis="columns", level=None) -> Self: # noqa: PR01, RT01, D200
"""
Get equality of `BasePandasDataset` and `other`, element-wise (binary operator `eq`).
"""
return self._binary_op("eq", other, axis=axis, level=level, dtypes=np.bool_)
def explode(
self, column, ignore_index: bool = False
) -> Self: # noqa: PR01, RT01, D200
"""
Transform each element of a list-like to a row.
"""
exploded = self.__constructor__(
query_compiler=self._query_compiler.explode(column)
)
if ignore_index:
exploded = exploded.reset_index(drop=True)
return exploded
def ewm(
self,
com: float | None = None,
span: float | None = None,
halflife: float | TimedeltaConvertibleTypes | None = None,
alpha: float | None = None,
min_periods: int | None = 0,
adjust: bool = True,
ignore_na: bool = False,
axis: Axis = lib.no_default,
times: str | np.ndarray | BasePandasDataset | None = None,
method: str = "single",
) -> pandas.core.window.ewm.ExponentialMovingWindow: # noqa: PR01, RT01, D200
"""
Provide exponentially weighted (EW) calculations.
"""
return self._default_to_pandas(
"ewm",
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
times=times,
method=method,
)
def expanding(
self, min_periods=1, axis=lib.no_default, method="single"
) -> Expanding: # noqa: PR01, RT01, D200
"""
Provide expanding window calculations.
"""
from .window import Expanding
if axis is not lib.no_default:
axis = self._get_axis_number(axis)
name = "expanding"
if axis == 1:
warnings.warn(
f"Support for axis=1 in {type(self).__name__}.{name} is "
+ "deprecated and will be removed in a future version. "
+ f"Use obj.T.{name}(...) instead",
FutureWarning,
)
else:
warnings.warn(
f"The 'axis' keyword in {type(self).__name__}.{name} is "
+ "deprecated and will be removed in a future version. "
+ "Call the method without the axis keyword instead.",
FutureWarning,
)
else:
axis = 0
return Expanding(
self,
min_periods=min_periods,
axis=axis,
method=method,
)
def ffill(
self,
*,
axis=None,
inplace=False,
limit=None,
limit_area=None,
downcast=lib.no_default,
) -> Self | None: # noqa: PR01, RT01, D200
"""
Synonym for `DataFrame.fillna` with ``method='ffill'``.
"""
if limit_area is not None:
return self._default_to_pandas(
"ffill",
reason="'limit_area' parameter isn't supported",
axis=axis,
inplace=inplace,
limit=limit,
limit_area=limit_area,
downcast=downcast,
)
downcast = self._deprecate_downcast(downcast, "ffill")
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", ".*fillna with 'method' is deprecated", category=FutureWarning
)
return self.fillna(
method="ffill",
axis=axis,
limit=limit,
downcast=downcast,
inplace=inplace,
)
def pad(
self, *, axis=None, inplace=False, limit=None, downcast=lib.no_default
) -> Self | None: # noqa: PR01, RT01, D200
"""
Synonym for `DataFrame.ffill`.
"""
warnings.warn(
"DataFrame.pad/Series.pad is deprecated. Use DataFrame.ffill/Series.ffill instead",
FutureWarning,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
return self.ffill(
axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def fillna(
self,
squeeze_self,
squeeze_value,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=lib.no_default,
) -> Self | None:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
squeeze_self : bool
If True then self contains a Series object, if False then self contains
a DataFrame object.
squeeze_value : bool
If True then value contains a Series object, if False then value contains
a DataFrame object.
value : scalar, dict, Series, or DataFrame, default: None
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default: None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use next valid observation to fill gap.
axis : {None, 0, 1}, default: None
Axis along which to fill missing values.
inplace : bool, default: False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default: None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default: None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
Series, DataFrame or None
Object with missing values filled or None if ``inplace=True``.
"""
if method is not None:
warnings.warn(
f"{type(self).__name__}.fillna with 'method' is deprecated and "
+ "will raise in a future version. Use obj.ffill() or obj.bfill() "
+ "instead.",
FutureWarning,
)
downcast = self._deprecate_downcast(downcast, "fillna")
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if isinstance(value, (list, tuple)):
raise TypeError(
'"value" parameter must be a scalar or dict, but '
+ f'you passed a "{type(value).__name__}"'
)
if value is None and method is None:
raise ValueError("must specify a fill method or value")
if value is not None and method is not None:
raise ValueError("cannot specify both a fill method and value")
if method is not None and method not in ["backfill", "bfill", "pad", "ffill"]:
expecting = "pad (ffill) or backfill (bfill)"
msg = "Invalid fill method. Expecting {expecting}. Got {method}".format(
expecting=expecting, method=method
)
raise ValueError(msg)
if limit is not None:
if not isinstance(limit, int):
raise ValueError("Limit must be an integer")
elif limit <= 0:
raise ValueError("Limit must be greater than 0")
if isinstance(value, BasePandasDataset):
value = value._query_compiler
new_query_compiler = self._query_compiler.fillna(
squeeze_self=squeeze_self,
squeeze_value=squeeze_value,
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
def filter(
self, items=None, like=None, regex=None, axis=None
) -> Self: # noqa: PR01, RT01, D200
"""
Subset the `BasePandasDataset` rows or columns according to the specified index labels.
"""
nkw = count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` are mutually exclusive"
)
if nkw == 0:
raise TypeError("Must pass either `items`, `like`, or `regex`")
if axis is None:
axis = "columns" # This is the default info axis for dataframes
axis = self._get_axis_number(axis)
labels = self.columns if axis else self.index
if items is not None:
bool_arr = labels.isin(items)
elif like is not None:
def f(x):
return like in str(x)
bool_arr = labels.map(f).tolist()
else:
def f(x):
return matcher.search(str(x)) is not None
matcher = re.compile(regex)
bool_arr = labels.map(f).tolist()
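        # Row-axis filtering applies the boolean mask directly to ``self``;
        # column-axis filtering selects the matching column labels instead.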
if not axis:
return self[bool_arr]
return self[self.columns[bool_arr]]
def first(self, offset) -> Self | None: # noqa: PR01, RT01, D200
"""
Select initial periods of time series data based on a date offset.
"""
warnings.warn(
"first is deprecated and will be removed in a future version. "
+ "Please create a mask and filter using `.loc` instead",
FutureWarning,
)
return self._create_or_update_from_compiler(
self._query_compiler.first(offset=to_offset(offset))
)
def first_valid_index(self) -> int: # noqa: RT01, D200
"""
Return index for first non-NA value or None, if no non-NA value is found.
"""
return self._query_compiler.first_valid_index()
def floordiv(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get integer division of `BasePandasDataset` and `other`, element-wise (binary operator `floordiv`).
"""
return self._binary_op(
"floordiv", other, axis=axis, level=level, fill_value=fill_value
)
def ge(self, other, axis="columns", level=None) -> Self: # noqa: PR01, RT01, D200
"""
Get greater than or equal comparison of `BasePandasDataset` and `other`, element-wise (binary operator `ge`).
"""
return self._binary_op("ge", other, axis=axis, level=level, dtypes=np.bool_)
def get(
self, key, default=None
) -> DataFrame | Series | Scalar: # noqa: PR01, RT01, D200
"""
Get item from object for given key.
"""
# Match pandas behavior here
try:
return self.__getitem__(key)
except (KeyError, ValueError, IndexError):
return default
def gt(self, other, axis="columns", level=None) -> Self: # noqa: PR01, RT01, D200
"""
Get greater than comparison of `BasePandasDataset` and `other`, element-wise (binary operator `gt`).
"""
return self._binary_op("gt", other, axis=axis, level=level, dtypes=np.bool_)
def head(self, n=5) -> Self: # noqa: PR01, RT01, D200
"""
Return the first `n` rows.
"""
return self.iloc[:n]
@property
def iat(self, axis=None) -> _iLocIndexer: # noqa: PR01, RT01, D200
"""
Get a single value for a row/column pair by integer position.
"""
from .indexing import _iLocIndexer
return _iLocIndexer(self)
def idxmax(
self, axis=0, skipna=True, numeric_only=False
) -> Self: # noqa: PR01, RT01, D200
"""
Return index of first occurrence of maximum over requested axis.
"""
axis = self._get_axis_number(axis)
return self._reduce_dimension(
self._query_compiler.idxmax(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
)
def idxmin(
self, axis=0, skipna=True, numeric_only=False
) -> Self: # noqa: PR01, RT01, D200
"""
Return index of first occurrence of minimum over requested axis.
"""
axis = self._get_axis_number(axis)
return self._reduce_dimension(
self._query_compiler.idxmin(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
)
def infer_objects(self, copy=None) -> Self: # noqa: PR01, RT01, D200
"""
Attempt to infer better dtypes for object columns.
"""
new_query_compiler = self._query_compiler.infer_objects()
return self._create_or_update_from_compiler(
new_query_compiler, inplace=False if copy is None else not copy
)
def convert_dtypes(
self,
infer_objects: bool = True,
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
convert_floating: bool = True,
dtype_backend: DtypeBackend = "numpy_nullable",
) -> Self: # noqa: PR01, RT01, D200
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
"""
return self.__constructor__(
query_compiler=self._query_compiler.convert_dtypes(
infer_objects=infer_objects,
convert_string=convert_string,
convert_integer=convert_integer,
convert_boolean=convert_boolean,
convert_floating=convert_floating,
dtype_backend=dtype_backend,
)
)
def isin(self, values) -> Self: # noqa: PR01, RT01, D200
"""
Whether elements in `BasePandasDataset` are contained in `values`.
"""
from .series import Series
ignore_indices = isinstance(values, Series)
values = getattr(values, "_query_compiler", values)
return self.__constructor__(
query_compiler=self._query_compiler.isin(
values=values, ignore_indices=ignore_indices
)
)
def isna(self) -> Self: # noqa: RT01, D200
"""
Detect missing values.
"""
return self.__constructor__(query_compiler=self._query_compiler.isna())
isnull: Self = isna
@property
def iloc(self) -> _iLocIndexer: # noqa: RT01, D200
"""
Purely integer-location based indexing for selection by position.
"""
from .indexing import _iLocIndexer
return _iLocIndexer(self)
@_inherit_docstrings(pandas.DataFrame.kurt, apilink="pandas.DataFrame.kurt")
def kurt(self, axis=0, skipna=True, numeric_only=False, **kwargs) -> Series | float:
return self._stat_operation("kurt", axis, skipna, numeric_only, **kwargs)
kurtosis: Series | float = kurt
def last(self, offset) -> Self: # noqa: PR01, RT01, D200
"""
Select final periods of time series data based on a date offset.
"""
warnings.warn(
"last is deprecated and will be removed in a future version. "
+ "Please create a mask and filter using `.loc` instead",
FutureWarning,
)
return self._create_or_update_from_compiler(
self._query_compiler.last(offset=to_offset(offset))
)
def last_valid_index(self) -> int: # noqa: RT01, D200
"""
Return index for last non-NA value or None, if no non-NA value is found.
"""
return self._query_compiler.last_valid_index()
def le(self, other, axis="columns", level=None) -> Self: # noqa: PR01, RT01, D200
"""
Get less than or equal comparison of `BasePandasDataset` and `other`, element-wise (binary operator `le`).
"""
return self._binary_op("le", other, axis=axis, level=level, dtypes=np.bool_)
def lt(self, other, axis="columns", level=None) -> Self: # noqa: PR01, RT01, D200
"""
Get less than comparison of `BasePandasDataset` and `other`, element-wise (binary operator `lt`).
"""
return self._binary_op("lt", other, axis=axis, level=level, dtypes=np.bool_)
@property
def loc(self) -> _LocIndexer: # noqa: RT01, D200
"""
Get a group of rows and columns by label(s) or a boolean array.
"""
from .indexing import _LocIndexer
return _LocIndexer(self)
def mask(
self,
cond,
other=lib.no_default,
*,
inplace: bool = False,
axis: Optional[Axis] = None,
level: Optional[Level] = None,
) -> Self | None: # noqa: PR01, RT01, D200
"""
Replace values where the condition is True.
"""
return self._create_or_update_from_compiler(
self._query_compiler.mask(
cond,
other=other,
inplace=False,
axis=axis,
level=level,
),
inplace=inplace,
)
def max(
self,
axis: Axis = 0,
skipna=True,
numeric_only=False,
**kwargs,
) -> Series | None: # noqa: PR01, RT01, D200
"""
Return the maximum of the values over the requested axis.
"""
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
orig_axis = axis
axis = self._get_axis_number(axis)
data = self._validate_dtypes_min_max(axis, numeric_only)
res = data._reduce_dimension(
data._query_compiler.max(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
**kwargs,
)
)
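        # ``axis=None`` means reduce over both axes: the reduction above
        # produced a Series, so reduce once more to get a scalar.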
if orig_axis is None:
res = res._reduce_dimension(
res._query_compiler.max(
axis=0,
skipna=skipna,
numeric_only=False,
**kwargs,
)
)
return res
def min(
self,
axis: Axis = 0,
skipna: bool = True,
numeric_only=False,
**kwargs,
) -> Series | None: # noqa: PR01, RT01, D200
"""
Return the minimum of the values over the requested axis.
"""
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
orig_axis = axis
axis = self._get_axis_number(axis)
data = self._validate_dtypes_min_max(axis, numeric_only)
res = data._reduce_dimension(
data._query_compiler.min(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
**kwargs,
)
)
if orig_axis is None:
res = res._reduce_dimension(
res._query_compiler.min(
axis=0,
skipna=skipna,
numeric_only=False,
**kwargs,
)
)
return res
def _stat_operation(
self,
op_name: str,
axis: Optional[Union[int, str]],
skipna: bool,
numeric_only: Optional[bool] = False,
**kwargs,
):
"""
Do common statistic reduce operations under frame.
Parameters
----------
op_name : str
Name of method to apply.
axis : int or str
Axis to apply method on.
skipna : bool
Exclude NA/null values when computing the result.
numeric_only : bool, default: False
Include only float, int, boolean columns. If None, will attempt
to use everything, then use only numeric data.
**kwargs : dict
Additional keyword arguments to pass to `op_name`.
Returns
-------
scalar, Series or DataFrame
`scalar` - self is Series and level is not specified.
`Series` - self is Series and level is specified, or
self is DataFrame and level is not specified.
`DataFrame` - self is DataFrame and level is specified.
"""
axis = self._get_axis_number(axis) if axis is not None else None
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
if op_name == "median":
numpy_compat.function.validate_median((), kwargs)
elif op_name in ("sem", "var", "std"):
val_kwargs = {k: v for k, v in kwargs.items() if k != "ddof"}
numpy_compat.function.validate_stat_ddof_func((), val_kwargs, fname=op_name)
else:
numpy_compat.function.validate_stat_func((), kwargs, fname=op_name)
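        # With ``numeric_only`` unset, validate the dtypes up front so the same
        # error pandas would raise is produced; with it set, reduce over the
        # numeric columns only.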
if not numeric_only:
self._validate_dtypes(numeric_only=True)
data = (
self._get_numeric_data(axis if axis is not None else 0)
if numeric_only
else self
)
result_qc = getattr(data._query_compiler, op_name)(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
**kwargs,
)
return (
self._reduce_dimension(result_qc)
if isinstance(result_qc, type(self._query_compiler))
# scalar case
else result_qc
)
def memory_usage(
self, index=True, deep=False
) -> Series | None: # noqa: PR01, RT01, D200
"""
Return the memory usage of the `BasePandasDataset`.
"""
return self._reduce_dimension(
self._query_compiler.memory_usage(index=index, deep=deep)
)
def mod(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get modulo of `BasePandasDataset` and `other`, element-wise (binary operator `mod`).
"""
return self._binary_op(
"mod", other, axis=axis, level=level, fill_value=fill_value
)
def mode(
self, axis=0, numeric_only=False, dropna=True
) -> Self: # noqa: PR01, RT01, D200
"""
Get the mode(s) of each element along the selected axis.
"""
axis = self._get_axis_number(axis)
return self.__constructor__(
query_compiler=self._query_compiler.mode(
axis=axis, numeric_only=numeric_only, dropna=dropna
)
)
def mul(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get multiplication of `BasePandasDataset` and `other`, element-wise (binary operator `mul`).
"""
return self._binary_op(
"mul", other, axis=axis, level=level, fill_value=fill_value
)
multiply: Self = mul
def ne(self, other, axis="columns", level=None) -> Self: # noqa: PR01, RT01, D200
"""
        Get not equal comparison of `BasePandasDataset` and `other`, element-wise (binary operator `ne`).
"""
return self._binary_op("ne", other, axis=axis, level=level, dtypes=np.bool_)
def notna(self) -> Self: # noqa: RT01, D200
"""
Detect existing (non-missing) values.
"""
return self.__constructor__(query_compiler=self._query_compiler.notna())
notnull: Self = notna
def nunique(self, axis=0, dropna=True) -> Series | int: # noqa: PR01, RT01, D200
"""
Return number of unique elements in the `BasePandasDataset`.
"""
axis = self._get_axis_number(axis)
return self._reduce_dimension(
self._query_compiler.nunique(axis=axis, dropna=dropna)
)
def pct_change(
self,
periods=1,
fill_method=lib.no_default,
limit=lib.no_default,
freq=None,
**kwargs,
) -> Self: # noqa: PR01, RT01, D200
"""
Percentage change between the current and a prior element.
"""
if fill_method not in (lib.no_default, None) or limit is not lib.no_default:
warnings.warn(
"The 'fill_method' keyword being not None and the 'limit' keyword in "
+ f"{type(self).__name__}.pct_change are deprecated and will be removed "
+ "in a future version. Either fill in any non-leading NA values prior "
+ "to calling pct_change or specify 'fill_method=None' to not fill NA "
+ "values.",
FutureWarning,
)
if fill_method is lib.no_default:
if self.isna().values.any():
warnings.warn(
"The default fill_method='pad' in "
+ f"{type(self).__name__}.pct_change is deprecated and will be "
+ "removed in a future version. Call ffill before calling "
+ "pct_change to retain current behavior and silence this warning.",
FutureWarning,
)
fill_method = "pad"
if limit is lib.no_default:
limit = None
# Attempting to match pandas error behavior here
if not isinstance(periods, int):
raise ValueError(f"periods must be an int. got {type(periods)} instead")
# Attempting to match pandas error behavior here
for dtype in self._get_dtypes():
if not is_numeric_dtype(dtype):
raise TypeError(f"unsupported operand type for /: got {dtype}")
return self.__constructor__(
query_compiler=self._query_compiler.pct_change(
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
**kwargs,
)
)
def pipe(
self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs
) -> T: # noqa: PR01, RT01, D200
"""
Apply chainable functions that expect `BasePandasDataset`.
"""
return pipe(self, func, *args, **kwargs)
def pop(self, item) -> Series | Scalar: # noqa: PR01, RT01, D200
"""
Return item and drop from frame. Raise KeyError if not found.
"""
result = self[item]
del self[item]
return result
def pow(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get exponential power of `BasePandasDataset` and `other`, element-wise (binary operator `pow`).
"""
return self._binary_op(
"pow", other, axis=axis, level=level, fill_value=fill_value
)
def quantile(
self, q, axis, numeric_only, interpolation, method
) -> DataFrame | Series | Scalar: # noqa: PR01, RT01, D200
"""
Return values at the given quantile over requested axis.
"""
axis = self._get_axis_number(axis)
def check_dtype(t):
return is_numeric_dtype(t) or lib.is_np_dtype(t, "mM")
numeric_only_df = self
if not numeric_only:
# If not numeric_only and columns, then check all columns are either
# numeric, timestamp, or timedelta
if not axis and not all(check_dtype(t) for t in self._get_dtypes()):
raise TypeError("can't multiply sequence by non-int of type 'float'")
# If over rows, then make sure that all dtypes are equal for not
# numeric_only
elif axis:
for i in range(1, len(self._get_dtypes())):
pre_dtype = self._get_dtypes()[i - 1]
curr_dtype = self._get_dtypes()[i]
if not is_dtype_equal(pre_dtype, curr_dtype):
raise TypeError(
"Cannot compare type '{0}' with type '{1}'".format(
pre_dtype, curr_dtype
)
)
else:
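            # ``numeric_only=True``: emulate it by dropping the non-numeric
            # columns before delegating to the query compiler.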
numeric_only_df = self.drop(
columns=[
i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
]
)
# check that all qs are between 0 and 1
validate_percentile(q)
axis = numeric_only_df._get_axis_number(axis)
if isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list, tuple)):
return numeric_only_df.__constructor__(
query_compiler=numeric_only_df._query_compiler.quantile_for_list_of_values(
q=q,
axis=axis,
# `numeric_only=True` has already been processed by using `self.drop` function
numeric_only=False,
interpolation=interpolation,
method=method,
)
)
else:
result = numeric_only_df._reduce_dimension(
numeric_only_df._query_compiler.quantile_for_single_value(
q=q,
axis=axis,
# `numeric_only=True` has already been processed by using `self.drop` function
numeric_only=False,
interpolation=interpolation,
method=method,
)
)
if isinstance(result, BasePandasDataset):
result.name = q
return result
@_inherit_docstrings(pandas.DataFrame.rank, apilink="pandas.DataFrame.rank")
def rank(
self,
axis=0,
method: str = "average",
numeric_only=False,
na_option: str = "keep",
ascending: bool = True,
pct: bool = False,
) -> Self:
if axis is None:
raise ValueError(
f"No axis named None for object type {type(self).__name__}"
)
axis = self._get_axis_number(axis)
return self.__constructor__(
query_compiler=self._query_compiler.rank(
axis=axis,
method=method,
numeric_only=numeric_only,
na_option=na_option,
ascending=ascending,
pct=pct,
)
)
def _copy_index_metadata(self, source, destination): # noqa: PR01, RT01, D200
"""
Copy Index metadata from `source` to `destination` inplace.
"""
if hasattr(source, "name") and hasattr(destination, "name"):
destination.name = source.name
if hasattr(source, "names") and hasattr(destination, "names"):
destination.names = source.names
return destination
def _ensure_index(self, index_like, axis=0): # noqa: PR01, RT01, D200
"""
Ensure that we have an index from some index-like object.
"""
if (
self._query_compiler.has_multiindex(axis=axis)
and not isinstance(index_like, pandas.Index)
and is_list_like(index_like)
and len(index_like) > 0
and isinstance(index_like[0], tuple)
):
try:
return pandas.MultiIndex.from_tuples(index_like)
except TypeError:
# not all tuples
pass
return ensure_index(index_like)
def reindex(
self,
index=None,
columns=None,
copy=True,
**kwargs,
) -> Self: # noqa: PR01, RT01, D200
"""
Conform `BasePandasDataset` to new index with optional filling logic.
"""
new_query_compiler = None
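        # Reindex rows first, then columns, skipping either step when the
        # requested labels already equal the existing axis labels.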
if index is not None:
if not isinstance(index, pandas.Index) or not index.equals(self.index):
new_query_compiler = self._query_compiler.reindex(
axis=0, labels=index, **kwargs
)
if new_query_compiler is None:
new_query_compiler = self._query_compiler
final_query_compiler = None
if columns is not None:
            if not isinstance(columns, pandas.Index) or not columns.equals(self.columns):
final_query_compiler = new_query_compiler.reindex(
axis=1, labels=columns, **kwargs
)
if final_query_compiler is None:
final_query_compiler = new_query_compiler
return self._create_or_update_from_compiler(
final_query_compiler, inplace=False if copy is None else not copy
)
def rename_axis(
self,
mapper=lib.no_default,
*,
index=lib.no_default,
columns=lib.no_default,
axis=0,
copy=None,
inplace=False,
) -> DataFrame | Series | None: # noqa: PR01, RT01, D200
"""
Set the name of the axis for the index or columns.
"""
axes = {"index": index, "columns": columns}
if copy is None:
copy = True
if axis is not None:
axis = self._get_axis_number(axis)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not lib.no_default:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self.ndim):
v = axes.get(pandas.DataFrame._get_axis_name(axis))
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
def _get_rename_function(mapper):
if isinstance(mapper, (dict, BasePandasDataset)):
def f(x):
if x in mapper:
return mapper[x]
else:
return x
else:
f = mapper
return f
f = _get_rename_function(v)
curnames = self.index.names if axis == 0 else self.columns.names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def reorder_levels(self, order, axis=0) -> Self: # noqa: PR01, RT01, D200
"""
Rearrange index levels using input order.
"""
axis = self._get_axis_number(axis)
new_labels = self._get_axis(axis).reorder_levels(order)
return self.set_axis(new_labels, axis=axis)
def resample(
self,
rule,
axis: Axis = lib.no_default,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = lib.no_default,
kind: Optional[str] = lib.no_default,
on: Level = None,
level: Level = None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: Optional[TimedeltaConvertibleTypes] = None,
group_keys=False,
) -> Resampler: # noqa: PR01, RT01, D200
"""
Resample time-series data.
"""
from .resample import Resampler
if axis is not lib.no_default:
axis = self._get_axis_number(axis)
if axis == 1:
warnings.warn(
"DataFrame.resample with axis=1 is deprecated. Do "
+ "`frame.T.resample(...)` without axis instead.",
FutureWarning,
)
else:
warnings.warn(
f"The 'axis' keyword in {type(self).__name__}.resample is "
+ "deprecated and will be removed in a future version.",
FutureWarning,
)
else:
axis = 0
return Resampler(
dataframe=self,
rule=rule,
axis=axis,
closed=closed,
label=label,
convention=convention,
kind=kind,
on=on,
level=level,
origin=origin,
offset=offset,
group_keys=group_keys,
)
def reset_index(
self,
level: IndexLabel = None,
*,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Hashable = "",
allow_duplicates=lib.no_default,
names: Hashable | Sequence[Hashable] = None,
) -> DataFrame | Series | None: # noqa: PR01, RT01, D200
"""
Reset the index, or a level of it.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# Error checking for matching pandas. Pandas does not allow you to
# insert a dropped index into a DataFrame if these columns already
# exist.
if (
not drop
and not (
self._query_compiler.lazy_column_labels
or self._query_compiler.lazy_row_labels
)
and not self._query_compiler.has_multiindex()
and all(n in self.columns for n in ["level_0", "index"])
):
raise ValueError("cannot insert level_0, already exists")
new_query_compiler = self._query_compiler.reset_index(
drop=drop,
level=level,
col_level=col_level,
col_fill=col_fill,
allow_duplicates=allow_duplicates,
names=names,
)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
def radd(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Return addition of `BasePandasDataset` and `other`, element-wise (binary operator `radd`).
"""
return self._binary_op(
"radd", other, axis=axis, level=level, fill_value=fill_value
)
def rfloordiv(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get integer division of `BasePandasDataset` and `other`, element-wise (binary operator `rfloordiv`).
"""
return self._binary_op(
"rfloordiv", other, axis=axis, level=level, fill_value=fill_value
)
def rmod(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get modulo of `BasePandasDataset` and `other`, element-wise (binary operator `rmod`).
"""
return self._binary_op(
"rmod", other, axis=axis, level=level, fill_value=fill_value
)
def rmul(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
        Get multiplication of `BasePandasDataset` and `other`, element-wise (binary operator `rmul`).
"""
return self._binary_op(
"rmul", other, axis=axis, level=level, fill_value=fill_value
)
def rolling(
self,
window,
min_periods: int | None = None,
center: bool = False,
win_type: str | None = None,
on: str | None = None,
axis: Axis = lib.no_default,
closed: str | None = None,
step: int | None = None,
method: str = "single",
) -> Rolling | Window: # noqa: PR01, RT01, D200
"""
Provide rolling window calculations.
"""
if axis is not lib.no_default:
axis = self._get_axis_number(axis)
name = "rolling"
if axis == 1:
warnings.warn(
f"Support for axis=1 in {type(self).__name__}.{name} is "
+ "deprecated and will be removed in a future version. "
+ f"Use obj.T.{name}(...) instead",
FutureWarning,
)
else:
warnings.warn(
f"The 'axis' keyword in {type(self).__name__}.{name} is "
+ "deprecated and will be removed in a future version. "
+ "Call the method without the axis keyword instead.",
FutureWarning,
)
else:
axis = 0
if win_type is not None:
from .window import Window
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
step=step,
method=method,
)
from .window import Rolling
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
step=step,
method=method,
)
def round(self, decimals=0, *args, **kwargs) -> Self: # noqa: PR01, RT01, D200
"""
Round a `BasePandasDataset` to a variable number of decimal places.
"""
# FIXME: Judging by pandas docs `*args` and `**kwargs` serves only compatibility
# purpose and does not affect the result, we shouldn't pass them to the query compiler.
return self.__constructor__(
query_compiler=self._query_compiler.round(decimals=decimals, **kwargs)
)
def rpow(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get exponential power of `BasePandasDataset` and `other`, element-wise (binary operator `rpow`).
"""
return self._binary_op(
"rpow", other, axis=axis, level=level, fill_value=fill_value
)
def rsub(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get subtraction of `BasePandasDataset` and `other`, element-wise (binary operator `rsub`).
"""
return self._binary_op(
"rsub", other, axis=axis, level=level, fill_value=fill_value
)
def rtruediv(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get floating division of `BasePandasDataset` and `other`, element-wise (binary operator `rtruediv`).
"""
return self._binary_op(
"rtruediv", other, axis=axis, level=level, fill_value=fill_value
)
rdiv: Self = rtruediv
def sample(
self,
n: int | None = None,
frac: float | None = None,
replace: bool = False,
weights=None,
random_state: RandomState | None = None,
axis: Axis | None = None,
ignore_index: bool = False,
) -> Self: # noqa: PR01, RT01, D200
"""
Return a random sample of items from an axis of object.
"""
axis = self._get_axis_number(axis)
if axis:
axis_labels = self.columns
axis_length = len(axis_labels)
else:
# Getting rows requires indices instead of labels. RangeIndex provides this.
axis_labels = pandas.RangeIndex(len(self))
axis_length = len(axis_labels)
if weights is not None:
# Index of the weights Series should correspond to the index of the
# Dataframe in order to sample
if isinstance(weights, BasePandasDataset):
weights = weights.reindex(self._get_axis(axis))
            # If weights arg is a string, the weights used for sampling will
            # be the values in the column corresponding to that string
if isinstance(weights, str):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a valid column")
else:
raise ValueError(
"Strings can only be passed to "
+ "weights when sampling from rows on "
+ "a DataFrame"
)
weights = pandas.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# weights cannot be NaN when sampling, so we must set all nan
# values to 0
weights = weights.fillna(0)
            # If the passed-in weights do not sum to 1, renormalize them;
            # otherwise the numpy sampling function will error
weights_sum = weights.sum()
if weights_sum != 1:
if weights_sum != 0:
weights = weights / weights_sum
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
if n is None and frac is None:
# default to n = 1 if n and frac are both None (in accordance with
# pandas specification)
n = 1
elif n is not None and frac is None and n % 1 != 0:
# n must be an integer
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
# compute the number of samples based on frac
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
# Pandas specification does not allow both n and frac to be passed
# in
raise ValueError("Please enter a value for `frac` OR `n`, not both")
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
if n == 0:
# This returns an empty object, and since it is a weird edge case that
# doesn't need to be distributed, we default to pandas for n=0.
# We don't need frac to be set to anything since n is already 0.
return self._default_to_pandas(
"sample",
n=n,
frac=None,
replace=replace,
weights=weights,
random_state=random_state,
axis=axis,
ignore_index=ignore_index,
)
if random_state is not None:
# Get a random number generator depending on the type of
# random_state that is passed in
if isinstance(random_state, int):
random_num_gen = np.random.RandomState(random_state)
elif isinstance(random_state, np.random.RandomState):
random_num_gen = random_state
else:
# random_state must be an int or a numpy RandomState object
raise ValueError(
"Please enter an `int` OR a "
+ "np.random.RandomState for random_state"
)
# choose random numbers and then get corresponding labels from
# chosen axis
sample_indices = random_num_gen.choice(
np.arange(0, axis_length), size=n, replace=replace, p=weights
)
samples = axis_labels[sample_indices]
else:
# randomly select labels from chosen axis
samples = np.random.choice(
a=axis_labels, size=n, replace=replace, p=weights
)
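        # Choosing which labels/positions to keep happens locally with NumPy;
        # only the final row or column selection is delegated to the query
        # compiler.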
if axis:
query_compiler = self._query_compiler.getitem_column_array(samples)
return self.__constructor__(query_compiler=query_compiler)
else:
query_compiler = self._query_compiler.getitem_row_array(samples)
return self.__constructor__(query_compiler=query_compiler)
def sem(
self,
axis: Axis = 0,
skipna: bool = True,
ddof: int = 1,
numeric_only=False,
**kwargs,
) -> Series | float: # noqa: PR01, RT01, D200
"""
Return unbiased standard error of the mean over requested axis.
"""
return self._stat_operation(
"sem", axis, skipna, numeric_only, ddof=ddof, **kwargs
)
def mean(
self,
axis: Axis = 0,
skipna=True,
numeric_only=False,
**kwargs,
) -> Series | float: # noqa: PR01, RT01, D200
"""
Return the mean of the values over the requested axis.
"""
return self._stat_operation("mean", axis, skipna, numeric_only, **kwargs)
def median(
self,
axis: Axis = 0,
skipna=True,
numeric_only=False,
**kwargs,
) -> Series | float: # noqa: PR01, RT01, D200
"""
        Return the median of the values over the requested axis.
"""
return self._stat_operation("median", axis, skipna, numeric_only, **kwargs)
def set_axis(
self,
labels,
*,
axis: Axis = 0,
copy=None,
) -> Self: # noqa: PR01, RT01, D200
"""
Assign desired index to given axis.
"""
if copy is None:
copy = True
obj = self.copy() if copy else self
setattr(obj, pandas.DataFrame._get_axis_name(axis), labels)
return obj
def set_flags(
self, *, copy: bool = False, allows_duplicate_labels: Optional[bool] = None
) -> Self: # noqa: PR01, RT01, D200
"""
Return a new `BasePandasDataset` with updated flags.
"""
return self._default_to_pandas(
pandas.DataFrame.set_flags,
copy=copy,
allows_duplicate_labels=allows_duplicate_labels,
)
@property
def flags(self):
return self._default_to_pandas(lambda df: df.flags)
def shift(
self,
periods: int = 1,
freq=None,
axis: Axis = 0,
fill_value: Hashable = lib.no_default,
suffix=None,
) -> Self | DataFrame: # noqa: PR01, RT01, D200
"""
Shift index by desired number of periods with an optional time `freq`.
"""
if suffix:
return self._default_to_pandas(
lambda df: df.shift(
periods=periods,
freq=freq,
axis=axis,
fill_value=fill_value,
suffix=suffix,
)
)
if freq is not None and fill_value is not lib.no_default:
raise ValueError(
"Cannot pass both 'freq' and 'fill_value' to "
+ f"{type(self).__name__}.shift"
)
if periods == 0:
# Check obvious case first
return self.copy()
return self._create_or_update_from_compiler(
new_query_compiler=self._query_compiler.shift(
periods, freq, axis, fill_value
),
inplace=False,
)
def skew(
self,
axis: Axis = 0,
skipna: bool = True,
numeric_only=False,
**kwargs,
) -> Series | float: # noqa: PR01, RT01, D200
"""
Return unbiased skew over requested axis.
"""
return self._stat_operation("skew", axis, skipna, numeric_only, **kwargs)
def sort_index(
self,
*,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
ignore_index: bool = False,
key: Optional[IndexKeyFunc] = None,
) -> Self | None: # noqa: PR01, RT01, D200
"""
Sort object by labels (along an axis).
"""
        # pandas throws this exception. See pandas issue #39434
if ascending is None:
raise ValueError(
"the `axis` parameter is not supported in the pandas implementation of argsort()"
)
axis = self._get_axis_number(axis)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
kind=kind,
na_position=na_position,
sort_remaining=sort_remaining,
ignore_index=ignore_index,
key=key,
)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
def sort_values(
self,
by,
*,
axis=0,
ascending=True,
inplace: bool = False,
kind="quicksort",
na_position="last",
ignore_index: bool = False,
key: Optional[IndexKeyFunc] = None,
) -> Self | None: # noqa: PR01, RT01, D200
"""
Sort by the values along either axis.
"""
axis = self._get_axis_number(axis)
inplace = validate_bool_kwarg(inplace, "inplace")
ascending = validate_ascending(ascending)
if axis == 0:
result = self._query_compiler.sort_rows_by_column_values(
by,
ascending=ascending,
kind=kind,
na_position=na_position,
ignore_index=ignore_index,
key=key,
)
else:
result = self._query_compiler.sort_columns_by_row_values(
by,
ascending=ascending,
kind=kind,
na_position=na_position,
ignore_index=ignore_index,
key=key,
)
return self._create_or_update_from_compiler(result, inplace)
def std(
self,
axis: Axis = 0,
skipna: bool = True,
ddof: int = 1,
numeric_only=False,
**kwargs,
) -> Series | float: # noqa: PR01, RT01, D200
"""
Return sample standard deviation over requested axis.
"""
return self._stat_operation(
"std", axis, skipna, numeric_only, ddof=ddof, **kwargs
)
def sub(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get subtraction of `BasePandasDataset` and `other`, element-wise (binary operator `sub`).
"""
return self._binary_op(
"sub", other, axis=axis, level=level, fill_value=fill_value
)
subtract: Self = sub
def swapaxes(self, axis1, axis2, copy=None) -> Self: # noqa: PR01, RT01, D200
"""
Interchange axes and swap values axes appropriately.
"""
if copy is None:
copy = True
axis1 = self._get_axis_number(axis1)
axis2 = self._get_axis_number(axis2)
if axis1 != axis2:
return self.transpose()
if copy:
return self.copy()
return self
def swaplevel(self, i=-2, j=-1, axis=0) -> Self: # noqa: PR01, RT01, D200
"""
Swap levels `i` and `j` in a `MultiIndex`.
"""
axis = self._get_axis_number(axis)
idx = self.index if axis == 0 else self.columns
return self.set_axis(idx.swaplevel(i, j), axis=axis)
def tail(self, n=5) -> Self: # noqa: PR01, RT01, D200
"""
Return the last `n` rows.
"""
if n != 0:
return self.iloc[-n:]
return self.iloc[len(self) :]
def take(self, indices, axis=0, **kwargs) -> Self: # noqa: PR01, RT01, D200
"""
Return the elements in the given *positional* indices along an axis.
"""
axis = self._get_axis_number(axis)
slice_obj = indices if axis == 0 else (slice(None), indices)
return self.iloc[slice_obj]
def to_clipboard(
self, excel=True, sep=None, **kwargs
): # pragma: no cover # noqa: PR01, RT01, D200
"""
Copy object to the system clipboard.
"""
return self._default_to_pandas("to_clipboard", excel=excel, sep=sep, **kwargs)
@expanduser_path_arg("path_or_buf")
def to_csv(
self,
path_or_buf=None,
sep=",",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
mode="w",
encoding=None,
compression="infer",
quoting=None,
quotechar='"',
lineterminator=None,
chunksize=None,
date_format=None,
doublequote=True,
escapechar=None,
decimal=".",
errors: str = "strict",
storage_options: StorageOptions = None,
) -> str | None: # pragma: no cover
from modin.core.execution.dispatching.factories.dispatcher import (
FactoryDispatcher,
)
return FactoryDispatcher.to_csv(
self._query_compiler,
path_or_buf=path_or_buf,
sep=sep,
na_rep=na_rep,
float_format=float_format,
columns=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
encoding=encoding,
compression=compression,
quoting=quoting,
quotechar=quotechar,
lineterminator=lineterminator,
chunksize=chunksize,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
errors=errors,
storage_options=storage_options,
)
@expanduser_path_arg("excel_writer")
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
inf_rep="inf",
freeze_panes=None,
storage_options: StorageOptions = None,
engine_kwargs=None,
) -> None: # pragma: no cover # noqa: PR01, RT01, D200
"""
Write object to an Excel sheet.
"""
return self._default_to_pandas(
"to_excel",
excel_writer,
sheet_name=sheet_name,
na_rep=na_rep,
float_format=float_format,
columns=columns,
header=header,
index=index,
index_label=index_label,
startrow=startrow,
startcol=startcol,
engine=engine,
merge_cells=merge_cells,
inf_rep=inf_rep,
freeze_panes=freeze_panes,
storage_options=storage_options,
engine_kwargs=engine_kwargs,
)
def to_dict(self, orient="dict", into=dict, index=True) -> dict:
return self._query_compiler.dataframe_to_dict(orient, into, index)
@expanduser_path_arg("path_or_buf")
def to_hdf(
self,
path_or_buf,
key: str,
mode: Literal["a", "w", "r+"] = "a",
complevel: int | None = None,
complib: Literal["zlib", "lzo", "bzip2", "blosc"] | None = None,
append: bool = False,
format: Literal["fixed", "table"] | None = None,
index: bool = True,
min_itemsize: int | dict[str, int] | None = None,
nan_rep=None,
dropna: bool | None = None,
data_columns: Literal[True] | list[str] | None = None,
errors: str = "strict",
encoding: str = "UTF-8",
) -> None: # pragma: no cover # noqa: PR01, RT01, D200
"""
Write the contained data to an HDF5 file using HDFStore.
"""
return self._default_to_pandas(
"to_hdf",
path_or_buf,
key=key,
mode=mode,
complevel=complevel,
complib=complib,
append=append,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
@expanduser_path_arg("path_or_buf")
def to_json(
self,
path_or_buf=None,
orient=None,
date_format=None,
double_precision=10,
force_ascii=True,
date_unit="ms",
default_handler=None,
lines=False,
compression="infer",
index=None,
indent=None,
storage_options: StorageOptions = None,
mode="w",
) -> str | None: # pragma: no cover # noqa: PR01, RT01, D200
"""
Convert the object to a JSON string.
"""
from modin.core.execution.dispatching.factories.dispatcher import (
FactoryDispatcher,
)
return FactoryDispatcher.to_json(
self._query_compiler,
path_or_buf,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
indent=indent,
storage_options=storage_options,
mode=mode,
)
@expanduser_path_arg("buf")
def to_latex(
self,
buf=None,
columns=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
caption=None,
label=None,
position=None,
) -> str | None: # pragma: no cover # noqa: PR01, RT01, D200
"""
Render object to a LaTeX tabular, longtable, or nested table.
"""
return self._default_to_pandas(
"to_latex",
buf=buf,
columns=columns,
header=header,
index=index,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
index_names=index_names,
bold_rows=bold_rows,
column_format=column_format,
longtable=longtable,
escape=escape,
encoding=encoding,
decimal=decimal,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
position=position,
)
@expanduser_path_arg("buf")
def to_markdown(
self,
buf=None,
mode: str = "wt",
index: bool = True,
storage_options: StorageOptions = None,
**kwargs,
) -> str: # noqa: PR01, RT01, D200
"""
Print `BasePandasDataset` in Markdown-friendly format.
"""
return self._default_to_pandas(
"to_markdown",
buf=buf,
mode=mode,
index=index,
storage_options=storage_options,
**kwargs,
)
@expanduser_path_arg("path")
def to_pickle(
self,
path,
compression: CompressionOptions = "infer",
protocol: int = pkl.HIGHEST_PROTOCOL,
storage_options: StorageOptions = None,
) -> None: # pragma: no cover # noqa: PR01, D200
"""
Pickle (serialize) object to file.
"""
from modin.pandas import to_pickle
to_pickle(
self,
path,
compression=compression,
protocol=protocol,
storage_options=storage_options,
)
def _to_bare_numpy(
self, dtype=None, copy=False, na_value=lib.no_default
): # noqa: PR01, RT01, D200
"""
Convert the `BasePandasDataset` to a NumPy array.
"""
return self._query_compiler.to_numpy(
dtype=dtype,
copy=copy,
na_value=na_value,
)
def to_numpy(
self, dtype=None, copy=False, na_value=lib.no_default
) -> np.ndarray: # noqa: PR01, RT01, D200
"""
Convert the `BasePandasDataset` to a NumPy array or a Modin wrapper for NumPy array.
"""
from modin.config import ModinNumpy
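        # When the experimental Modin NumPy API is enabled, return Modin's
        # array wrapper instead of materializing a plain ``np.ndarray``.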
if ModinNumpy.get():
from ..numpy.arr import array
return array(self, copy=copy)
return self._to_bare_numpy(
dtype=dtype,
copy=copy,
na_value=na_value,
)
# TODO(williamma12): When this gets implemented, have the series one call this.
def to_period(
self, freq=None, axis=0, copy=None
) -> Self: # pragma: no cover # noqa: PR01, RT01, D200
"""
Convert `BasePandasDataset` from DatetimeIndex to PeriodIndex.
"""
return self._default_to_pandas("to_period", freq=freq, axis=axis, copy=copy)
@expanduser_path_arg("buf")
def to_string(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
max_rows=None,
min_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
line_width=None,
max_colwidth=None,
encoding=None,
) -> str | None: # noqa: PR01, RT01, D200
"""
Render a `BasePandasDataset` to a console-friendly tabular output.
"""
return self._default_to_pandas(
"to_string",
buf=buf,
columns=columns,
col_space=col_space,
header=header,
index=index,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
index_names=index_names,
justify=justify,
            max_rows=max_rows,
            min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
line_width=line_width,
max_colwidth=max_colwidth,
encoding=encoding,
)
def to_sql(
self,
name,
con,
schema=None,
if_exists="fail",
index=True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> int | None: # noqa: PR01, D200
"""
Write records stored in a `BasePandasDataset` to a SQL database.
"""
new_query_compiler = self._query_compiler
        # write the index to the database by inserting it into the DataFrame
if index:
new_query_compiler = new_query_compiler.reset_index()
if index_label is not None:
if not is_list_like(index_label):
index_label = [index_label]
new_query_compiler.columns = list(index_label) + list(
new_query_compiler.columns[len(index_label) :]
)
# so pandas._to_sql will not write the index to the database as well
index = False
from modin.core.execution.dispatching.factories.dispatcher import (
FactoryDispatcher,
)
FactoryDispatcher.to_sql(
new_query_compiler,
name=name,
con=con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
# TODO(williamma12): When this gets implemented, have the series one call this.
def to_timestamp(
self, freq=None, how="start", axis=0, copy=None
) -> Self: # noqa: PR01, RT01, D200
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
"""
return self._default_to_pandas(
"to_timestamp", freq=freq, how=how, axis=axis, copy=copy
)
def to_xarray(self): # noqa: PR01, RT01, D200
"""
Return an xarray object from the `BasePandasDataset`.
"""
return self._default_to_pandas("to_xarray")
def truediv(
self, other, axis="columns", level=None, fill_value=None
) -> Self: # noqa: PR01, RT01, D200
"""
Get floating division of `BasePandasDataset` and `other`, element-wise (binary operator `truediv`).
"""
return self._binary_op(
"truediv", other, axis=axis, level=level, fill_value=fill_value
)
div: Self = truediv
divide: Self = truediv
def truncate(
self, before=None, after=None, axis=None, copy=None
) -> Self: # noqa: PR01, RT01, D200
"""
Truncate a `BasePandasDataset` before and after some index value.
"""
axis = self._get_axis_number(axis)
if (
not self._get_axis(axis).is_monotonic_increasing
and not self._get_axis(axis).is_monotonic_decreasing
):
raise ValueError("truncate requires a sorted index")
if before is not None and after is not None and before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
s = slice(*self._get_axis(axis).slice_locs(before, after))
slice_obj = s if axis == 0 else (slice(None), s)
return self.iloc[slice_obj]
def transform(
self, func, axis=0, *args, **kwargs
) -> Self: # noqa: PR01, RT01, D200
"""
Call ``func`` on self producing a `BasePandasDataset` with the same axis shape as self.
"""
kwargs["is_transform"] = True
self._validate_function(func)
try:
result = self.agg(func, axis=axis, *args, **kwargs)
except (TypeError, pandas.errors.SpecificationError):
raise
except Exception as err:
raise ValueError("Transform function failed") from err
if getattr(result, "_pandas_class", None) not in (
pandas.Series,
pandas.DataFrame,
) or not result.index.equals(self.index):
raise ValueError("Function did not transform")
return result
def tz_convert(
self, tz, axis=0, level=None, copy=None
) -> Self: # noqa: PR01, RT01, D200
"""
Convert tz-aware axis to target time zone.
"""
if copy is None:
copy = True
return self._create_or_update_from_compiler(
self._query_compiler.tz_convert(
tz, axis=self._get_axis_number(axis), level=level, copy=copy
),
inplace=(not copy),
)
def tz_localize(
self, tz, axis=0, level=None, copy=None, ambiguous="raise", nonexistent="raise"
) -> Self: # noqa: PR01, RT01, D200
"""
Localize tz-naive index of a `BasePandasDataset` to target time zone.
"""
if copy is None:
copy = True
return self._create_or_update_from_compiler(
self._query_compiler.tz_localize(
tz,
axis=self._get_axis_number(axis),
level=level,
copy=copy,
ambiguous=ambiguous,
nonexistent=nonexistent,
),
inplace=(not copy),
)
def interpolate(
self,
method="linear",
*,
axis=0,
limit=None,
inplace=False,
limit_direction: Optional[str] = None,
limit_area=None,
downcast=lib.no_default,
**kwargs,
) -> Self: # noqa: PR01, RT01, D200
if downcast is not lib.no_default:
warnings.warn(
f"The 'downcast' keyword in {type(self).__name__}.interpolate "
+ "is deprecated and will be removed in a future version. "
+ "Call result.infer_objects(copy=False) on the result instead.",
FutureWarning,
)
else:
downcast = None
return self._create_or_update_from_compiler(
self._query_compiler.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=False,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
),
inplace=inplace,
)
# TODO: uncomment the following lines when #3331 issue will be closed
# @prepend_to_notes(
# """
# In comparison with pandas, Modin's ``value_counts`` returns Series with ``MultiIndex``
# only if multiple columns were passed via the `subset` parameter, otherwise, the resulted
# Series's index will be a regular single dimensional ``Index``.
# """
# )
@_inherit_docstrings(
pandas.DataFrame.value_counts, apilink="pandas.DataFrame.value_counts"
)
def value_counts(
self,
subset: Sequence[Hashable] | None = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
dropna: bool = True,
) -> Series:
if subset is None:
subset = self._query_compiler.columns
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".*groupby keys will be sorted anyway.*",
category=UserWarning,
)
counted_values = self.groupby(
by=subset, dropna=dropna, observed=True, sort=False
).size()
if sort:
if counted_values.name is None:
counted_values.name = 0
by = counted_values.name
result = counted_values._query_compiler.sort_rows_by_column_values(
columns=by,
ascending=ascending,
)
counted_values = self._create_or_update_from_compiler(result)
if isinstance(counted_values, pd.DataFrame):
counted_values = counted_values.squeeze(axis=1)
if normalize:
counted_values = counted_values / counted_values.sum()
# TODO: uncomment when strict compatibility mode is implemented:
# https://github.com/modin-project/modin/issues/3411
# if STRICT_COMPABILITY and not isinstance(counted_values.index, MultiIndex):
# counted_values.index = pandas.MultiIndex.from_arrays(
# [counted_values.index], names=counted_values.index.names
# )
# https://pandas.pydata.org/pandas-docs/version/2.0/whatsnew/v2.0.0.html#value-counts-sets-the-resulting-name-to-count
counted_values.name = "proportion" if normalize else "count"
return counted_values
def var(
self,
axis: Axis = 0,
skipna: bool = True,
ddof: int = 1,
numeric_only=False,
**kwargs,
) -> Series | float: # noqa: PR01, RT01, D200
"""
Return unbiased variance over requested axis.
"""
return self._stat_operation(
"var", axis, skipna, numeric_only, ddof=ddof, **kwargs
)
def __abs__(self) -> Self:
"""
Return a `BasePandasDataset` with absolute numeric value of each element.
Returns
-------
BasePandasDataset
Object containing the absolute value of each element.
"""
return self.abs()
@_doc_binary_op(
operation="union", bin_op="and", right="other", **_doc_binary_op_kwargs
)
def __and__(self, other) -> Self:
return self._binary_op("__and__", other, axis=0)
@_doc_binary_op(
operation="union", bin_op="rand", right="other", **_doc_binary_op_kwargs
)
def __rand__(self, other) -> Self:
return self._binary_op("__rand__", other, axis=0)
def __array__(
self, dtype: npt.DTypeLike | None = None, copy: bool | None = None
) -> np.ndarray:
"""
Return the values as a NumPy array.
Parameters
----------
dtype : str or np.dtype, optional
The dtype of returned array.
copy : bool, default: None
This parameter has no effect; the method always returns a copy of
the data.
Returns
-------
arr : np.ndarray
NumPy representation of Modin object.
"""
return self._to_bare_numpy(dtype)
def __copy__(self, deep=True) -> Self:
"""
Return the copy of the `BasePandasDataset`.
Parameters
----------
deep : bool, default: True
Whether the copy should be deep or not.
Returns
-------
BasePandasDataset
"""
return self.copy(deep=deep)
def __deepcopy__(self, memo=None) -> Self:
"""
Return the deep copy of the `BasePandasDataset`.
Parameters
----------
memo : Any, optional
Deprecated parameter.
Returns
-------
BasePandasDataset
"""
return self.copy(deep=True)
@_doc_binary_op(
operation="equality comparison",
bin_op="eq",
right="other",
**_doc_binary_op_kwargs,
)
def __eq__(self, other) -> Self:
return self.eq(other)
def __finalize__(self, other, method=None, **kwargs) -> Self:
"""
Propagate metadata from `other` to `self`.
Parameters
----------
other : BasePandasDataset
The object from which to get the attributes that we are going
to propagate.
method : str, optional
A passed method name providing context on where `__finalize__`
was called.
**kwargs : dict
Additional keywords arguments to be passed to `__finalize__`.
Returns
-------
BasePandasDataset
"""
return self._default_to_pandas("__finalize__", other, method=method, **kwargs)
@_doc_binary_op(
operation="greater than or equal comparison",
bin_op="ge",
right="right",
**_doc_binary_op_kwargs,
)
def __ge__(self, right) -> Self:
return self.ge(right)
def __getitem__(self, key) -> Self:
"""
Retrieve dataset according to `key`.
Parameters
----------
key : callable, scalar, slice, str or tuple
The global row index to retrieve data from.
Returns
-------
BasePandasDataset
Located dataset.
"""
if not self._query_compiler.lazy_row_count and len(self) == 0:
return self._default_to_pandas("__getitem__", key)
# see if we can slice the rows
# This lets us reuse code in pandas to error check
indexer = None
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind="getitem")
if indexer is not None:
return self._getitem_slice(indexer)
else:
return self._getitem(key)
def xs(
self,
key,
axis=0,
level=None,
drop_level: bool = True,
) -> Self: # noqa: PR01, RT01, D200
"""
Return cross-section from the Series/DataFrame.
"""
axis = self._get_axis_number(axis)
labels = self.columns if axis else self.index
if isinstance(key, list):
# deprecated in pandas, to be removed in 2.0
warnings.warn(
"Passing lists as key for xs is deprecated and will be removed in a "
+ "future version. Pass key as a tuple instead.",
FutureWarning,
)
if level is not None:
if not isinstance(labels, pandas.MultiIndex):
raise TypeError("Index must be a MultiIndex")
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
_indexer = [slice(None)] * self.ndim
_indexer[axis] = loc
indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, self._pandas_class._get_axis_name(axis), new_ax)
return result
if axis == 1:
if drop_level:
return self[key]
index = self.columns
else:
index = self.index
new_index = None
if isinstance(index, pandas.MultiIndex):
loc, new_index = index._get_loc_level(key, level=0)
if not drop_level:
if is_integer(loc):
new_index = index[loc : loc + 1]
else:
new_index = index[loc]
else:
loc = index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(loc,) = loc.nonzero()
# Note: pandas uses self._take_with_is_copy here
return self.take(loc, axis=axis)
if not is_scalar(loc):
new_index = index[loc]
if is_scalar(loc) and axis == 0:
# In this case loc should be an integer
if self.ndim == 1:
# if we encounter an array-like and we only have 1 dim
# that means there are lists/ndarrays inside the Series!
# so just return them (pandas GH 6394)
return self.iloc[loc]
result = self.iloc[loc]
elif is_scalar(loc):
result = self.iloc[:, slice(loc, loc + 1)]
elif axis == 1:
result = self.iloc[:, loc]
else:
result = self.iloc[loc]
if new_index is None:
raise RuntimeError(
"`new_index` variable shouldn't be equal to None here, something went wrong."
)
result.index = new_index
# Note: pandas does result._set_is_copy here
return result
__hash__ = None
def _setitem_slice(self, key: slice, value) -> None:
"""
Set rows specified by `key` slice with `value`.
Parameters
----------
key : location or index-based slice
Key that points to rows to modify.
value : object
Value to assign to the rows.
"""
indexer = self.index._convert_slice_indexer(key, kind="getitem")
self.iloc[indexer] = value
def _getitem_slice(self, key: slice) -> Self:
"""
Get rows specified by `key` slice.
Parameters
----------
key : location or index-based slice
Key that points to rows to retrieve.
Returns
-------
modin.pandas.BasePandasDataset
Selected rows.
"""
if is_full_grab_slice(
key,
# Avoid triggering shape computation for lazy executions
sequence_len=(None if self._query_compiler.lazy_row_count else len(self)),
):
return self.copy()
return self.iloc[key]
@_doc_binary_op(
operation="greater than comparison",
bin_op="gt",
right="right",
**_doc_binary_op_kwargs,
)
def __gt__(self, right) -> Self:
return self.gt(right)
def __invert__(self) -> Self:
"""
Apply bitwise inverse to each element of the `BasePandasDataset`.
Returns
-------
BasePandasDataset
New BasePandasDataset containing bitwise inverse to each value.
"""
if not all(is_bool_dtype(d) or is_integer_dtype(d) for d in self._get_dtypes()):
raise TypeError(
"bad operand type for unary ~: '{}'".format(
next(
d
for d in self._get_dtypes()
if not (is_bool_dtype(d) or is_integer_dtype(d))
)
)
)
return self.__constructor__(query_compiler=self._query_compiler.invert())
@_doc_binary_op(
operation="less than or equal comparison",
bin_op="le",
right="right",
**_doc_binary_op_kwargs,
)
def __le__(self, right) -> Self:
return self.le(right)
def __len__(self) -> int:
"""
Return length of info axis.
Returns
-------
int
"""
return self._query_compiler.get_axis_len(0)
@_doc_binary_op(
operation="less than comparison",
bin_op="lt",
right="right",
**_doc_binary_op_kwargs,
)
def __lt__(self, right) -> Self:
return self.lt(right)
def __matmul__(self, other) -> Self | np.ndarray | Scalar:
"""
Compute the matrix multiplication between the `BasePandasDataset` and `other`.
Parameters
----------
other : BasePandasDataset or array-like
The other object to compute the matrix product with.
Returns
-------
BasePandasDataset, np.ndarray or scalar
"""
return self.dot(other)
@_doc_binary_op(
operation="not equal comparison",
bin_op="ne",
right="other",
**_doc_binary_op_kwargs,
)
def __ne__(self, other) -> Self:
return self.ne(other)
def __neg__(self) -> Self:
"""
Change the sign for every value of self.
Returns
-------
BasePandasDataset
"""
self._validate_dtypes(numeric_only=True)
return self.__constructor__(query_compiler=self._query_compiler.negative())
def __nonzero__(self):
"""
Evaluate `BasePandasDataset` as boolean object.
Raises
------
ValueError
Always since truth value for self is ambiguous.
"""
raise ValueError(
f"The truth value of a {self.__class__.__name__} is ambiguous. "
+ "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
@_doc_binary_op(
operation="disjunction",
bin_op="or",
right="other",
**_doc_binary_op_kwargs,
)
def __or__(self, other) -> Self:
return self._binary_op("__or__", other, axis=0)
@_doc_binary_op(
operation="disjunction",
bin_op="ror",
right="other",
**_doc_binary_op_kwargs,
)
def __ror__(self, other) -> Self:
return self._binary_op("__ror__", other, axis=0)
def __sizeof__(self) -> int:
"""
Generate the total memory usage for a `BasePandasDataset`.
Returns
-------
int
"""
return self._query_compiler.sizeof()
def __str__(self) -> str: # pragma: no cover
"""
Return str(self).
Returns
-------
str
"""
return repr(self)
@_doc_binary_op(
operation="exclusive disjunction",
bin_op="xor",
right="other",
**_doc_binary_op_kwargs,
)
def __xor__(self, other) -> Self:
return self._binary_op("__xor__", other, axis=0)
@_doc_binary_op(
operation="exclusive disjunction",
bin_op="rxor",
right="other",
**_doc_binary_op_kwargs,
)
def __rxor__(self, other) -> Self:
return self._binary_op("__rxor__", other, axis=0)
@property
def size(self) -> int: # noqa: RT01, D200
"""
Return an int representing the number of elements in this `BasePandasDataset` object.
"""
return len(self._query_compiler.index) * len(self._query_compiler.columns)
@property
def values(self) -> np.ndarray: # noqa: RT01, D200
"""
Return a NumPy representation of the `BasePandasDataset`.
"""
return self.to_numpy()
def _repartition(self, axis: Optional[int] = None) -> Self:
"""
Repartitioning Modin objects to get ideal partitions inside.
Allows improving performance where the query compiler cannot yet do so,
by doing implicit repartitioning.
Parameters
----------
axis : {0, 1, None}, optional
The axis along which the repartitioning occurs.
`None` is used for repartitioning along both axes.
Returns
-------
DataFrame or Series
The repartitioned dataframe or series, depending on the original type.
"""
allowed_axis_values = (0, 1, None)
if axis not in allowed_axis_values:
raise ValueError(
f"Passed `axis` parameter: {axis}, but should be one of {allowed_axis_values}"
)
return self.__constructor__(
query_compiler=self._query_compiler.repartition(axis=axis)
)
@disable_logging
def __getattribute__(self, item) -> Any:
"""
Return item from the `BasePandasDataset`.
Parameters
----------
item : hashable
Item to get.
Returns
-------
Any
"""
# NOTE that to get an attribute, python calls __getattribute__() first and
# then falls back to __getattr__() if the former raises an AttributeError.
if item not in EXTENSION_NO_LOOKUP:
extensions_result = self._getattribute__from_extension_impl(
item, __class__._extensions
)
if extensions_result is not sentinel:
return extensions_result
attr = super().__getattribute__(item)
if item not in _DEFAULT_BEHAVIOUR and not self._query_compiler.lazy_shape:
# We default to pandas on empty DataFrames. This avoids a large amount of
# pain in the underlying implementation and returns a result immediately rather
# than dealing with the edge cases that empty DataFrames have.
if callable(attr) and self.empty and hasattr(self._pandas_class, item):
def default_handler(*args, **kwargs):
return self._default_to_pandas(item, *args, **kwargs)
return default_handler
return attr
def __array_ufunc__(
self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any
) -> DataFrame | Series | Any:
"""
Apply the `ufunc` to the `BasePandasDataset`.
Parameters
----------
ufunc : np.ufunc
The NumPy ufunc to apply.
method : str
The method to apply.
*inputs : tuple
The inputs to the ufunc.
**kwargs : dict
Additional keyword arguments.
Returns
-------
BasePandasDataset
The result of the ufunc applied to the `BasePandasDataset`.
"""
return self._query_compiler.do_array_ufunc_implementation(
self, ufunc, method, *inputs, **kwargs
)
def __array_function__(
self,
func: np.func,
types: tuple,
args: tuple,
kwargs: dict,
) -> DataFrame | Series | Any:
"""
Apply `func` to the `BasePandasDataset`.
This function implements NEP18-style dispatch for certain NumPy functions:
https://numpy.org/neps/nep-0018-array-function-protocol.html#nep18
By default, this function will transparently call __array__, followed by __array_function__
on the returned NumPy array. We implement this function to prevent bugs with the extension
system when another backend overrides this method.
Parameters
----------
func : np.func
The NumPy func to apply.
types : tuple
The types of the args.
args : tuple
The args to the func.
kwargs : dict
Additional keyword arguments.
Returns
-------
DataFrame | Series | Any
The result of applying the function to this dataset. By default, it will return
a NumPy array.
"""
return self._query_compiler.do_array_function_implementation(
self, func, types, args, kwargs
)
# namespace for additional Modin functions that are not available in Pandas
modin: ModinAPI = CachedAccessor("modin", ModinAPI)
@disable_logging
def is_backend_pinned(self) -> bool:
"""
Get whether this object's data is pinned to a particular backend.
Returns
-------
bool
True if the data is pinned.
"""
return self._pinned
def _set_backend_pinned(
self, pinned: bool, inplace: bool = False
) -> Optional[Self]:
"""
Update whether this object's data is pinned to a particular backend.
Parameters
----------
pinned : bool
Whether the data is pinned.
inplace : bool, default: False
Whether to update the object in place.
Returns
-------
Optional[Self]
The object with the new pin state, if `inplace` is False. Otherwise, None.
"""
change = self.is_backend_pinned() != pinned
if inplace:
self._pinned = pinned
return None
else:
if change:
new_obj = self.__constructor__(query_compiler=self._query_compiler)
new_obj._pinned = pinned
return new_obj
return self
@doc(SET_BACKEND_DOC, class_name=__qualname__)
def set_backend(
self, backend: str, inplace: bool = False, *, switch_operation: str = None
) -> Optional[Self]:
# TODO(https://github.com/modin-project/modin/issues/7467): refactor
# to avoid this cyclic import in most places we do I/O, e.g. in
# modin/pandas/io.py
from modin.core.execution.dispatching.factories.dispatcher import (
FactoryDispatcher,
)
progress_split_count = 2
progress_iter = iter(range(progress_split_count))
self_backend = self.get_backend()
normalized_backend = Backend.normalize(backend)
if normalized_backend != self_backend:
max_rows, max_cols = self._query_compiler._max_shape()
# Format the transfer string to be relatively short, but informative. Each
# backend name is given an allowable width of 10, and the shape integers use the
# general format, which falls back to scientific notation when needed.
std_field_length = 10
operation_str = switch_operation
self_backend_str = self_backend
normalized_backend_str = normalized_backend
if switch_operation is None:
operation_str = ""
# Keep only the method portion of switch_operation, so
# "DataFrame.merge" becomes "merge"
operation_str = operation_str.split(".")[-1]
# truncate all strings to the field length if needed
if len(operation_str) > 15:
operation_str = operation_str[: 15 - 3] + "..."
if len(self_backend_str) > std_field_length:
self_backend_str = self_backend_str[: std_field_length - 3] + "..."
if len(normalized_backend_str) > std_field_length:
normalized_backend_str = (
normalized_backend_str[: std_field_length - 3] + "..."
)
# format the estimated max shape
max_shape_str = f"({max_rows:.0g}, {max_cols:.0g})"
desc = (
f"Transfer: {self_backend_str:>10.10} → {normalized_backend_str:<10.10} "
+ f" | {operation_str:^15.15} ≃ {max_shape_str:<10}"
)
if ShowBackendSwitchProgress.get():
try:
from tqdm.auto import trange
progress_iter = iter(
trange(
progress_split_count, desc=desc, bar_format="{desc} [{bar}]"
)
)
except ImportError:
# Fallback to simple print statement when tqdm is not available.
# Print to stderr to match tqdm's behavior.
print(desc, file=sys.stderr) # noqa: T201
else:
# Use a dummy progress iterator with no side effects if we do
# not want to show the progress.
progress_iter = iter(range(progress_split_count))
else:
return None if inplace else self
# If tqdm is imported and a conversion is necessary, then display a progress bar.
# Otherwise, use fallback print statements.
next(progress_iter)
# Attempt to transfer data based on the following preference order.
# 1. The `self._query_compiler.move_to()`, if implemented.
# 2. Otherwise, tries the other `query_compiler`'s `move_from()` method.
# 3. If both methods return `NotImplemented`, it falls back to materializing
# as a pandas DataFrame, and then creates a new `query_compiler` on the
# specified backend using `from_pandas`.
query_compiler = self._query_compiler.move_to(backend)
if query_compiler is NotImplemented:
query_compiler = FactoryDispatcher._get_prepared_factory_for_backend(
backend
).io_cls.query_compiler_cls.move_from(
self._query_compiler,
)
if query_compiler is NotImplemented:
pandas_self = self._query_compiler.to_pandas()
next(progress_iter)
query_compiler = FactoryDispatcher.from_pandas(
df=pandas_self, backend=backend
)
else:
next(progress_iter)
try:
next(progress_iter)
except StopIteration:
# Last call to next informs tqdm that the operation is done
pass
if inplace:
self._update_inplace(query_compiler)
# Always unpin after an explicit set_backend operation
self._pinned = False
return None
else:
return self.__constructor__(query_compiler=query_compiler)
move_to = set_backend
@doc(GET_BACKEND_DOC, class_name=__qualname__)
@disable_logging
def get_backend(self) -> str:
return self._query_compiler.get_backend()
@disable_logging
def __setattr__(self, key: str, value: Any) -> None:
"""
Set attribute on this `BasePandasDataset`.
Parameters
----------
key : str
The attribute name.
value : Any
The attribute value.
Returns
-------
None
"""
# An extension property is only accessible if the backend supports it.
extension = self._get_extension(key, __class__._extensions)
if extension is not sentinel and hasattr(extension, "__set__"):
return extension.__set__(self, value)
return super().__setattr__(key, value)
@disable_logging
def __delattr__(self, name) -> None:
"""
Delete attribute on this `BasePandasDataset`.
Parameters
----------
name : str
The attribute name.
Returns
-------
None
"""
# An extension property is only accessible if the backend supports it.
extension = self._get_extension(name, __class__._extensions)
if extension is not sentinel and hasattr(extension, "__delete__"):
return extension.__delete__(self)
return super().__delattr__(name)
@disable_logging
@_inherit_docstrings(QueryCompilerCaster._get_query_compiler)
def _get_query_compiler(self):
return getattr(self, "_query_compiler", None)
|
BasePandasDataset
|
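The `value_counts` implementation in the Modin `BasePandasDataset` entry above builds its result from `groupby(...).size()`, an optional descending sort, and division by the total when `normalize=True`. A minimal pandas-only sketch of that equivalence (column names and data are invented for illustration):

```python
import pandas as pd

df = pd.DataFrame({"city": ["Prague", "Prague", "Brno"], "year": [2020, 2020, 2021]})

# Count unique rows via groupby().size(), as the Modin code above does ...
counts = df.groupby(["city", "year"], dropna=True, observed=True).size()
counts = counts.sort_values(ascending=False)

# ... and divide by the total when normalize=True.
proportions = counts / counts.sum()

print(counts)       # same counts that df.value_counts() reports
print(proportions)  # same values as df.value_counts(normalize=True)
```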
python
|
huggingface__transformers
|
src/transformers/models/imagegpt/modeling_imagegpt.py
|
{
"start": 17358,
"end": 26929
}
|
class ____(ImageGPTPreTrainedModel):
def __init__(self, config: ImageGPTConfig):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([ImageGPTBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.ln_f = ImageGPTLayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Any,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
Examples:
```python
>>> from transformers import AutoImageProcessor, ImageGPTModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
>>> model = ImageGPTModel.from_pretrained("openai/imagegpt-small")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + input_shape[-1], device=device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# ImageGPTAttention mask.
if attention_mask is not None:
if batch_size <= 0:
raise ValueError("batch_size has to be defined and > 0")
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT; we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and the dtype's smallest value for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device)
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(
hidden_states,
past_key_values,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[2],)
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
@auto_docstring(
custom_intro="""
The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
"""
)
|
ImageGPTModel
|
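The comment block inside `ImageGPTModel.forward` above describes turning a 2D padding mask into an additive bias on the attention scores. A standalone toy sketch of that transformation (values invented, not part of the model):

```python
import torch

# Toy 2D mask: 1.0 = attend, 0.0 = masked (batch_size=1, to_seq_length=3).
mask = torch.tensor([[1.0, 1.0, 0.0]])

# Broadcast shape [batch_size, 1, 1, to_seq_length], as in the model code above.
mask = mask[:, None, None, :]

# 0.0 where we attend, the dtype's smallest value where we mask; this bias is
# added to the raw attention scores before the softmax.
bias = (1.0 - mask) * torch.finfo(torch.float32).min

print(bias)  # tensor([[[[ 0.0000e+00,  0.0000e+00, -3.4028e+38]]]])
```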
python
|
ray-project__ray
|
python/ray/__init__.py
|
{
"start": 4236,
"end": 7661
}
|
class ____:
def __init__(self, name, real_worker):
self._name = name
self._real_worker = real_worker
self._warned = set()
def __getattr__(self, attr):
value = getattr(self._real_worker, attr)
if attr not in self._warned:
self._warned.add(attr)
logger.warning(
f"DeprecationWarning: `ray.{self._name}.{attr}` is a private "
"attribute and access will be removed in a future Ray version."
)
return value
# TODO(ekl) remove this entirely after 3rd party libraries are all migrated.
worker = _DeprecationWrapper("worker", ray._private.worker)
ray_constants = _DeprecationWrapper("ray_constants", ray._private.ray_constants)
serialization = _DeprecationWrapper("serialization", ray._private.serialization)
state = _DeprecationWrapper("state", ray._private.state)
# Public Ray APIs
__all__ = [
"__version__",
"_config",
"get_runtime_context",
"autoscaler",
"available_resources",
"cancel",
"client",
"ClientBuilder",
"cluster_resources",
"get",
"get_actor",
"get_gpu_ids",
"init",
"is_initialized",
"java_actor_class",
"java_function",
"cpp_function",
"kill",
"Language",
"method",
"nodes",
"put",
"remote",
"shutdown",
"show_in_dashboard",
"timeline",
"wait",
"LOCAL_MODE",
"SCRIPT_MODE",
"WORKER_MODE",
"LoggingConfig",
]
# Public APIs that should automatically trigger ray.init().
AUTO_INIT_APIS = {
"cancel",
"get",
"get_actor",
"get_gpu_ids",
"kill",
"put",
"wait",
"get_runtime_context",
}
# Public APIs that should not automatically trigger ray.init().
NON_AUTO_INIT_APIS = {
"ClientBuilder",
"LOCAL_MODE",
"Language",
"SCRIPT_MODE",
"WORKER_MODE",
"__version__",
"_config",
"autoscaler",
"available_resources",
"client",
"cluster_resources",
"cpp_function",
"init",
"is_initialized",
"java_actor_class",
"java_function",
"method",
"nodes",
"remote",
"show_in_dashboard",
"shutdown",
"timeline",
"LoggingConfig",
}
assert set(__all__) == AUTO_INIT_APIS | NON_AUTO_INIT_APIS
from ray._private.auto_init_hook import wrap_auto_init_for_all_apis # noqa: E402
wrap_auto_init_for_all_apis(AUTO_INIT_APIS)
del wrap_auto_init_for_all_apis
# Subpackages
__all__ += [
"actor",
"autoscaler",
"data",
"internal",
"util",
"widgets",
"workflow",
]
# ID types
__all__ += [
"ActorClassID",
"ActorID",
"NodeID",
"JobID",
"WorkerID",
"FunctionID",
"ObjectID",
"ObjectRef",
"ObjectRefGenerator",
"DynamicObjectRefGenerator",
"TaskID",
"UniqueID",
"PlacementGroupID",
]
# Delay importing of expensive, isolated subpackages. Note that for proper type
# checking support these imports must be kept in sync between type checking and
# runtime behavior.
if TYPE_CHECKING:
from ray import autoscaler
from ray import data
from ray import workflow
else:
def __getattr__(name: str):
import importlib
if name in ["data", "workflow", "autoscaler"]:
return importlib.import_module("." + name, __name__)
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
del os
del logging
del sys
del TYPE_CHECKING
|
_DeprecationWrapper
|
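The `_DeprecationWrapper` above works because `__getattr__` is consulted only when normal lookup fails on the proxy, so wrapped-module attributes trigger the warning path while the proxy's own fields do not. A self-contained toy version of the same pattern (all names here are invented):

```python
import logging
import types

logger = logging.getLogger(__name__)

class _ToyDeprecationWrapper:
    """Proxy that warns the first time each attribute of the wrapped module is used."""

    def __init__(self, name, real_module):
        self._name = name
        self._real_module = real_module
        self._warned = set()

    def __getattr__(self, attr):
        # Called only for attributes not found on the proxy itself.
        value = getattr(self._real_module, attr)
        if attr not in self._warned:
            self._warned.add(attr)
            logger.warning("`%s.%s` is private; access will be removed.", self._name, attr)
        return value

fake = types.ModuleType("fake")
fake.answer = 42
proxy = _ToyDeprecationWrapper("fake", fake)
print(proxy.answer, proxy.answer)  # warns once, prints "42 42"
```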
python
|
ansible__ansible
|
lib/ansible/errors/__init__.py
|
{
"start": 10870,
"end": 11971
}
|
class ____(AnsibleRuntimeError):
"""A file missing failure."""
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=..., orig_exc=None, paths=None, file_name=None):
self.file_name = file_name
self.paths = paths
if message:
message += "\n"
if self.file_name:
message += "Could not find or access '%s'" % to_text(self.file_name)
else:
message += "Could not find file"
if self.paths and isinstance(self.paths, _c.Sequence):
searched = to_text('\n\t'.join(self.paths))
if message:
message += "\n"
message += "Searched in:\n\t%s" % searched
message += " on the Ansible Controller.\nIf you are using a module and expect the file to exist on the remote, see the remote_src option"
super(AnsibleFileNotFound, self).__init__(message=message, obj=obj, show_content=show_content,
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
|
AnsibleFileNotFound
|
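The `AnsibleFileNotFound` constructor above assembles its message from the optional `file_name` and `paths`. A simplified standalone mirror of that string building, with hypothetical inputs, to show the text a user would see:

```python
def compose_message(file_name=None, paths=None, message=""):
    # Simplified mirror of the message construction in AnsibleFileNotFound.__init__.
    if message:
        message += "\n"
    if file_name:
        message += "Could not find or access '%s'" % file_name
    else:
        message += "Could not find file"
    if paths:
        if message:
            message += "\n"
        message += "Searched in:\n\t%s" % "\n\t".join(paths)
    message += (
        " on the Ansible Controller.\nIf you are using a module and expect the file to "
        "exist on the remote, see the remote_src option"
    )
    return message

print(compose_message(file_name="vars.yml", paths=["/etc/ansible", "./group_vars"]))
```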
python
|
ray-project__ray
|
rllib/utils/exploration/gaussian_noise.py
|
{
"start": 804,
"end": 9197
}
|
class ____(Exploration):
"""An exploration that adds white noise to continuous actions.
If explore=True, returns actions plus scale (annealed over time) x
Gaussian noise. A completely random period at the beginning is also
possible.
If explore=False, returns the deterministic action.
"""
def __init__(
self,
action_space: Space,
*,
framework: str,
model: ModelV2,
random_timesteps: int = 1000,
stddev: float = 0.1,
initial_scale: float = 1.0,
final_scale: float = 0.02,
scale_timesteps: int = 10000,
scale_schedule: Optional[Schedule] = None,
**kwargs
):
"""Initializes a GaussianNoise instance.
Args:
random_timesteps: The number of timesteps for which to act
completely randomly. Only after this number of timesteps, the
`self.scale` annealing process will start (see below).
stddev: The stddev (sigma) to use for the
Gaussian noise to be added to the actions.
initial_scale: The initial scaling weight to multiply
the noise with.
final_scale: The final scaling weight to multiply
the noise with.
scale_timesteps: The timesteps over which to linearly anneal
the scaling factor (after(!) having used random actions for
`random_timesteps` steps).
scale_schedule: An optional Schedule object
to use (instead of constructing one from the given parameters).
"""
assert framework is not None
super().__init__(action_space, model=model, framework=framework, **kwargs)
# Create the Random exploration module (used for the first n
# timesteps).
self.random_timesteps = random_timesteps
self.random_exploration = Random(
action_space, model=self.model, framework=self.framework, **kwargs
)
self.stddev = stddev
# The `scale` annealing schedule.
self.scale_schedule = scale_schedule or PiecewiseSchedule(
endpoints=[
(random_timesteps, initial_scale),
(random_timesteps + scale_timesteps, final_scale),
],
outside_value=final_scale,
framework=self.framework,
)
# The current timestep value (tf-var or python int).
self.last_timestep = get_variable(
np.array(0, np.int64),
framework=self.framework,
tf_name="timestep",
dtype=np.int64,
)
# Build the tf-info-op.
if self.framework == "tf":
self._tf_state_op = self.get_state()
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True
):
# Adds IID Gaussian noise for exploration, TD3-style.
if self.framework == "torch":
return self._get_torch_exploration_action(
action_distribution, explore, timestep
)
else:
return self._get_tf_exploration_action_op(
action_distribution, explore, timestep
)
def _get_tf_exploration_action_op(
self,
action_dist: ActionDistribution,
explore: bool,
timestep: Union[int, TensorType],
):
ts = timestep if timestep is not None else self.last_timestep
# The deterministic actions (if explore=False).
deterministic_actions = action_dist.deterministic_sample()
# Take a Gaussian sample with our stddev (mean=0.0) and scale it.
gaussian_sample = self.scale_schedule(ts) * tf.random.normal(
tf.shape(deterministic_actions), stddev=self.stddev
)
# Stochastic actions could either be: random OR action + noise.
random_actions, _ = self.random_exploration.get_tf_exploration_action_op(
action_dist, explore
)
stochastic_actions = tf.cond(
pred=tf.convert_to_tensor(ts < self.random_timesteps),
true_fn=lambda: random_actions,
false_fn=lambda: tf.clip_by_value(
deterministic_actions + gaussian_sample,
self.action_space.low * tf.ones_like(deterministic_actions),
self.action_space.high * tf.ones_like(deterministic_actions),
),
)
# Choose by `explore` (main exploration switch).
action = tf.cond(
pred=tf.constant(explore, dtype=tf.bool)
if isinstance(explore, bool)
else explore,
true_fn=lambda: stochastic_actions,
false_fn=lambda: deterministic_actions,
)
# Logp=always zero.
logp = zero_logps_from_actions(deterministic_actions)
# Increment `last_timestep` by 1 (or set to `timestep`).
if self.framework == "tf2":
if timestep is None:
self.last_timestep.assign_add(1)
else:
self.last_timestep.assign(tf.cast(timestep, tf.int64))
return action, logp
else:
assign_op = (
tf1.assign_add(self.last_timestep, 1)
if timestep is None
else tf1.assign(self.last_timestep, timestep)
)
with tf1.control_dependencies([assign_op]):
return action, logp
def _get_torch_exploration_action(
self,
action_dist: ActionDistribution,
explore: bool,
timestep: Union[int, TensorType],
):
# Set last timestep or (if not given) increase by one.
self.last_timestep = (
timestep if timestep is not None else self.last_timestep + 1
)
# Apply exploration.
if explore:
# Random exploration phase.
if self.last_timestep < self.random_timesteps:
action, _ = self.random_exploration.get_torch_exploration_action(
action_dist, explore=True
)
# Take a Gaussian sample with our stddev (mean=0.0) and scale it.
else:
det_actions = action_dist.deterministic_sample()
scale = self.scale_schedule(self.last_timestep)
gaussian_sample = scale * torch.normal(
mean=torch.zeros(det_actions.size()), std=self.stddev
).to(self.device)
action = torch.min(
torch.max(
det_actions + gaussian_sample,
torch.tensor(
self.action_space.low,
dtype=torch.float32,
device=self.device,
),
),
torch.tensor(
self.action_space.high, dtype=torch.float32, device=self.device
),
)
# No exploration -> Return deterministic actions.
else:
action = action_dist.deterministic_sample()
# Logp=always zero.
logp = torch.zeros((action.size()[0],), dtype=torch.float32, device=self.device)
return action, logp
@override(Exploration)
def get_state(self, sess: Optional["tf.Session"] = None):
"""Returns the current scale value.
Returns:
Union[float,tf.Tensor[float]]: The current scale value.
"""
if sess:
return sess.run(self._tf_state_op)
scale = self.scale_schedule(self.last_timestep)
return {
"cur_scale": convert_to_numpy(scale) if self.framework != "tf" else scale,
"last_timestep": convert_to_numpy(self.last_timestep)
if self.framework != "tf"
else self.last_timestep,
}
@override(Exploration)
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
if self.framework == "tf":
self.last_timestep.load(state["last_timestep"], session=sess)
elif isinstance(self.last_timestep, int):
self.last_timestep = state["last_timestep"]
else:
self.last_timestep.assign(state["last_timestep"])
|
GaussianNoise
|
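The schedule built in `GaussianNoise.__init__` above anneals the noise scale linearly from `initial_scale` to `final_scale` over the window `[random_timesteps, random_timesteps + scale_timesteps]` (before that window the scale is irrelevant because purely random actions are taken). A minimal NumPy stand-in for that piecewise-linear annealing, using the constructor's default parameter values:

```python
import numpy as np

def annealed_scale(t, random_timesteps=1000, scale_timesteps=10000,
                   initial_scale=1.0, final_scale=0.02):
    """Linear anneal inside the window; clamps to final_scale afterwards."""
    start, end = random_timesteps, random_timesteps + scale_timesteps
    frac = np.clip((t - start) / (end - start), 0.0, 1.0)
    return initial_scale + frac * (final_scale - initial_scale)

for t in (1000, 6000, 11000, 50000):
    print(t, round(float(annealed_scale(t)), 3))
# 1000 -> 1.0, 6000 -> 0.51, 11000 -> 0.02, 50000 -> 0.02
```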
python
|
eth-brownie__brownie
|
brownie/project/main.py
|
{
"start": 2440,
"end": 6453
}
|
class ____:
_path: Optional[pathlib.Path]
_build_path: Optional[pathlib.Path]
_sources: Sources
_build: Build
_containers: Dict[ContractName, ContractContainer]
def _compile(
self, contract_sources: Dict, compiler_config: CompilerConfig, silent: bool
) -> None:
compiler_config.setdefault("solc", {})
allow_paths = None
cwd = os.getcwd()
path = self._path
if path is not None:
_install_dependencies(path)
allow_paths = path.as_posix()
os.chdir(path)
try:
solc_config = compiler_config["solc"]
vyper_config = compiler_config["vyper"]
project_evm_version = compiler_config["evm_version"]
evm_version: Dict[Language, Optional[EvmVersion]] = {
"Solidity": solc_config.get("evm_version", project_evm_version),
"Vyper": vyper_config.get("evm_version", project_evm_version),
}
build_json = compiler.compile_and_format(
contract_sources,
solc_version=solc_config.get("version", None),
vyper_version=vyper_config.get("version", None),
optimize=solc_config.get("optimize", None),
runs=solc_config.get("runs", None),
evm_version=evm_version,
silent=silent,
allow_paths=allow_paths,
remappings=solc_config.get("remappings", []),
optimizer=solc_config.get("optimizer", None),
viaIR=solc_config.get("viaIR", None),
)
finally:
os.chdir(cwd)
build = self._build
build_path = self._build_path
for alias, data in build_json.items():
if build_path is not None and not data["sourcePath"].startswith("interface"):
# interfaces should generate artifact in /build/interfaces/ not /build/contracts/
if alias == data["contractName"]:
# if the alias == contract name, this is a part of the core project
path = build_path.joinpath(f"contracts/{alias}.json")
else:
# otherwise, this is an artifact from an external dependency
path = build_path.joinpath(f"contracts/dependencies/{alias}.json")
for parent in list(path.parents)[::-1]:
parent.mkdir(exist_ok=True)
with path.open("w") as fp:
ujson_dump(data, fp, sort_keys=True, indent=2, default=sorted)
if alias == data["contractName"]:
# only add artifacts from the core project for now
build._add_contract(data)
def _create_containers(self) -> None:
# create container objects
self.interface = InterfaceContainer(self)
self._containers = {}
for key, data in self._build.items():
if data["type"] == "interface":
self.interface._add(data["contractName"], data["abi"])
if data.get("bytecode"):
container = ContractContainer(self, data) # type: ignore [arg-type]
self._containers[key] = container
setattr(self, container._name, container)
def __getitem__(self, key: ContractName) -> ContractContainer:
return self._containers[key]
def __iter__(self) -> Iterator[ContractContainer]:
for i in sorted(self._containers):
yield self._containers[i]
def __len__(self) -> int:
return len(self._containers)
def __contains__(self, item: ContractName) -> bool:
return item in self._containers
def dict(self) -> Dict[ContractName, ContractContainer]:
return dict(self._containers)
def keys(self) -> KeysView[ContractName]:
return self._containers.keys()
# TODO: remove this decorator once weakref support is implemented
@mypyc_attr(native_class=False)
|
_ProjectBase
|
python
|
apache__airflow
|
providers/influxdb/src/airflow/providers/influxdb/hooks/influxdb.py
|
{
"start": 1324,
"end": 5933
}
|
class ____(BaseHook):
"""
Interact with InfluxDB.
Performs a connection to InfluxDB and retrieves the client.
:param influxdb_conn_id: Reference to :ref:`Influxdb connection id <howto/connection:influxdb>`.
"""
conn_name_attr = "influxdb_conn_id"
default_conn_name = "influxdb_default"
conn_type = "influxdb"
hook_name = "Influxdb"
def __init__(self, conn_id: str = default_conn_name, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.influxdb_conn_id = conn_id
self.connection = kwargs.pop("connection", None)
self.client: InfluxDBClient | None = None
self.extras: dict = {}
self.uri = None
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"token": StringField(lazy_gettext("Token"), widget=BS3TextFieldWidget(), default=""),
"org": StringField(
lazy_gettext("Organization Name"),
widget=BS3TextFieldWidget(),
default="",
),
}
def get_client(self, uri, kwargs) -> InfluxDBClient:
return InfluxDBClient(url=uri, **kwargs)
def get_uri(self, conn: Connection):
"""Add additional parameters to the URI based on InfluxDB host requirements."""
conn_scheme = "https" if conn.schema is None else conn.schema
conn_port = 7687 if conn.port is None else conn.port
return f"{conn_scheme}://{conn.host}:{conn_port}"
def get_conn(self) -> InfluxDBClient:
"""Initiate a new InfluxDB connection with token and organization name."""
self.connection = self.get_connection(self.influxdb_conn_id)
self.extras = self.connection.extra_dejson.copy()
self.uri = self.get_uri(self.connection)
self.log.info("URI: %s", self.uri)
if self.client is not None:
return self.client
self.client = self.get_client(self.uri, self.extras)
return self.client
def query(self, query) -> list[FluxTable]:
"""
Run the query.
Note: The bucket name should be included in the query.
:param query: InfluxDB query
:return: List
"""
client = self.get_conn()
query_api = client.query_api()
return query_api.query(query)
def query_to_df(self, query) -> pd.DataFrame:
"""
Run the query and return a pandas dataframe.
Note: The bucket name should be included in the query.
:param query: InfluxDB query
:return: pd.DataFrame
"""
client = self.get_conn()
query_api = client.query_api()
return query_api.query_data_frame(query)
def write(self, bucket_name, point_name, tag_name, tag_value, field_name, field_value, synchronous=False):
"""
Write a Point to the bucket specified.
Example: ``Point("my_measurement").tag("location", "Prague").field("temperature", 25.3)``
"""
# By default it's batching
if synchronous:
write_api = self.client.write_api(write_options=SYNCHRONOUS)
else:
write_api = self.client.write_api()
p = Point(point_name).tag(tag_name, tag_value).field(field_name, field_value)
write_api.write(bucket=bucket_name, record=p)
def create_organization(self, name):
"""Create a new organization."""
return self.client.organizations_api().create_organization(name=name)
def delete_organization(self, org_id):
"""Delete an organization by ID."""
return self.client.organizations_api().delete_organization(org_id=org_id)
def create_bucket(self, bucket_name, description, org_id, retention_rules=None):
"""Create a bucket for an organization."""
return self.client.buckets_api().create_bucket(
bucket_name=bucket_name, description=description, org_id=org_id, retention_rules=retention_rules
)
def find_bucket_id_by_name(self, bucket_name):
"""Get bucket ID by name."""
bucket = self.client.buckets_api().find_bucket_by_name(bucket_name)
return "" if bucket is None else bucket.id
def delete_bucket(self, bucket_name):
"""Delete bucket by name."""
bucket = self.find_bucket_id_by_name(bucket_name)
return self.client.buckets_api().delete_bucket(bucket)
|
InfluxDBHook
|
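`InfluxDBHook.write` above builds an `influxdb_client.Point` fluently before handing it to a write API. A tiny standalone sketch of just that construction (measurement, tag, and field names are invented; requires the `influxdb-client` package):

```python
from influxdb_client import Point

# Same fluent Point construction used by InfluxDBHook.write above.
p = Point("temperature").tag("location", "Prague").field("value", 25.3)

# Line protocol is what ultimately gets written to the bucket.
print(p.to_line_protocol())  # temperature,location=Prague value=25.3
```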
python
|
python-openxml__python-docx
|
tests/opc/unitdata/types.py
|
{
"start": 404,
"end": 765
}
|
class ____(BaseBuilder):
__tag__ = "Types"
__nspfxs__ = ("ct",)
__attrs__ = ()
def with_nsdecls(self, *nspfxs):
self._nsdecls = ' xmlns="%s"' % nsmap["ct"]
return self
def a_Default():
return CT_DefaultBuilder()
def a_Types():
return CT_TypesBuilder()
def an_Override():
return CT_OverrideBuilder()
|
CT_TypesBuilder
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/events.py
|
{
"start": 34342,
"end": 34576
}
|
class ____(Response):
"""
Response of events.add endpoint.
"""
_service = "events"
_action = "add"
_version = "2.13"
_schema = {"additionalProperties": True, "definitions": {}, "type": "object"}
|
AddResponse
|
python
|
huggingface__transformers
|
src/transformers/models/yolos/modeling_yolos.py
|
{
"start": 5271,
"end": 6652
}
|
class ____(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.config = config
def forward(self, pos_embed, img_size=(800, 1344)) -> torch.Tensor:
cls_pos_embed = pos_embed[:, 0, :]
cls_pos_embed = cls_pos_embed[:, None]
det_pos_embed = pos_embed[:, -self.config.num_detection_tokens :, :]
patch_pos_embed = pos_embed[:, 1 : -self.config.num_detection_tokens, :]
patch_pos_embed = patch_pos_embed.transpose(1, 2)
batch_size, hidden_size, seq_len = patch_pos_embed.shape
patch_height, patch_width = (
self.config.image_size[0] // self.config.patch_size,
self.config.image_size[1] // self.config.patch_size,
)
patch_pos_embed = patch_pos_embed.view(batch_size, hidden_size, patch_height, patch_width)
height, width = img_size
new_patch_height, new_patch_width = height // self.config.patch_size, width // self.config.patch_size
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed, size=(new_patch_height, new_patch_width), mode="bicubic", align_corners=False
)
patch_pos_embed = patch_pos_embed.flatten(2).transpose(1, 2)
scale_pos_embed = torch.cat((cls_pos_embed, patch_pos_embed, det_pos_embed), dim=1)
return scale_pos_embed
|
InterpolateInitialPositionEmbeddings
|
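The module above resizes the patch position embeddings with bicubic interpolation while leaving the CLS and detection-token embeddings untouched. A shape-only sketch of that resize step (the sizes are arbitrary examples, not YOLOS defaults):

```python
import torch
import torch.nn.functional as F

hidden_size, old_h, old_w, new_h, new_w = 8, 4, 4, 6, 10

# (batch, seq_len, hidden) -> (batch, hidden, H, W), as in forward() above.
patch_pos_embed = torch.randn(1, old_h * old_w, hidden_size).transpose(1, 2)
patch_pos_embed = patch_pos_embed.view(1, hidden_size, old_h, old_w)

# Bicubic resize to the new patch grid, then back to (batch, new_seq_len, hidden).
patch_pos_embed = F.interpolate(patch_pos_embed, size=(new_h, new_w),
                                mode="bicubic", align_corners=False)
patch_pos_embed = patch_pos_embed.flatten(2).transpose(1, 2)

print(patch_pos_embed.shape)  # torch.Size([1, 60, 8])
```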
python
|
tensorflow__tensorflow
|
tensorflow/python/training/input_test.py
|
{
"start": 17391,
"end": 41462
}
|
class ____(test_lib.TestCase):
def _testOneThreadHelper(self, use_dict):
with ops.Graph().as_default(), self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(
array_ops_stack.stack([zero64, zero64 + 1]), [2, 1]),
values=math_ops.cast(
array_ops_stack.stack([counter, -counter]), dtypes.float32),
dense_shape=[2])
if use_dict:
batched = inp.batch(
{
"c": counter,
"s": sparse_counter,
"S": "string"
},
batch_size=batch_size)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.batch(
[counter, sparse_counter, "string"], batch_size=batch_size)
batched_fetch = batched
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = self.evaluate(batched_fetch)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * batch_size) // 2, # 0, 0, 1, 1, ...
[0, 1] * batch_size)).T)
# [x, -x, x+1, -(x+1), ...]
expected = np.arange(2 * i * batch_size, 2 * (i + 1) * batch_size) // 2
expected *= ([1, -1] * batch_size) # mult by [1, -1, 1, -1, ...]
self.assertAllEqual(results[1].values, expected)
self.assertAllEqual(results[1].dense_shape, [batch_size, 2])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched_fetch)
for thread in threads:
thread.join()
def testOneThread(self):
self._testOneThreadHelper(use_dict=False)
def testOneThreadDict(self):
self._testOneThreadHelper(use_dict=True)
def testUint32DataTypes(self):
with ops.Graph().as_default():
values = constant_op.constant([0, 1, 2, 3, 4, 5], dtype=dtypes.uint32)
batched = inp.batch([values], batch_size=2)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
self.evaluate(batched)
coord.request_stop()
for thread in threads:
thread.join()
def testUint64DataTypes(self):
with ops.Graph().as_default():
values = constant_op.constant([0, 1, 2, 3, 4, 5], dtype=dtypes.uint64)
batched = inp.batch([values], batch_size=2)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
self.evaluate(batched)
coord.request_stop()
for thread in threads:
thread.join()
def testOneThreadDynamicPad(self):
with ops.Graph().as_default(), self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
string = array_ops.tile(["string"],
math_ops.cast(array_ops_stack.stack([counter]),
dtypes.int32))
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
batched = inp.batch(
[counter, string], batch_size=batch_size, dynamic_pad=True)
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = self.evaluate(batched)
expected_results = np.arange(i * batch_size, (i + 1) * batch_size)
max_len = expected_results[-1]
self.assertAllEqual(results[0], expected_results)
expected_strings = [[b"string"] * rep + [b""] * (max_len - rep)
for rep in expected_results]
self.assertAllEqual(results[1], expected_strings)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
def testOneThreadEnqueueMany(self):
with ops.Graph().as_default(), self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops_stack.stack(
[math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
pre_batched = inp.batch([counter, sparse_counter, "string"], batch_size=2)
batched = inp.batch(pre_batched, enqueue_many=True, batch_size=batch_size)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = self.evaluate(batched)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].values,
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
def testManyThreads(self):
with ops.Graph().as_default(), self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops_stack.stack(
[math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
num_threads=4)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * batch_size)
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
def testOneThreadSmallerBatch(self):
with ops.Graph().as_default(), self.cached_session():
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size + extra_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(
array_ops_stack.stack([zero64, zero64 + 1]), [2, 1]),
values=math_ops.cast(
array_ops_stack.stack([counter, -counter]), dtypes.float32),
dense_shape=[2])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
allow_smaller_final_batch=True)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = self.evaluate(batched)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * batch_size) // 2, # 0, 0, 1, 1, ...
[0, 1] * batch_size)).T)
# [x, -x, x+1, -(x+1), ...]
expected = np.arange(2 * i * batch_size, 2 * (i + 1) * batch_size) // 2
expected *= ([1, -1] * batch_size) # mult by [1, -1, 1, -1, ...]
self.assertAllEqual(results[1].values, expected)
self.assertAllEqual(results[1].dense_shape, [batch_size, 2])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra_elements.
results = self.evaluate(batched)
self.assertAllEqual(results[0],
np.arange(num_batches * batch_size,
num_batches * batch_size + extra_elements))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * extra_elements) // 2, # 0, 0, 1, 1, ...
[0, 1] * extra_elements)).T)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 2])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
def testManyThreadsSmallerBatch(self):
with ops.Graph().as_default(), self.cached_session():
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size + extra_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops_stack.stack(
[math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
num_threads=4,
allow_smaller_final_batch=True)
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = self.evaluate(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra_elements.
results = self.evaluate(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(extra_elements), np.zeros(extra_elements))).T)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
self.assertItemsEqual(all_counts,
range(num_batches * batch_size + extra_elements))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
def testSharedName(self):
with ops.Graph().as_default(), self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.batch(
[counter, "string"],
batch_size=batch_size,
shared_name="SHARED_NAME_XYZ",
name="Q")
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def testCannotInferRankError(self):
with ops.Graph().as_default(), self.cached_session():
x = array_ops.placeholder(dtype=dtypes.int64)
with self.assertRaisesRegex(ValueError, "Cannot infer Tensor's rank"):
inp.batch([x], batch_size=2)
def testBatchedSparseTensorInferredShape(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.batch([sparse], batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testBatchedSparseTensorInferredShapeEnqueueMany(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.batch([sparse], batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testBatchedSparseTensorInferredShapeUnknownRank(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.batch([sparse], batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.batch([sparse], batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testSingleElementDict(self):
with ops.Graph().as_default():
x = inp.batch({"c": [12, 12]}, batch_size=8)
self.assertAllEqual((8, 2), x["c"].get_shape().as_list())
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with ops.Graph().as_default(), self.cached_session():
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops_stack.stack(
[math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_batch(
to_batch,
keep_input,
batch_size,
num_threads=num_threads,
enqueue_many=enqueue_many)
variables.initialize_all_variables().run()
variables.initialize_local_variables().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = self.evaluate(batched)
self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(batched)
for thread in threads:
thread.join()
@test_util.run_v1_only("Input pipelines based on Queues are not supported "
"when eager execution is enabled. TF2 uses tf.data "
"instead.")
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
@test_util.run_v1_only("Input pipelines based on Queues are not supported "
"when eager execution is enabled. TF2 uses tf.data "
"instead.")
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
@test_util.run_v1_only("Input pipelines based on Queues are not supported "
"when eager execution is enabled. TF2 uses tf.data "
"instead.")
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
@test_util.run_v1_only("Input pipelines based on Queues are not supported "
"when eager execution is enabled. TF2 uses tf.data "
"instead.")
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
def testMaybeEnqueuePerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
def testMultipleThreadMaybeEnqueuePerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
def testInvalidKeepInputVector(self):
with ops.Graph().as_default():
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegex(ValueError,
"`keep_input` cannot be a vector"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=constant_op.constant([True, False]),
batch_size=1,
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegex(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=constant_op.constant([[True], [False]]),
batch_size=1,
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegex(ValueError,
"must be known at graph construction"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=array_ops.placeholder(dtypes.bool),
batch_size=1,
enqueue_many=True)
def testMaybeBatchedSparseTensorInferredShape(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch([sparse], keep_input=True, batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch([sparse],
keep_input=True,
batch_size=2,
enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch([sparse],
keep_input=[True, False],
batch_size=2,
enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch([sparse], keep_input=True, batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch([sparse],
keep_input=True,
batch_size=2,
enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
with ops.Graph().as_default():
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch([sparse],
keep_input=[True, False],
batch_size=2,
enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchCorrectValues(self):
with ops.Graph().as_default():
sparse_t = sparse_tensor.SparseTensor(
indices=[[0, 1], [0, 2], [1, 0], [1, 3]],
dense_shape=[2, 4],
values=[5, 4, 7, 2])
keep = constant_op.constant([True, False])
batched = inp.maybe_batch([sparse_t],
keep_input=keep,
batch_size=1,
enqueue_many=True)
with self.cached_session():
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
batched_np = self.evaluate(batched)
coord.request_stop()
for thread in threads:
thread.join()
self.assertAllEqual([[0, 1], [0, 2]], batched_np.indices)
self.assertAllEqual([5, 4], batched_np.values)
self.assertAllEqual([1, 4], batched_np.dense_shape)
|
BatchTest
|
python
|
pola-rs__polars
|
py-polars/src/polars/interchange/buffer.py
|
{
"start": 331,
"end": 2301
}
|
class ____(Buffer):
"""
A buffer object backed by a Polars Series consisting of a single chunk.
Parameters
----------
data
The Polars Series backing the buffer object.
allow_copy
Allow data to be copied during operations on this column. If set to `False`,
a RuntimeError will be raised if data would be copied.
"""
def __init__(self, data: Series, *, allow_copy: bool = True) -> None:
if data.n_chunks() > 1:
if not allow_copy:
msg = "non-contiguous buffer must be made contiguous"
raise CopyNotAllowedError(msg)
data = data.rechunk()
self._data = data
@property
def bufsize(self) -> int:
"""Buffer size in bytes."""
dtype = polars_dtype_to_dtype(self._data.dtype)
if dtype[0] == DtypeKind.BOOL:
_, offset, length = self._data._get_buffer_info()
n_bits = offset + length
n_bytes, rest = divmod(n_bits, 8)
# Round up to the nearest byte
if rest == 0:
return n_bytes
else:
return n_bytes + 1
return self._data.len() * (dtype[1] // 8)
@property
def ptr(self) -> int:
"""Pointer to start of the buffer as an integer."""
pointer, _, _ = self._data._get_buffer_info()
return pointer
def __dlpack__(self) -> NoReturn:
"""Represent this structure as DLPack interface."""
msg = "__dlpack__"
raise NotImplementedError(msg)
def __dlpack_device__(self) -> tuple[DlpackDeviceType, None]:
"""Device type and device ID for where the data in the buffer resides."""
return (DlpackDeviceType.CPU, None)
def __repr__(self) -> str:
bufsize = self.bufsize
ptr = self.ptr
device = self.__dlpack_device__()[0].name
return f"PolarsBuffer(bufsize={bufsize}, ptr={ptr}, device={device!r})"
|
PolarsBuffer
|
python
|
great-expectations__great_expectations
|
contrib/experimental/great_expectations_experimental/expectations/expect_column_distinct_values_to_be_continuous.py
|
{
"start": 781,
"end": 10228
}
|
class ____(ColumnAggregateExpectation):
"""Expect the set of distinct column values to be continuous."""
examples = [
{
"data": {
"a": [
"2021-01-01",
"2021-01-31",
"2021-02-28",
"2021-03-20",
"2021-02-21",
"2021-05-01",
"2021-06-18",
]
},
"only_for": ["pandas"],
"tests": [
{
"title": "fail_for_missing_date",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a", "datetime_format": "%Y-%m-%d"},
"out": {"success": False},
},
],
},
{
"data": {
"a": [
"2021-01-01 10:56:30",
"2021-01-03 10:56:30",
"2021-01-02 10:56:30", # out of order row to make sure we're ignoring order
"2021-01-04 10:56:30",
"2021-01-05 10:56:30",
"2021-01-06 10:56:30",
"2021-01-07 10:56:30",
]
},
"only_for": ["pandas"],
"tests": [
{
"title": "pass_for_continuous_date",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a", "datetime_format": "%Y-%m-%d %H:%M:%S"},
"out": {"success": True},
},
],
},
{
"data": {"a": [2, 3, 4, 5, 6, 7, 8, 9, 1]},
"only_for": ["pandas"],
"tests": [
{
"title": "pass_for_continuous_integers",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a"},
"out": {"success": True},
},
],
},
{
"data": {"a": [1, 2, 3, 4, 5, 8, 9]},
"only_for": ["pandas"],
"tests": [
{
"title": "fail_for_non_continuous_integers",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a"},
"out": {"success": False},
},
],
},
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental",
"tags": ["core expectation", "column aggregate expectation"],
"contributors": ["@jmoskovc"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": False,
}
# Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values
metric_dependencies = ("column.value_counts", "column.max", "column.min")
# Default values
default_kwarg_values = {
"row_condition": None,
"condition_parser": None,
"result_format": "BASIC",
"catch_exceptions": False,
}
args_keys = ("column", "datetime_format")
@classmethod
def _atomic_prescriptive_template(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = include_column_name if include_column_name is not None else True
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"datetime_format",
"row_condition",
"condition_parser",
],
)
params_with_json_schema = {
"column": {"schema": {"type": "string"}, "value": params.get("column")},
"datetime_format": {
"schema": {"type": "string"},
"value": params.get("datetime_format"),
},
"row_condition": {
"schema": {"type": "string"},
"value": params.get("row_condition"),
},
"condition_parser": {
"schema": {"type": "string"},
"value": params.get("condition_parser"),
},
}
template_str = "distinct values must be continuous."
if include_column_name:
template_str = f"$column {template_str}"
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"], with_schema=True)
template_str = f"{conditional_template_str}, then {template_str}"
params_with_json_schema.update(conditional_params)
params_with_json_schema = add_values_with_json_schema_from_list_in_params(
params=params, params_with_json_schema=params_with_json_schema
)
return (template_str, params_with_json_schema, styling)
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = include_column_name if include_column_name is not None else True
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"datetime_format",
"row_condition",
"condition_parser",
],
)
template_str = "distinct values must be continuous."
if include_column_name:
template_str = f"$column {template_str}"
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = f"{conditional_template_str}, then {template_str}"
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _expected_list(
self,
start_value: Any,
end_value: Any,
configuration: ExpectationConfiguration,
):
datetime_format = configuration.kwargs.get("datetime_format")
if datetime_format:
try:
# user defined datetime_format, so we're expecting to handle dates
start_date = datetime.strptime(start_value, datetime_format)
end_date = datetime.strptime(end_value, datetime_format)
return [
(start_date + timedelta(days=x)).strftime("%Y-%m-%d")
for x in range((end_date - start_date).days + 1)
]
except TypeError as ex:
raise InvalidExpectationConfigurationError( # noqa: TRY003
f"Expecting datetime when datetime_format is set\n{ex}"
)
# else - no datetime format, so we're assuming integers
return [x for x in range(start_value, end_value + 1)]
def _validate(
self,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
configuration = self.configuration
observed_value_counts = metrics.get("column.value_counts", [])
observed_max = metrics.get("column.max")
observed_min = metrics.get("column.min")
datetime_format = configuration.kwargs.get("datetime_format")
# need to strip the time part from the datetime strings
if datetime_format is not None:
observed_set = set(
map(
lambda x: datetime.strptime(x, datetime_format).strftime("%Y-%m-%d"),
observed_value_counts.index,
)
)
else:
observed_set = set(observed_value_counts.index)
expected_set = set(self._expected_list(observed_min, observed_max, configuration))
return {
"success": expected_set == observed_set,
"result": {
"observed_value": f"Missing values {sorted(expected_set - observed_set)}",
},
}
if __name__ == "__main__":
ExpectColumnDistinctValuesToBeContinuous().print_diagnostic_checklist()
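# Illustrative sketch of the continuity check performed in _validate above,
# written with plain Python so the set comparison is easy to follow (the
# Great Expectations runtime is not involved here).
from datetime import datetime, timedelta

fmt = "%Y-%m-%d"
observed = ["2021-01-01", "2021-01-03", "2021-01-04"]  # "2021-01-02" is missing

observed_set = {datetime.strptime(v, fmt).strftime("%Y-%m-%d") for v in observed}
start_date = datetime.strptime(min(observed_set), fmt)
end_date = datetime.strptime(max(observed_set), fmt)
expected_set = {
    (start_date + timedelta(days=x)).strftime("%Y-%m-%d")
    for x in range((end_date - start_date).days + 1)
}

print(expected_set == observed_set)         # False -> the expectation fails
print(sorted(expected_set - observed_set))  # ['2021-01-02']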
|
ExpectColumnDistinctValuesToBeContinuous
|
python
|
huggingface__transformers
|
src/transformers/models/granite_speech/configuration_granite_speech.py
|
{
"start": 4513,
"end": 8565
}
|
class ____(PreTrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`GraniteSpeechForConditionalGeneration`]. It is used to instantiate a
    Granite Speech model according to the specified arguments, defining the model architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `GraniteConfig`):
The config object or dictionary of the text backbone.
encoder_config (`GraniteSpeechEncoderConfig`, *optional*):
The config object or dictionary of the Granite Speech CTC Encoder.
projector_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Blip2QFormerConfig`):
The config object or dictionary of the audio projector.
audio_token_index (`int`, *optional*, defaults to 49155):
The audio token index to encode the audio prompt.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
has_lora_adapter (`bool`, *optional*, defaults to `True`):
Indicates whether or not the model has a lora adapter that should only
            be activated when processing audio inputs.
downsample_rate (`int`, *optional*, defaults to 5):
Downsample rate for the audio feature extractor.
window_size (`int`, *optional*, defaults to 15):
Window size for the audio feature projector.
Example:
```python
>>> from transformers import GraniteSpeechConfig, GraniteSpeechForConditionalGeneration
>>> # Initializing a GraniteSpeechConfig
>>> configuration = GraniteSpeechConfig()
>>> # Initializing a GraniteSpeechForConditionalGeneration (with random weights)
>>> model = GraniteSpeechForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "granite_speech"
attribute_map = {
"audio_token_id": "audio_token_index",
}
sub_configs = {
"text_config": AutoConfig,
"encoder_config": GraniteSpeechEncoderConfig,
"projector_config": AutoConfig,
}
def __init__(
self,
text_config=None,
encoder_config=None,
projector_config=None,
audio_token_index=49155,
initializer_range=0.02,
has_lora_adapter=True,
downsample_rate=5,
window_size=15,
**kwargs,
):
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "granite")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["granite"]()
if isinstance(projector_config, dict):
projector_config["model_type"] = projector_config.get("model_type", "blip_2_qformer")
projector_config = CONFIG_MAPPING[projector_config["model_type"]](**projector_config)
elif projector_config is None:
projector_config = CONFIG_MAPPING["blip_2_qformer"]()
if not isinstance(encoder_config, GraniteSpeechEncoderConfig):
encoder_config = {} if encoder_config is None else encoder_config
encoder_config = GraniteSpeechEncoderConfig(**encoder_config)
self.text_config = text_config
self.encoder_config = encoder_config
self.projector_config = projector_config
self.audio_token_index = audio_token_index
self.initializer_range = initializer_range
self.has_lora_adapter = has_lora_adapter
self.downsample_rate = downsample_rate
self.window_size = window_size
super().__init__(**kwargs)
__all__ = ["GraniteSpeechEncoderConfig", "GraniteSpeechConfig"]
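# Illustrative sketch (field values are placeholders): sub-configs may be
# passed as plain dicts and are promoted to config objects by the constructor
# above, so a composite config can be assembled without instantiating the
# sub-configs yourself.
_example_config = GraniteSpeechConfig(
    text_config={"model_type": "granite"},              # routed through CONFIG_MAPPING
    projector_config={"model_type": "blip_2_qformer"},  # the documented default projector
    encoder_config={},                                  # promoted to GraniteSpeechEncoderConfig()
    audio_token_index=49155,
)
assert isinstance(_example_config.encoder_config, GraniteSpeechEncoderConfig)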
|
GraniteSpeechConfig
|
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-autodoc/circular_import/c.py
|
{
"start": 52,
"end": 97
}
|
class ____:
X = circular_import.a.X
|
SomeClass
|
python
|
aimacode__aima-python
|
nlp4e.py
|
{
"start": 10798,
"end": 13594
}
|
class ____:
"""Class for parsing sentences using a chart data structure.
>>> chart = Chart(E0)
>>> len(chart.parses('the stench is in 2 2'))
1
"""
def __init__(self, grammar, trace=False):
"""A datastructure for parsing a string; and methods to do the parse.
self.chart[i] holds the edges that end just before the i'th word.
Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
self.grammar = grammar
self.trace = trace
def parses(self, words, S='S'):
"""Return a list of parses; words can be a list or string."""
if isinstance(words, str):
words = words.split()
self.parse(words, S)
# Return all the parses that span the whole input
# 'span the whole input' => begin at 0, end at len(words)
return [[i, j, S, found, []]
for (i, j, lhs, found, expects) in self.chart[len(words)]
# assert j == len(words)
if i == 0 and lhs == S and expects == []]
def parse(self, words, S='S'):
"""Parse a list of words; according to the grammar.
Leave results in the chart."""
self.chart = [[] for i in range(len(words) + 1)]
self.add_edge([0, 0, 'S_', [], [S]])
for i in range(len(words)):
self.scanner(i, words[i])
return self.chart
def add_edge(self, edge):
"""Add edge to chart, and see if it extends or predicts another edge."""
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print('Chart: added {}'.format(edge))
if not expects:
self.extender(edge)
else:
self.predictor(edge)
def scanner(self, j, word):
"""For each edge expecting a word of this category here, extend the edge."""
for (i, j, A, alpha, Bb) in self.chart[j]:
if Bb and self.grammar.isa(word, Bb[0]):
self.add_edge([i, j + 1, A, alpha + [(Bb[0], word)], Bb[1:]])
def predictor(self, edge):
"""Add to chart any rules for B that could help extend this edge."""
(i, j, A, alpha, Bb) = edge
B = Bb[0]
if B in self.grammar.rules:
for rhs in self.grammar.rewrites_for(B):
self.add_edge([j, j, B, [], rhs])
def extender(self, edge):
"""See what edges can be extended by this edge."""
(j, k, B, _, _) = edge
for (i, j, A, alpha, B1b) in self.chart[j]:
if B1b and B == B1b[0]:
self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
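# Illustrative sketch mirroring the doctest above; E0 is the sample grammar
# assumed to be defined earlier in this module. Pass trace=True to print each
# edge as it is added to the chart.
_chart = Chart(E0)
_parses = _chart.parses('the stench is in 2 2')
print(len(_parses))                  # 1
_start, _end, _lhs, _found, _expects = _parses[0]
print(_start, _lhs, _expects)        # 0 S [] -- a complete edge spanning the input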
# ______________________________________________________________________________
# CYK Parsing
|
Chart
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/filters.py
|
{
"start": 75482,
"end": 76006
}
|
class ____(PrefectFilterBaseModel):
"""Filter by `Artifact.task_run_id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of task run IDs to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.Artifact.task_run_id.in_(self.any_))
return filters
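# Illustrative sketch: restrict artifacts to a specific set of task run IDs.
# With the default any_=None, _get_filter_list returns no clauses and the
# task_run_id column is left unconstrained.
from uuid import uuid4

_task_run_filter = ArtifactFilterTaskRunId(any_=[uuid4(), uuid4()])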
|
ArtifactFilterTaskRunId
|
python
|
cython__cython
|
Cython/Compiler/ExprNodes.py
|
{
"start": 395261,
"end": 401589
}
|
class ____(ExprNode):
"""
Merge a sequence of iterables into a set/list/tuple.
The target collection is determined by self.type, which must be set externally.
args [ExprNode]
"""
subexprs = ['args']
is_temp = True
gil_message = "Constructing Python collection"
def __init__(self, pos, args, type):
if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor:
# construct a list directly from the first argument that we can then extend
if args[0].type is not list_type:
args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True, mult_factor=args[0].mult_factor)
ExprNode.__init__(self, pos, args=args, type=type)
def calculate_constant_result(self):
result = []
for item in self.args:
if item.is_sequence_constructor and item.mult_factor:
if item.mult_factor.constant_result <= 0:
continue
# otherwise, adding each item once should be enough
if item.is_set_literal or item.is_sequence_constructor:
# process items in order
items = (arg.constant_result for arg in item.args)
else:
items = item.constant_result
result.extend(items)
if self.type is set_type:
result = set(result)
elif self.type is tuple_type:
result = tuple(result)
else:
assert self.type is list_type
self.constant_result = result
def compile_time_value(self, denv):
result = []
for item in self.args:
if item.is_sequence_constructor and item.mult_factor:
if item.mult_factor.compile_time_value(denv) <= 0:
continue
if item.is_set_literal or item.is_sequence_constructor:
# process items in order
items = (arg.compile_time_value(denv) for arg in item.args)
else:
items = item.compile_time_value(denv)
result.extend(items)
if self.type is set_type:
try:
result = set(result)
except Exception as e:
self.compile_time_value_error(e)
elif self.type is tuple_type:
result = tuple(result)
else:
assert self.type is list_type
return result
def type_dependencies(self, env):
return ()
def infer_type(self, env):
return self.type
def analyse_types(self, env):
args = [
arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
# FIXME: CPython's error message starts with the runtime function name
'argument after * must be an iterable, not NoneType')
for arg in self.args
]
if len(args) == 1 and args[0].type is self.type:
# strip this intermediate node and use the bare collection
return args[0]
assert self.type in (set_type, list_type, tuple_type)
self.args = args
return self
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
is_set = self.type is set_type
args = iter(self.args)
item = next(args)
item.generate_evaluation_code(code)
if (is_set and item.is_set_literal or
not is_set and item.is_sequence_constructor and item.type is list_type):
code.putln("%s = %s;" % (self.result(), item.py_result()))
item.generate_post_assignment_code(code)
else:
code.putln("%s = %s(%s); %s" % (
self.result(),
'PySet_New' if is_set
else "__Pyx_PySequence_ListKeepNew" if item.result_in_temp() and item.type in (py_object_type, list_type)
else "PySequence_List",
item.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
item.generate_disposal_code(code)
item.free_temps(code)
helpers = set()
if is_set:
add_func = "PySet_Add"
extend_func = "__Pyx_PySet_Update"
else:
add_func = "__Pyx_ListComp_Append"
extend_func = "__Pyx_PyList_Extend"
for item in args:
if (is_set and (item.is_set_literal or item.is_sequence_constructor) or
(item.is_sequence_constructor and not item.mult_factor)):
if not is_set and item.args:
helpers.add(("ListCompAppend", "Optimize.c"))
for arg in item.args:
arg.generate_evaluation_code(code)
code.put_error_if_neg(arg.pos, "%s(%s, %s)" % (
add_func,
self.result(),
arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
continue
if is_set:
helpers.add(("PySet_Update", "Builtins.c"))
else:
helpers.add(("ListExtend", "Optimize.c"))
item.generate_evaluation_code(code)
code.put_error_if_neg(item.pos, "%s(%s, %s)" % (
extend_func,
self.result(),
item.py_result()))
item.generate_disposal_code(code)
item.free_temps(code)
if self.type is tuple_type:
code.putln("{")
code.putln("PyObject *%s = PyList_AsTuple(%s);" % (
Naming.quick_temp_cname,
self.result()))
code.put_decref(self.result(), py_object_type)
code.putln("%s = %s; %s" % (
self.result(),
Naming.quick_temp_cname,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
code.putln("}")
for helper in sorted(helpers):
code.globalstate.use_utility_code(UtilityCode.load_cached(*helper))
def annotate(self, code):
for item in self.args:
item.annotate(code)
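# Illustrative sketch of the surface syntax this node models: iterable
# unpackings merged into a single list/tuple/set literal (the target
# collection corresponds to self.type above).
_a, _b = [1, 2], (3, 4)
_merged_list = [*_a, *_b]    # [1, 2, 3, 4]
_merged_tuple = (*_a, *_b)   # (1, 2, 3, 4); built as a list, then PyList_AsTuple
_merged_set = {*_a, *_b}     # {1, 2, 3, 4}; built via PySet_New / __Pyx_PySet_Update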
|
MergedSequenceNode
|
python
|
davidhalter__jedi
|
test/refactor/extract_variable.py
|
{
"start": 3150,
"end": 8641
}
|
class ____(x):
pass
# -------------------------------------------------- keyword-pass
#? 12 error {'new_name': 'x'}
def x(): pass
# ++++++++++++++++++++++++++++++++++++++++++++++++++
Cannot extract a "simple_stmt"
# -------------------------------------------------- keyword-continue
#? 5 error {'new_name': 'x'}
continue
# ++++++++++++++++++++++++++++++++++++++++++++++++++
Cannot extract a "simple_stmt"
# -------------------------------------------------- keyword-None
if 1:
#? 4 text {'new_name': 'x'}
None
# ++++++++++++++++++++++++++++++++++++++++++++++++++
if 1:
#? 4 text {'new_name': 'x'}
x = None
x
# -------------------------------------------------- with-tuple
#? 4 text {'new_name': 'x'}
x + 1, 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 4 text {'new_name': 'x'}
x = x + 1
x, 3
# -------------------------------------------------- range-1
#? 4 text {'new_name': 'x', 'until_column': 9}
y + 1, 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 4 text {'new_name': 'x', 'until_column': 9}
x = y + 1, 3
x
# -------------------------------------------------- range-2
#? 1 text {'new_name': 'x', 'until_column': 3}
y + 1, 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 1 text {'new_name': 'x', 'until_column': 3}
x = y + 1
x, 3
# -------------------------------------------------- range-3
#? 1 text {'new_name': 'x', 'until_column': 6}
y + 1, 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 1 text {'new_name': 'x', 'until_column': 6}
x = y + 1
x, 3
# -------------------------------------------------- range-4
#? 1 text {'new_name': 'x', 'until_column': 1}
y + 1, 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 1 text {'new_name': 'x', 'until_column': 1}
x = y
x + 1, 3
# -------------------------------------------------- addition-1
#? 4 text {'new_name': 'x', 'until_column': 9}
z = y + 1 + 2+ 3, 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 4 text {'new_name': 'x', 'until_column': 9}
x = y + 1
z = x + 2+ 3, 3
# -------------------------------------------------- addition-2
#? 8 text {'new_name': 'x', 'until_column': 12}
z = y +1 + 2+ 3, 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 8 text {'new_name': 'x', 'until_column': 12}
x = 1 + 2
z = y +x+ 3, 3
# -------------------------------------------------- addition-3
#? 10 text {'new_name': 'x', 'until_column': 14}
z = y + 1 + 2+ 3, 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 10 text {'new_name': 'x', 'until_column': 14}
x = 1 + 2+ 3
z = y + x, 3
# -------------------------------------------------- addition-4
#? 13 text {'new_name': 'x', 'until_column': 17}
z = y + (1 + 2)+ 3, 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 13 text {'new_name': 'x', 'until_column': 17}
x = (1 + 2)+ 3
z = y + x, 3
# -------------------------------------------------- mult-add-1
#? 8 text {'new_name': 'x', 'until_column': 11}
z = foo(y+1*2+3, 3)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 8 text {'new_name': 'x', 'until_column': 11}
x = y+1
z = foo(x*2+3, 3)
# -------------------------------------------------- mult-add-2
#? 12 text {'new_name': 'x', 'until_column': 15}
z = foo(y+1*2+3)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 12 text {'new_name': 'x', 'until_column': 15}
x = 2+3
z = foo(y+1*x)
# -------------------------------------------------- mult-add-3
#? 9 text {'new_name': 'x', 'until_column': 13}
z = (y+1*2+3)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 9 text {'new_name': 'x', 'until_column': 13}
x = (y+1*2+3)
z = x
# -------------------------------------------------- extract-weird-1
#? 0 error {'new_name': 'x', 'until_column': 7}
foo = 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
Cannot extract a "expr_stmt"
# -------------------------------------------------- extract-weird-2
#? 0 error {'new_name': 'x', 'until_column': 5}
def x():
foo = 3
# ++++++++++++++++++++++++++++++++++++++++++++++++++
Cannot extract a "funcdef"
# -------------------------------------------------- extract-weird-3
def x():
#? 4 error {'new_name': 'x', 'until_column': 8}
if 1:
pass
# ++++++++++++++++++++++++++++++++++++++++++++++++++
Cannot extract a "if_stmt"
# -------------------------------------------------- extract-weird-4
#? 4 error {'new_name': 'x', 'until_column': 7}
x = foo = 4
# ++++++++++++++++++++++++++++++++++++++++++++++++++
Cannot extract a name that defines something
# -------------------------------------------------- keyword-not
#? 4 text {'new_name': 'x', 'until_column': 7}
yy = not foo or bar
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 4 text {'new_name': 'x', 'until_column': 7}
x = not foo
yy = x or bar
# -------------------------------------------------- augassign
yy = ()
#? 6 text {'new_name': 'x', 'until_column': 10}
yy += 3, 4
# ++++++++++++++++++++++++++++++++++++++++++++++++++
yy = ()
#? 6 text {'new_name': 'x', 'until_column': 10}
x = 3, 4
yy += x
# -------------------------------------------------- if-else
#? 9 text {'new_name': 'x', 'until_column': 22}
yy = foo(a if y else b)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 9 text {'new_name': 'x', 'until_column': 22}
x = a if y else b
yy = foo(x)
# -------------------------------------------------- lambda
#? 8 text {'new_name': 'x', 'until_column': 17}
y = foo(lambda x: 3, 5)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
#? 8 text {'new_name': 'x', 'until_column': 17}
x = lambda x: 3
y = foo(x, 5)
|
Foo
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/internal/observability.py
|
{
"start": 11839,
"end": 19487
}
|
class ____:
def __bool__(self):
self._note_deprecation()
return bool(_callbacks)
def _note_deprecation(self):
from hypothesis._settings import note_deprecation
note_deprecation(
"hypothesis.internal.observability.TESTCASE_CALLBACKS is deprecated. "
"Replace TESTCASE_CALLBACKS.append with add_observability_callback, "
"TESTCASE_CALLBACKS.remove with remove_observability_callback, and "
"bool(TESTCASE_CALLBACKS) with observability_enabled().",
since="2025-08-01",
has_codemod=False,
)
def append(self, f):
self._note_deprecation()
add_observability_callback(f)
def remove(self, f):
self._note_deprecation()
remove_observability_callback(f)
#: .. warning::
#:
#: Deprecated in favor of |add_observability_callback|,
#: |remove_observability_callback|, and |observability_enabled|.
#:
#: |TESTCASE_CALLBACKS| remains a thin compatibility
#: shim which forwards ``.append``, ``.remove``, and ``bool()`` to those
#: three methods. It is not an attempt to be fully compatible with the previous
#: ``TESTCASE_CALLBACKS = []``, so iteration or other usages will not work
#: anymore. Please update to using the new methods instead.
#:
#: |TESTCASE_CALLBACKS| will eventually be removed.
TESTCASE_CALLBACKS = _TestcaseCallbacks()
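# Illustrative sketch of the replacement API named in the deprecation note
# above; in practice this registration would live in the user's test setup,
# not in this module.
def _print_failed_cases(observation) -> None:
    # Test-case observations expose type/status/property, as built by make_testcase.
    if observation.type == "test_case" and observation.status == "failed":
        print(f"failed: {observation.property} ({observation.status_reason})")

add_observability_callback(_print_failed_cases)
try:
    ...  # run the tests that should be observed
finally:
    remove_observability_callback(_print_failed_cases)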
def make_testcase(
*,
run_start: float,
property: str,
data: "ConjectureData",
how_generated: str,
representation: str = "<unknown>",
timing: dict[str, float],
arguments: dict | None = None,
coverage: dict[str, list[int]] | None = None,
phase: str | None = None,
backend_metadata: dict[str, Any] | None = None,
status: (
Union[TestCaseStatus, "Status"] | None
) = None, # overrides automatic calculation
status_reason: str | None = None, # overrides automatic calculation
# added to calculated metadata. If keys overlap, the value from this `metadata`
# is used
metadata: dict[str, Any] | None = None,
) -> TestCaseObservation:
from hypothesis.core import reproduction_decorator
from hypothesis.internal.conjecture.data import Status
# We should only be sending observability reports for datas that have finished
# being modified.
assert data.frozen
if status_reason is not None:
pass
elif data.interesting_origin:
status_reason = str(data.interesting_origin)
elif phase == "shrink" and data.status == Status.OVERRUN:
status_reason = "exceeded size of current best example"
else:
status_reason = str(data.events.pop("invalid because", ""))
status_map: dict[Status, TestCaseStatus] = {
Status.OVERRUN: "gave_up",
Status.INVALID: "gave_up",
Status.VALID: "passed",
Status.INTERESTING: "failed",
}
if status is not None and isinstance(status, Status):
status = status_map[status]
if status is None:
status = status_map[data.status]
return TestCaseObservation(
type="test_case",
status=status,
status_reason=status_reason,
representation=representation,
arguments={
k.removeprefix("generate:"): v for k, v in (arguments or {}).items()
},
how_generated=how_generated, # iid, mutation, etc.
features={
**{
f"target:{k}".strip(":"): v for k, v in data.target_observations.items()
},
**data.events,
},
coverage=coverage,
timing=timing,
metadata=ObservationMetadata(
**{
"traceback": data.expected_traceback,
"reproduction_decorator": (
reproduction_decorator(data.choices) if status == "failed" else None
),
"predicates": dict(data._observability_predicates),
"backend": backend_metadata or {},
"data_status": data.status,
"phase": phase,
"interesting_origin": data.interesting_origin,
"choice_nodes": data.nodes if OBSERVABILITY_CHOICES else None,
"choice_spans": data.spans if OBSERVABILITY_CHOICES else None,
**_system_metadata(),
# unpack last so it takes precedence for duplicate keys
**(metadata or {}),
}
),
run_start=run_start,
property=property,
)
_WROTE_TO = set()
_deliver_to_file_lock = Lock()
def _deliver_to_file(
observation: Observation, thread_id: int
) -> None: # pragma: no cover
from hypothesis.strategies._internal.utils import to_jsonable
kind = "testcases" if observation.type == "test_case" else "info"
fname = storage_directory("observed", f"{date.today().isoformat()}_{kind}.jsonl")
fname.parent.mkdir(exist_ok=True, parents=True)
observation_bytes = (
json.dumps(to_jsonable(observation, avoid_realization=False)) + "\n"
)
    # only allow one concurrent file write to avoid write races. This is likely to make
# HYPOTHESIS_EXPERIMENTAL_OBSERVABILITY quite slow under threading. A queue
# would be an improvement, but that requires a background thread, and I
# would prefer to avoid a thread in the single-threaded case. We could
# switch over to a queue if we detect multithreading, but it's tricky to get
# right.
with _deliver_to_file_lock:
_WROTE_TO.add(fname)
with fname.open(mode="a") as f:
f.write(observation_bytes)
_imported_at = time.time()
@lru_cache
def _system_metadata() -> dict[str, Any]:
return {
"sys_argv": sys.argv,
"os_getpid": os.getpid(),
"imported_at": _imported_at,
}
#: If ``False``, do not collect coverage information when observability is enabled.
#:
#: This is exposed both for performance (as coverage collection can be slow on
#: Python 3.11 and earlier) and size (if you do not use coverage information,
#: you may not want to store it in-memory).
OBSERVABILITY_COLLECT_COVERAGE = (
"HYPOTHESIS_EXPERIMENTAL_OBSERVABILITY_NOCOVER" not in os.environ
)
#: If ``True``, include the ``metadata.choice_nodes`` and ``metadata.spans`` keys
#: in test case observations.
#:
#: ``False`` by default. ``metadata.choice_nodes`` and ``metadata.spans`` can be
#: a substantial amount of data, and so must be opted-in to, even when
#: observability is enabled.
#:
#: .. warning::
#:
#: EXPERIMENTAL AND UNSTABLE. We are actively working towards a better
#: interface for this as of June 2025, and this attribute may disappear or
#: be renamed without notice.
#:
OBSERVABILITY_CHOICES = "HYPOTHESIS_EXPERIMENTAL_OBSERVABILITY_CHOICES" in os.environ
if OBSERVABILITY_COLLECT_COVERAGE is False and (
sys.version_info[:2] >= (3, 12)
): # pragma: no cover
warnings.warn(
"Coverage data collection should be quite fast in Python 3.12 or later "
"so there should be no need to turn coverage reporting off.",
HypothesisWarning,
stacklevel=2,
)
if (
"HYPOTHESIS_EXPERIMENTAL_OBSERVABILITY" in os.environ
or OBSERVABILITY_COLLECT_COVERAGE is False
): # pragma: no cover
add_observability_callback(_deliver_to_file, all_threads=True)
# Remove files more than a week old, to cap the size on disk
max_age = (date.today() - timedelta(days=8)).isoformat()
for f in storage_directory("observed", intent_to_write=False).glob("*.jsonl"):
if f.stem < max_age: # pragma: no branch
f.unlink(missing_ok=True)
|
_TestcaseCallbacks
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/cloud_storage_transfer_service.py
|
{
"start": 17515,
"end": 21761
}
|
class ____(GoogleCloudBaseOperator):
"""
Runs a transfer job.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceRunJobOperator`
:param job_name: (Required) Name of the job to be run
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1).
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param timeout: Time to wait for the operation to end in seconds. Defaults to 60 seconds if not specified.
:param deferrable: Run operator in the deferrable mode.
"""
# [START gcp_transfer_job_run_template_fields]
template_fields: Sequence[str] = (
"job_name",
"project_id",
"gcp_conn_id",
"api_version",
"google_impersonation_chain",
)
# [END gcp_transfer_job_run_template_fields]
operator_extra_links = (CloudStorageTransferJobLink(),)
def __init__(
self,
*,
job_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
project_id: str = PROVIDE_PROJECT_ID,
google_impersonation_chain: str | Sequence[str] | None = None,
timeout: float | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_name = job_name
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self.timeout = timeout
self.deferrable = deferrable
def _validate_inputs(self) -> None:
if not self.job_name:
raise AirflowException("The required parameter 'job_name' is empty or None")
def execute(self, context: Context) -> dict:
self._validate_inputs()
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudStorageTransferJobLink.persist(
context=context,
project_id=project_id,
job_name=self.job_name,
)
if self.deferrable:
self.defer(
timeout=timedelta(seconds=self.timeout or 60),
trigger=CloudDataTransferServiceRunJobTrigger(
job_name=self.job_name,
project_id=project_id,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
),
method_name="execute_complete",
)
return hook.run_transfer_job(job_name=self.job_name, project_id=project_id)
def execute_complete(self, context: Context, event: dict[str, Any]) -> Any:
"""
Act as a callback for when the trigger fires.
        This returns immediately. It relies on the trigger to throw an exception;
        otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
return event["job_result"]
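# Illustrative usage sketch; the DAG scaffolding and all identifiers below are
# placeholders, not values taken from the provider sources.
from datetime import datetime

from airflow import DAG

with DAG(dag_id="example_run_transfer_job", start_date=datetime(2024, 1, 1), schedule=None):
    run_transfer_job = CloudDataTransferServiceRunJobOperator(
        task_id="run_transfer_job",
        job_name="transferJobs/123456789",  # placeholder transfer job name
        project_id="my-gcp-project",        # placeholder project
        timeout=120,                        # seconds to wait when running deferred
        deferrable=True,
    )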
|
CloudDataTransferServiceRunJobOperator
|
python
|
PrefectHQ__prefect
|
src/prefect/flows.py
|
{
"start": 4232,
"end": 74016
}
|
class ____(Generic[P, R]):
"""
A Prefect workflow definition.
Wraps a function with an entrypoint to the Prefect engine. To preserve the input
and output types, we use the generic type variables `P` and `R` for "Parameters" and
"Returns" respectively.
Args:
fn: The function defining the workflow.
name: An optional name for the flow; if not provided, the name will be inferred
from the given function.
version: An optional version string for the flow; if not provided, we will
attempt to create a version string as a hash of the file containing the
wrapped function; if the file cannot be located, the version will be null.
flow_run_name: An optional name to distinguish runs of this flow; this name can
be provided as a string template with the flow's parameters as variables,
or a function that returns a string.
task_runner: An optional task runner to use for task execution within the flow;
if not provided, a `ThreadPoolTaskRunner` will be used.
description: An optional string description for the flow; if not provided, the
description will be pulled from the docstring for the decorated function.
timeout_seconds: An optional number of seconds indicating a maximum runtime for
the flow. If the flow exceeds this runtime, it will be marked as failed.
Flow execution may continue until the next task is called.
validate_parameters: By default, parameters passed to flows are validated by
Pydantic. This will check that input values conform to the annotated types
on the function. Where possible, values will be coerced into the correct
type; for example, if a parameter is defined as `x: int` and "5" is passed,
it will be resolved to `5`. If set to `False`, no validation will be
performed on flow parameters.
retries: An optional number of times to retry on flow run failure.
retry_delay_seconds: An optional number of seconds to wait before retrying the
flow after failure. This is only applicable if `retries` is nonzero.
persist_result: An optional toggle indicating whether the result of this flow
should be persisted to result storage. Defaults to `None`, which indicates
that Prefect should choose whether the result should be persisted depending on
the features being used.
result_storage: An optional block to use to persist the result of this flow.
This value will be used as the default for any tasks in this flow.
If not provided, the local file system will be used unless called as
a subflow, at which point the default will be loaded from the parent flow.
result_serializer: An optional serializer to use to serialize the result of this
flow for persistence. This value will be used as the default for any tasks
in this flow. If not provided, the value of `PREFECT_RESULTS_DEFAULT_SERIALIZER`
will be used unless called as a subflow, at which point the default will be
loaded from the parent flow.
on_failure: An optional list of callables to run when the flow enters a failed state.
on_completion: An optional list of callables to run when the flow enters a completed state.
on_cancellation: An optional list of callables to run when the flow enters a cancelling state.
on_crashed: An optional list of callables to run when the flow enters a crashed state.
on_running: An optional list of callables to run when the flow enters a running state.
"""
# NOTE: These parameters (types, defaults, and docstrings) should be duplicated
# exactly in the @flow decorator
def __init__(
self,
fn: Callable[P, R] | "classmethod[Any, P, R]" | "staticmethod[P, R]",
name: Optional[str] = None,
version: Optional[str] = None,
flow_run_name: Optional[Union[Callable[[], str], str]] = None,
retries: Optional[int] = None,
retry_delay_seconds: Optional[Union[int, float]] = None,
task_runner: Union[
Type[TaskRunner[PrefectFuture[Any]]], TaskRunner[PrefectFuture[Any]], None
] = None,
description: Optional[str] = None,
timeout_seconds: Union[int, float, None] = None,
validate_parameters: bool = True,
persist_result: Optional[bool] = None,
result_storage: Optional[Union[ResultStorage, str]] = None,
result_serializer: Optional[ResultSerializer] = None,
cache_result_in_memory: bool = True,
log_prints: Optional[bool] = None,
on_completion: Optional[list[FlowStateHook[P, R]]] = None,
on_failure: Optional[list[FlowStateHook[P, R]]] = None,
on_cancellation: Optional[list[FlowStateHook[P, R]]] = None,
on_crashed: Optional[list[FlowStateHook[P, R]]] = None,
on_running: Optional[list[FlowStateHook[P, R]]] = None,
):
if name is not None and not isinstance(name, str): # pyright: ignore[reportUnnecessaryIsInstance]
raise TypeError(
"Expected string for flow parameter 'name'; got {} instead. {}".format(
type(name).__name__,
(
"Perhaps you meant to call it? e.g."
" '@flow(name=get_flow_run_name())'"
if callable(name)
else ""
),
)
)
# Validate if hook passed is list and contains callables
hook_categories = [
on_completion,
on_failure,
on_cancellation,
on_crashed,
on_running,
]
hook_names = [
"on_completion",
"on_failure",
"on_cancellation",
"on_crashed",
"on_running",
]
for hooks, hook_name in zip(hook_categories, hook_names):
if hooks is not None:
try:
hooks = list(hooks)
except TypeError:
raise TypeError(
f"Expected iterable for '{hook_name}'; got"
f" {type(hooks).__name__} instead. Please provide a list of"
f" hooks to '{hook_name}':\n\n"
f"@flow({hook_name}=[hook1, hook2])\ndef"
" my_flow():\n\tpass"
)
for hook in hooks:
if not callable(hook):
raise TypeError(
f"Expected callables in '{hook_name}'; got"
f" {type(hook).__name__} instead. Please provide a list of"
f" hooks to '{hook_name}':\n\n"
f"@flow({hook_name}=[hook1, hook2])\ndef"
" my_flow():\n\tpass"
)
if isinstance(fn, classmethod):
fn = cast(Callable[P, R], fn.__func__)
self._isclassmethod = True
if isinstance(fn, staticmethod):
fn = cast(Callable[P, R], fn.__func__)
self._isstaticmethod = True
if not callable(fn):
raise TypeError("'fn' must be callable")
self.name: str = name or fn.__name__.replace("_", "-").replace(
"<lambda>",
"unknown-lambda", # prefect API will not accept "<" or ">" in flow names
)
_raise_on_name_with_banned_characters(self.name)
if flow_run_name is not None:
if not isinstance(flow_run_name, str) and not callable(flow_run_name):
raise TypeError(
"Expected string or callable for 'flow_run_name'; got"
f" {type(flow_run_name).__name__} instead."
)
self.flow_run_name = flow_run_name
if task_runner is None:
self.task_runner: TaskRunner[PrefectFuture[Any]] = cast(
TaskRunner[PrefectFuture[Any]], ThreadPoolTaskRunner()
)
else:
self.task_runner: TaskRunner[PrefectFuture[Any]] = (
task_runner() if isinstance(task_runner, type) else task_runner
)
self.log_prints = log_prints
self.description: str | None = description or inspect.getdoc(fn)
update_wrapper(self, fn)
self.fn = fn
# the flow is considered async if its function is async or an async
# generator
self.isasync: bool = inspect.iscoroutinefunction(
self.fn
) or inspect.isasyncgenfunction(self.fn)
# the flow is considered a generator if its function is a generator or
# an async generator
self.isgenerator: bool = inspect.isgeneratorfunction(
self.fn
) or inspect.isasyncgenfunction(self.fn)
raise_for_reserved_arguments(self.fn, ["return_state", "wait_for"])
# Version defaults to a hash of the function's file
if not version:
try:
flow_file = inspect.getsourcefile(self.fn)
if flow_file is None:
raise FileNotFoundError
version = file_hash(flow_file)
except (FileNotFoundError, TypeError, OSError):
pass # `getsourcefile` can return null values and "<stdin>" for objects in repls
self.version = version
self.timeout_seconds: float | None = (
float(timeout_seconds) if timeout_seconds else None
)
# FlowRunPolicy settings
# TODO: We can instantiate a `FlowRunPolicy` and add Pydantic bound checks to
# validate that the user passes positive numbers here
self.retries: int = (
retries if retries is not None else PREFECT_FLOW_DEFAULT_RETRIES.value()
)
self.retry_delay_seconds: float | int = (
retry_delay_seconds
if retry_delay_seconds is not None
else PREFECT_FLOW_DEFAULT_RETRY_DELAY_SECONDS.value()
)
self.parameters: ParameterSchema = parameter_schema(self.fn)
self.should_validate_parameters = validate_parameters
if self.should_validate_parameters:
# Try to create the validated function now so that incompatibility can be
# raised at declaration time rather than at runtime
# We cannot, however, store the validated function on the flow because it
# is not picklable in some environments
try:
ValidatedFunction(self.fn, config={"arbitrary_types_allowed": True})
except Exception as exc:
raise ValueError(
"Flow function is not compatible with `validate_parameters`. "
"Disable validation or change the argument names."
) from exc
# result persistence settings
if persist_result is None:
if result_storage is not None or result_serializer is not None:
persist_result = True
self.persist_result = persist_result
if result_storage and not isinstance(result_storage, str):
if getattr(result_storage, "_block_document_id", None) is None:
raise TypeError(
"Result storage configuration must be persisted server-side."
" Please call `.save()` on your block before passing it in."
)
self.result_storage = result_storage
self.result_serializer = result_serializer
self.cache_result_in_memory = cache_result_in_memory
self.on_completion_hooks: list[FlowStateHook[P, R]] = on_completion or []
self.on_failure_hooks: list[FlowStateHook[P, R]] = on_failure or []
self.on_cancellation_hooks: list[FlowStateHook[P, R]] = on_cancellation or []
self.on_crashed_hooks: list[FlowStateHook[P, R]] = on_crashed or []
self.on_running_hooks: list[FlowStateHook[P, R]] = on_running or []
# Used for flows loaded from remote storage
self._storage: Optional["RunnerStorage"] = None
self._entrypoint: Optional[str] = None
module = fn.__module__
if module and (module == "__main__" or module.startswith("__prefect_loader_")):
module_name = inspect.getfile(fn)
module = module_name if module_name != "__main__" else module
self._entrypoint = f"{module}:{getattr(fn, '__qualname__', fn.__name__)}"
@property
def ismethod(self) -> bool:
return hasattr(self.fn, "__prefect_self__")
@property
def isclassmethod(self) -> bool:
return getattr(self, "_isclassmethod", False)
@property
def isstaticmethod(self) -> bool:
return getattr(self, "_isstaticmethod", False)
def __get__(self, instance: Any, owner: Any) -> "Flow[P, R]":
"""
Implement the descriptor protocol so that the flow can be used as an instance or class method.
When an instance method is loaded, this method is called with the "self" instance as
an argument. We return a copy of the flow with that instance bound to the flow's function.
"""
# wrapped function is a classmethod
if self.isclassmethod:
bound_task = copy(self)
setattr(bound_task.fn, "__prefect_cls__", owner)
return bound_task
# if the flow is being accessed on an instance, bind the instance to the __prefect_self__ attribute
# of the flow's function. This will allow it to be automatically added to the flow's parameters
if instance:
bound_task = copy(self)
bound_task.fn.__prefect_self__ = instance # type: ignore[attr-defined]
return bound_task
return self
def with_options(
self,
*,
name: Optional[str] = None,
version: Optional[str] = None,
retries: Optional[int] = None,
retry_delay_seconds: Optional[Union[int, float]] = None,
description: Optional[str] = None,
flow_run_name: Optional[Union[Callable[[], str], str]] = None,
task_runner: Union[
Type[TaskRunner[PrefectFuture[Any]]], TaskRunner[PrefectFuture[Any]], None
] = None,
timeout_seconds: Union[int, float, None] = None,
validate_parameters: Optional[bool] = None,
persist_result: Optional[bool] = NotSet, # type: ignore
result_storage: Optional[ResultStorage] = NotSet, # type: ignore
result_serializer: Optional[ResultSerializer] = NotSet, # type: ignore
cache_result_in_memory: Optional[bool] = None,
log_prints: Optional[bool] = NotSet, # type: ignore
on_completion: Optional[list[FlowStateHook[P, R]]] = None,
on_failure: Optional[list[FlowStateHook[P, R]]] = None,
on_cancellation: Optional[list[FlowStateHook[P, R]]] = None,
on_crashed: Optional[list[FlowStateHook[P, R]]] = None,
on_running: Optional[list[FlowStateHook[P, R]]] = None,
) -> "Flow[P, R]":
"""
Create a new flow from the current object, updating provided options.
Args:
name: A new name for the flow.
version: A new version for the flow.
description: A new description for the flow.
flow_run_name: An optional name to distinguish runs of this flow; this name
can be provided as a string template with the flow's parameters as variables,
or a function that returns a string.
task_runner: A new task runner for the flow.
timeout_seconds: A new number of seconds to fail the flow after if still
running.
validate_parameters: A new value indicating if flow calls should validate
given parameters.
retries: A new number of times to retry on flow run failure.
retry_delay_seconds: A new number of seconds to wait before retrying the
flow after failure. This is only applicable if `retries` is nonzero.
persist_result: A new option for enabling or disabling result persistence.
result_storage: A new storage type to use for results.
result_serializer: A new serializer to use for results.
cache_result_in_memory: A new value indicating if the flow's result should
be cached in memory.
on_failure: A new list of callables to run when the flow enters a failed state.
on_completion: A new list of callables to run when the flow enters a completed state.
on_cancellation: A new list of callables to run when the flow enters a cancelling state.
on_crashed: A new list of callables to run when the flow enters a crashed state.
on_running: A new list of callables to run when the flow enters a running state.
Returns:
A new `Flow` instance.
Examples:
Create a new flow from an existing flow and update the name:
```python
from prefect import flow
@flow(name="My flow")
def my_flow():
return 1
new_flow = my_flow.with_options(name="My new flow")
```
Create a new flow from an existing flow, update the task runner, and call
it without an intermediate variable:
```python
from prefect.task_runners import ThreadPoolTaskRunner
@flow
def my_flow(x, y):
return x + y
state = my_flow.with_options(task_runner=ThreadPoolTaskRunner)(1, 3)
assert state.result() == 4
```
"""
new_task_runner = (
task_runner() if isinstance(task_runner, type) else task_runner
)
if new_task_runner is None:
new_task_runner = self.task_runner
new_flow = Flow(
fn=self.fn,
name=name or self.name,
description=description or self.description,
flow_run_name=flow_run_name or self.flow_run_name,
version=version or self.version,
task_runner=new_task_runner,
retries=retries if retries is not None else self.retries,
retry_delay_seconds=(
retry_delay_seconds
if retry_delay_seconds is not None
else self.retry_delay_seconds
),
timeout_seconds=(
timeout_seconds if timeout_seconds is not None else self.timeout_seconds
),
validate_parameters=(
validate_parameters
if validate_parameters is not None
else self.should_validate_parameters
),
persist_result=(
persist_result if persist_result is not NotSet else self.persist_result
),
result_storage=(
result_storage if result_storage is not NotSet else self.result_storage
),
result_serializer=(
result_serializer
if result_serializer is not NotSet
else self.result_serializer
),
cache_result_in_memory=(
cache_result_in_memory
if cache_result_in_memory is not None
else self.cache_result_in_memory
),
log_prints=log_prints if log_prints is not NotSet else self.log_prints,
on_completion=on_completion or self.on_completion_hooks,
on_failure=on_failure or self.on_failure_hooks,
on_cancellation=on_cancellation or self.on_cancellation_hooks,
on_crashed=on_crashed or self.on_crashed_hooks,
on_running=on_running or self.on_running_hooks,
)
new_flow._storage = self._storage
new_flow._entrypoint = self._entrypoint
return new_flow
def validate_parameters(self, parameters: dict[str, Any]) -> dict[str, Any]:
"""
Validate parameters for compatibility with the flow by attempting to cast the inputs to the
associated types specified by the function's type annotations.
Returns:
A new dict of parameters that have been cast to the appropriate types
Raises:
ParameterTypeError: if the provided parameters are not valid
"""
def resolve_block_reference(data: Any | dict[str, Any]) -> Any:
if isinstance(data, dict) and "$ref" in data:
from prefect.blocks.core import Block
return Block.load_from_ref(data["$ref"], _sync=True)
return data
try:
parameters = visit_collection(
parameters, resolve_block_reference, return_data=True
)
except (ValueError, RuntimeError) as exc:
raise ParameterTypeError(
"Failed to resolve block references in parameters."
) from exc
args, kwargs = parameters_to_args_kwargs(self.fn, parameters)
if sys.version_info >= (3, 14): # Pydantic v1 is not supported in Python 3.14+
has_v1_models = False
else:
from pydantic.v1 import BaseModel as V1BaseModel
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", category=pydantic.warnings.PydanticDeprecatedSince20
)
has_v1_models = any(isinstance(o, V1BaseModel) for o in args) or any(
isinstance(o, V1BaseModel) for o in kwargs.values()
)
has_v2_types = any(is_v2_type(o) for o in args) or any(
is_v2_type(o) for o in kwargs.values()
)
if has_v1_models and has_v2_types:
raise ParameterTypeError(
"Cannot mix Pydantic v1 and v2 types as arguments to a flow."
)
try:
if has_v1_models:
from pydantic.v1.decorator import (
ValidatedFunction as V1ValidatedFunction,
)
validated_fn = V1ValidatedFunction(
self.fn, config=dict(arbitrary_types_allowed=True)
)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", category=pydantic.warnings.PydanticDeprecatedSince20
)
model = validated_fn.init_model_instance(*args, **kwargs)
# Get the updated parameter dict with cast values from the model
cast_parameters = {
k: v
for k, v in dict(iter(model)).items()
if k in model.model_fields_set
or type(model).model_fields[k].default_factory
}
return cast_parameters
else:
validated_fn = ValidatedFunction(
self.fn, config=pydantic.ConfigDict(arbitrary_types_allowed=True)
)
return validated_fn.validate_call_args(args, kwargs)
except pydantic.ValidationError as exc:
# We capture the pydantic exception and raise our own because the pydantic
# exception is not picklable when using a cythonized pydantic installation
logger.error(
f"Parameter validation failed for flow {self.name!r}: {exc.errors()}"
f"\nParameters: {parameters}"
)
raise ParameterTypeError.from_validation_error(exc) from None
def serialize_parameters(
self, parameters: dict[str, Any | PrefectFuture[Any] | State]
) -> dict[str, Any]:
"""
Convert parameters to a serializable form.
Uses FastAPI's `jsonable_encoder` to convert to JSON compatible objects without
converting everything directly to a string. This maintains basic types like
integers during API roundtrips.
"""
serialized_parameters: dict[str, Any] = {}
for key, value in parameters.items():
# do not serialize the bound self object
if self.ismethod and value is getattr(self.fn, "__prefect_self__", None):
continue
if self.isclassmethod and value is getattr(
self.fn, "__prefect_cls__", None
):
continue
if isinstance(value, (PrefectFuture, State)):
# Don't call jsonable_encoder() on a PrefectFuture or State to
# avoid triggering a __getitem__ call
serialized_parameters[key] = f"<{type(value).__name__}>"
continue
try:
from fastapi.encoders import jsonable_encoder
serialized_parameters[key] = jsonable_encoder(value)
except (TypeError, ValueError):
logger.debug(
f"Parameter {key!r} for flow {self.name!r} is unserializable. "
f"Type {type(value).__name__!r} and will not be stored "
"in the backend."
)
serialized_parameters[key] = f"<{type(value).__name__}>"
return serialized_parameters
async def ato_deployment(
self,
name: str,
interval: Optional[
Union[
Iterable[Union[int, float, datetime.timedelta]],
int,
float,
datetime.timedelta,
]
] = None,
cron: Optional[Union[Iterable[str], str]] = None,
rrule: Optional[Union[Iterable[str], str]] = None,
paused: Optional[bool] = None,
schedule: Optional[Schedule] = None,
schedules: Optional["FlexibleScheduleList"] = None,
concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
parameters: Optional[dict[str, Any]] = None,
triggers: Optional[list[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
description: Optional[str] = None,
tags: Optional[list[str]] = None,
version: Optional[str] = None,
version_type: Optional[VersionType] = None,
enforce_parameter_schema: bool = True,
work_pool_name: Optional[str] = None,
work_queue_name: Optional[str] = None,
job_variables: Optional[dict[str, Any]] = None,
entrypoint_type: EntrypointType = EntrypointType.FILE_PATH,
_sla: Optional[Union[SlaTypes, list[SlaTypes]]] = None, # experimental
) -> "RunnerDeployment":
"""
Asynchronously creates a runner deployment object for this flow.
Args:
name: The name to give the created deployment.
interval: An interval on which to execute the new deployment. Accepts either a number
or a timedelta object. If a number is given, it will be interpreted as seconds.
cron: A cron schedule of when to execute runs of this deployment.
rrule: An rrule schedule of when to execute runs of this deployment.
paused: Whether or not to set this deployment as paused.
schedule: A schedule object defining when to execute runs of this deployment.
Used to provide additional scheduling options like `timezone` or `parameters`.
schedules: A list of schedule objects defining when to execute runs of this deployment.
Used to define multiple schedules or additional scheduling options such as `timezone`.
concurrency_limit: The maximum number of runs of this deployment that can run at the same time.
parameters: A dictionary of default parameter values to pass to runs of this deployment.
triggers: A list of triggers that will kick off runs of this deployment.
description: A description for the created deployment. Defaults to the flow's
description if not provided.
tags: A list of tags to associate with the created deployment for organizational
purposes.
version: A version for the created deployment. Defaults to the flow's version.
version_type: The type of version to use for the created deployment. The version type
will be inferred if not provided.
enforce_parameter_schema: Whether or not the Prefect API should enforce the
parameter schema for the created deployment.
work_pool_name: The name of the work pool to use for this deployment.
work_queue_name: The name of the work queue to use for this deployment's scheduled runs.
If not provided the default work queue for the work pool will be used.
job_variables: Settings used to override the values specified in the default base job template
of the chosen work pool. Refer to the base job template of the chosen work pool for
available settings.
entrypoint_type: Type of entrypoint to use for the deployment. When using a module path
entrypoint, ensure that the module will be importable in the execution environment.
_sla: (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
Examples:
Prepare two deployments and serve them:
```python
from prefect import flow, serve
@flow
def my_flow(name):
print(f"hello {name}")
@flow
def my_other_flow(name):
print(f"goodbye {name}")
if __name__ == "__main__":
hello_deploy = my_flow.to_deployment("hello", tags=["dev"])
bye_deploy = my_other_flow.to_deployment("goodbye", tags=["dev"])
serve(hello_deploy, bye_deploy)
```
"""
from prefect.deployments.runner import RunnerDeployment
if not name.endswith(".py"):
_raise_on_name_with_banned_characters(name)
if self._storage and self._entrypoint:
return await RunnerDeployment.afrom_storage(
storage=self._storage,
entrypoint=self._entrypoint,
name=name,
flow_name=self.name,
interval=interval,
cron=cron,
rrule=rrule,
paused=paused,
schedule=schedule,
schedules=schedules,
concurrency_limit=concurrency_limit,
tags=tags,
triggers=triggers,
parameters=parameters or {},
description=description,
version=version,
version_type=version_type,
enforce_parameter_schema=enforce_parameter_schema,
work_pool_name=work_pool_name,
work_queue_name=work_queue_name,
job_variables=job_variables,
_sla=_sla,
)
else:
return RunnerDeployment.from_flow(
flow=self,
name=name,
interval=interval,
cron=cron,
rrule=rrule,
paused=paused,
schedule=schedule,
schedules=schedules,
concurrency_limit=concurrency_limit,
tags=tags,
triggers=triggers,
parameters=parameters or {},
description=description,
version=version,
version_type=version_type,
enforce_parameter_schema=enforce_parameter_schema,
work_pool_name=work_pool_name,
work_queue_name=work_queue_name,
job_variables=job_variables,
entrypoint_type=entrypoint_type,
_sla=_sla,
)
@async_dispatch(ato_deployment)
def to_deployment(
self,
name: str,
interval: Optional[
Union[
Iterable[Union[int, float, datetime.timedelta]],
int,
float,
datetime.timedelta,
]
] = None,
cron: Optional[Union[Iterable[str], str]] = None,
rrule: Optional[Union[Iterable[str], str]] = None,
paused: Optional[bool] = None,
schedule: Optional[Schedule] = None,
schedules: Optional["FlexibleScheduleList"] = None,
concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
parameters: Optional[dict[str, Any]] = None,
triggers: Optional[list[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
description: Optional[str] = None,
tags: Optional[list[str]] = None,
version: Optional[str] = None,
version_type: Optional[VersionType] = None,
enforce_parameter_schema: bool = True,
work_pool_name: Optional[str] = None,
work_queue_name: Optional[str] = None,
job_variables: Optional[dict[str, Any]] = None,
entrypoint_type: EntrypointType = EntrypointType.FILE_PATH,
_sla: Optional[Union[SlaTypes, list[SlaTypes]]] = None, # experimental
) -> "RunnerDeployment":
"""
Creates a runner deployment object for this flow.
Args:
name: The name to give the created deployment.
interval: An interval on which to execute the new deployment. Accepts either a number
or a timedelta object. If a number is given, it will be interpreted as seconds.
cron: A cron schedule of when to execute runs of this deployment.
rrule: An rrule schedule of when to execute runs of this deployment.
paused: Whether or not to set this deployment as paused.
schedule: A schedule object defining when to execute runs of this deployment.
Used to provide additional scheduling options like `timezone` or `parameters`.
schedules: A list of schedule objects defining when to execute runs of this deployment.
Used to define multiple schedules or additional scheduling options such as `timezone`.
concurrency_limit: The maximum number of runs of this deployment that can run at the same time.
parameters: A dictionary of default parameter values to pass to runs of this deployment.
triggers: A list of triggers that will kick off runs of this deployment.
description: A description for the created deployment. Defaults to the flow's
description if not provided.
tags: A list of tags to associate with the created deployment for organizational
purposes.
version: A version for the created deployment. Defaults to the flow's version.
version_type: The type of version to use for the created deployment. The version type
will be inferred if not provided.
enforce_parameter_schema: Whether or not the Prefect API should enforce the
parameter schema for the created deployment.
work_pool_name: The name of the work pool to use for this deployment.
work_queue_name: The name of the work queue to use for this deployment's scheduled runs.
If not provided the default work queue for the work pool will be used.
job_variables: Settings used to override the values specified in the default base job template
of the chosen work pool. Refer to the base job template of the chosen work pool for
available settings.
entrypoint_type: Type of entrypoint to use for the deployment. When using a module path
entrypoint, ensure that the module will be importable in the execution environment.
_sla: (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
Examples:
Prepare two deployments and serve them:
```python
from prefect import flow, serve
@flow
def my_flow(name):
print(f"hello {name}")
@flow
def my_other_flow(name):
print(f"goodbye {name}")
if __name__ == "__main__":
hello_deploy = my_flow.to_deployment("hello", tags=["dev"])
bye_deploy = my_other_flow.to_deployment("goodbye", tags=["dev"])
serve(hello_deploy, bye_deploy)
```
"""
from prefect.deployments.runner import RunnerDeployment
if not name.endswith(".py"):
_raise_on_name_with_banned_characters(name)
if self._storage and self._entrypoint:
return cast(
RunnerDeployment,
RunnerDeployment.from_storage(
storage=self._storage,
entrypoint=self._entrypoint,
name=name,
flow_name=self.name,
interval=interval,
cron=cron,
rrule=rrule,
paused=paused,
schedule=schedule,
schedules=schedules,
concurrency_limit=concurrency_limit,
tags=tags,
triggers=triggers,
parameters=parameters or {},
description=description,
version=version,
version_type=version_type,
enforce_parameter_schema=enforce_parameter_schema,
work_pool_name=work_pool_name,
work_queue_name=work_queue_name,
job_variables=job_variables,
_sla=_sla,
_sync=True, # pyright: ignore[reportCallIssue] _sync is valid because .from_storage is decorated with async_dispatch
),
)
else:
return RunnerDeployment.from_flow(
flow=self,
name=name,
interval=interval,
cron=cron,
rrule=rrule,
paused=paused,
schedule=schedule,
schedules=schedules,
concurrency_limit=concurrency_limit,
tags=tags,
triggers=triggers,
parameters=parameters or {},
description=description,
version=version,
version_type=version_type,
enforce_parameter_schema=enforce_parameter_schema,
work_pool_name=work_pool_name,
work_queue_name=work_queue_name,
job_variables=job_variables,
entrypoint_type=entrypoint_type,
_sla=_sla,
)
def on_completion(self, fn: FlowStateHook[P, R]) -> FlowStateHook[P, R]:
self.on_completion_hooks.append(fn)
return fn
def on_cancellation(self, fn: FlowStateHook[P, R]) -> FlowStateHook[P, R]:
self.on_cancellation_hooks.append(fn)
return fn
def on_crashed(self, fn: FlowStateHook[P, R]) -> FlowStateHook[P, R]:
self.on_crashed_hooks.append(fn)
return fn
def on_running(self, fn: FlowStateHook[P, R]) -> FlowStateHook[P, R]:
self.on_running_hooks.append(fn)
return fn
def on_failure(self, fn: FlowStateHook[P, R]) -> FlowStateHook[P, R]:
self.on_failure_hooks.append(fn)
return fn
def serve(
self,
name: Optional[str] = None,
interval: Optional[
Union[
Iterable[Union[int, float, datetime.timedelta]],
int,
float,
datetime.timedelta,
]
] = None,
cron: Optional[Union[Iterable[str], str]] = None,
rrule: Optional[Union[Iterable[str], str]] = None,
paused: Optional[bool] = None,
schedule: Optional[Schedule] = None,
schedules: Optional["FlexibleScheduleList"] = None,
global_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
triggers: Optional[list[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
parameters: Optional[dict[str, Any]] = None,
description: Optional[str] = None,
tags: Optional[list[str]] = None,
version: Optional[str] = None,
enforce_parameter_schema: bool = True,
pause_on_shutdown: bool = True,
print_starting_message: bool = True,
limit: Optional[int] = None,
webserver: bool = False,
entrypoint_type: EntrypointType = EntrypointType.FILE_PATH,
) -> None:
"""
Creates a deployment for this flow and starts a runner to monitor for scheduled work.
Args:
name: The name to give the created deployment. Defaults to the name of the flow.
interval: An interval on which to execute the deployment. Accepts a number or a
timedelta object to create a single schedule. If a number is given, it will be
interpreted as seconds. Also accepts an iterable of numbers or timedelta to create
multiple schedules.
cron: A cron schedule string of when to execute runs of this deployment.
Also accepts an iterable of cron schedule strings to create multiple schedules.
rrule: An rrule schedule string of when to execute runs of this deployment.
Also accepts an iterable of rrule schedule strings to create multiple schedules.
triggers: A list of triggers that will kick off runs of this deployment.
paused: Whether or not to set this deployment as paused.
schedule: A schedule object defining when to execute runs of this deployment.
Used to provide additional scheduling options like `timezone` or `parameters`.
schedules: A list of schedule objects defining when to execute runs of this deployment.
Used to define multiple schedules or additional scheduling options like `timezone`.
global_limit: The maximum number of concurrent runs allowed across all served flow instances associated with the same deployment.
parameters: A dictionary of default parameter values to pass to runs of this deployment.
description: A description for the created deployment. Defaults to the flow's
description if not provided.
tags: A list of tags to associate with the created deployment for organizational
purposes.
version: A version for the created deployment. Defaults to the flow's version.
enforce_parameter_schema: Whether or not the Prefect API should enforce the
parameter schema for the created deployment.
pause_on_shutdown: If True, the provided schedules will be paused when the serve function is stopped.
If False, the schedules will continue running.
print_starting_message: Whether or not to print the starting message when flow is served.
limit: The maximum number of runs that can be executed concurrently by the created runner; only applies to this served flow. To apply a limit across multiple served flows, use `global_limit`.
webserver: Whether or not to start a monitoring webserver for this flow.
entrypoint_type: Type of entrypoint to use for the deployment. When using a module path
entrypoint, ensure that the module will be importable in the execution environment.
Examples:
Serve a flow:
```python
from prefect import flow
@flow
def my_flow(name):
print(f"hello {name}")
if __name__ == "__main__":
my_flow.serve("example-deployment")
```
Serve a flow and run it every hour:
```python
from prefect import flow
@flow
def my_flow(name):
print(f"hello {name}")
if __name__ == "__main__":
my_flow.serve("example-deployment", interval=3600)
```
"""
from prefect.runner import Runner
if not name:
name = self.name
else:
# Only strip extension if it is a file path
if (p := Path(name)).is_file():
name = p.stem
runner = Runner(name=name, pause_on_shutdown=pause_on_shutdown, limit=limit)
deployment_id = runner.add_flow(
self,
name=name,
triggers=triggers,
interval=interval,
cron=cron,
rrule=rrule,
paused=paused,
schedule=schedule,
schedules=schedules,
concurrency_limit=global_limit,
parameters=parameters,
description=description,
tags=tags,
version=version,
enforce_parameter_schema=enforce_parameter_schema,
entrypoint_type=entrypoint_type,
)
if print_starting_message:
help_message = (
f"[green]Your flow {self.name!r} is being served and polling for"
" scheduled runs!\n[/]\nTo trigger a run for this flow, use the"
" following command:\n[blue]\n\t$ prefect deployment run"
f" '{self.name}/{name}'\n[/]"
)
if PREFECT_UI_URL:
help_message += (
"\nYou can also run your flow via the Prefect UI:"
f" [blue]{PREFECT_UI_URL.value()}/deployments/deployment/{deployment_id}[/]\n"
)
console = Console()
console.print(help_message, soft_wrap=True)
try:
loop = asyncio.get_running_loop()
except RuntimeError as exc:
if "no running event loop" in str(exc):
loop = None
else:
raise
try:
if loop is not None:
loop.run_until_complete(runner.start(webserver=webserver))
else:
asyncio.run(runner.start(webserver=webserver))
except (KeyboardInterrupt, TerminationSignal) as exc:
logger.info(f"Received {type(exc).__name__}, shutting down...")
if loop is not None:
loop.stop()
@classmethod
async def afrom_source(
cls,
source: Union[str, Path, "RunnerStorage", ReadableDeploymentStorage],
entrypoint: str,
) -> "Flow[..., Any]":
"""
Loads a flow from a remote source asynchronously.
Args:
source: Either a URL to a git repository or a storage object.
entrypoint: The path to a file containing a flow and the name of the flow function in
the format `./path/to/file.py:flow_func_name`.
Returns:
A new `Flow` instance.
Examples:
Load a flow from a public git repository:
```python
from prefect import flow
from prefect.runner.storage import GitRepository
from prefect.blocks.system import Secret
my_flow = flow.from_source(
source="https://github.com/org/repo.git",
entrypoint="flows.py:my_flow",
)
my_flow()
```
Load a flow from a private git repository using an access token stored in a `Secret` block:
```python
from prefect import flow
from prefect.runner.storage import GitRepository
from prefect.blocks.system import Secret
my_flow = flow.from_source(
source=GitRepository(
url="https://github.com/org/repo.git",
credentials={"access_token": Secret.load("github-access-token")}
),
entrypoint="flows.py:my_flow",
)
my_flow()
```
Load a flow from a local directory:
``` python
# from_local_source.py
from pathlib import Path
from prefect import flow
@flow(log_prints=True)
def my_flow(name: str = "world"):
print(f"Hello {name}! I'm a flow from a Python script!")
if __name__ == "__main__":
my_flow.from_source(
source=str(Path(__file__).parent),
entrypoint="from_local_source.py:my_flow",
).deploy(
name="my-deployment",
parameters=dict(name="Marvin"),
work_pool_name="local",
)
```
"""
from prefect.runner.storage import (
BlockStorageAdapter,
LocalStorage,
RunnerStorage,
create_storage_from_source,
)
if isinstance(source, (Path, str)):
if isinstance(source, Path):
source = str(source)
storage = create_storage_from_source(source)
elif isinstance(source, RunnerStorage):
storage = source
elif hasattr(source, "get_directory"):
storage = BlockStorageAdapter(source)
else:
raise TypeError(
f"Unsupported source type {type(source).__name__!r}. Please provide a"
" URL to remote storage or a storage object."
)
with tempfile.TemporaryDirectory() as tmpdir:
if not isinstance(storage, LocalStorage):
storage.set_base_path(Path(tmpdir))
await storage.pull_code()
full_entrypoint = str(storage.destination / entrypoint)
flow = cast(
"Flow[..., Any]",
await from_async.wait_for_call_in_new_thread(
create_call(load_flow_from_entrypoint, full_entrypoint)
),
)
flow._storage = storage
flow._entrypoint = entrypoint
return flow
@classmethod
@async_dispatch(afrom_source)
def from_source(
cls,
source: Union[str, Path, "RunnerStorage", ReadableDeploymentStorage],
entrypoint: str,
) -> "Flow[..., Any]":
"""
Loads a flow from a remote source.
Args:
source: Either a URL to a git repository or a storage object.
entrypoint: The path to a file containing a flow and the name of the flow function in
the format `./path/to/file.py:flow_func_name`.
Returns:
A new `Flow` instance.
Examples:
Load a flow from a public git repository:
```python
from prefect import flow
from prefect.runner.storage import GitRepository
from prefect.blocks.system import Secret
my_flow = flow.from_source(
source="https://github.com/org/repo.git",
entrypoint="flows.py:my_flow",
)
my_flow()
```
Load a flow from a private git repository using an access token stored in a `Secret` block:
```python
from prefect import flow
from prefect.runner.storage import GitRepository
from prefect.blocks.system import Secret
my_flow = flow.from_source(
source=GitRepository(
url="https://github.com/org/repo.git",
credentials={"access_token": Secret.load("github-access-token")}
),
entrypoint="flows.py:my_flow",
)
my_flow()
```
Load a flow from a local directory:
``` python
# from_local_source.py
from pathlib import Path
from prefect import flow
@flow(log_prints=True)
def my_flow(name: str = "world"):
print(f"Hello {name}! I'm a flow from a Python script!")
if __name__ == "__main__":
my_flow.from_source(
source=str(Path(__file__).parent),
entrypoint="from_local_source.py:my_flow",
).deploy(
name="my-deployment",
parameters=dict(name="Marvin"),
work_pool_name="local",
)
```
"""
from prefect.runner.storage import (
BlockStorageAdapter,
LocalStorage,
RunnerStorage,
create_storage_from_source,
)
if isinstance(source, (Path, str)):
if isinstance(source, Path):
source = str(source)
storage = create_storage_from_source(source)
elif isinstance(source, RunnerStorage):
storage = source
elif hasattr(source, "get_directory"):
storage = BlockStorageAdapter(source)
else:
raise TypeError(
f"Unsupported source type {type(source).__name__!r}. Please provide a"
" URL to remote storage or a storage object."
)
with tempfile.TemporaryDirectory() as tmpdir:
if not isinstance(storage, LocalStorage):
storage.set_base_path(Path(tmpdir))
run_coro_as_sync(storage.pull_code())
full_entrypoint = str(storage.destination / entrypoint)
flow = load_flow_from_entrypoint(full_entrypoint)
flow._storage = storage
flow._entrypoint = entrypoint
return flow
@sync_compatible
async def deploy(
self,
name: str,
work_pool_name: Optional[str] = None,
image: Optional[Union[str, "DockerImage"]] = None,
build: bool = True,
push: bool = True,
work_queue_name: Optional[str] = None,
job_variables: Optional[dict[str, Any]] = None,
interval: Optional[Union[int, float, datetime.timedelta]] = None,
cron: Optional[str] = None,
rrule: Optional[str] = None,
paused: Optional[bool] = None,
schedule: Optional[Schedule] = None,
schedules: Optional[list[Schedule]] = None,
concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
triggers: Optional[list[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
parameters: Optional[dict[str, Any]] = None,
description: Optional[str] = None,
tags: Optional[list[str]] = None,
version: Optional[str] = None,
version_type: Optional[VersionType] = None,
enforce_parameter_schema: bool = True,
entrypoint_type: EntrypointType = EntrypointType.FILE_PATH,
print_next_steps: bool = True,
ignore_warnings: bool = False,
_sla: Optional[Union[SlaTypes, list[SlaTypes]]] = None,
) -> UUID:
"""
Deploys a flow to run on dynamic infrastructure via a work pool.
By default, calling this method will build a Docker image for the flow, push it to a registry,
and create a deployment via the Prefect API that will run the flow on the given schedule.
If you want to use an existing image, you can pass `build=False` to skip building and pushing
an image.
Args:
name: The name to give the created deployment.
work_pool_name: The name of the work pool to use for this deployment. Defaults to
the value of `PREFECT_DEFAULT_WORK_POOL_NAME`.
image: The name of the Docker image to build, including the registry and
repository. Pass a DockerImage instance to customize the Dockerfile used
and build arguments.
build: Whether or not to build a new image for the flow. If False, the provided
image will be used as-is and pulled at runtime.
push: Whether or not to push the built image to a registry.
work_queue_name: The name of the work queue to use for this deployment's scheduled runs.
If not provided the default work queue for the work pool will be used.
job_variables: Settings used to override the values specified in the default base job template
of the chosen work pool. Refer to the base job template of the chosen work pool for
available settings.
interval: An interval on which to execute the deployment. Accepts a number or a
timedelta object to create a single schedule. If a number is given, it will be
interpreted as seconds. Also accepts an iterable of numbers or timedelta to create
multiple schedules.
cron: A cron schedule string of when to execute runs of this deployment.
Also accepts an iterable of cron schedule strings to create multiple schedules.
rrule: An rrule schedule string of when to execute runs of this deployment.
Also accepts an iterable of rrule schedule strings to create multiple schedules.
triggers: A list of triggers that will kick off runs of this deployment.
paused: Whether or not to set this deployment as paused.
schedule: A schedule object defining when to execute runs of this deployment.
Used to provide additional scheduling options like `timezone` or `parameters`.
schedules: A list of schedule objects defining when to execute runs of this deployment.
Used to define multiple schedules or additional scheduling options like `timezone`.
concurrency_limit: The maximum number of runs that can be executed concurrently.
parameters: A dictionary of default parameter values to pass to runs of this deployment.
description: A description for the created deployment. Defaults to the flow's
description if not provided.
tags: A list of tags to associate with the created deployment for organizational
purposes.
version: A version for the created deployment. Defaults to the flow's version.
version_type: The type of version to use for the created deployment. The version type
will be inferred if not provided.
enforce_parameter_schema: Whether or not the Prefect API should enforce the
parameter schema for the created deployment.
entrypoint_type: Type of entrypoint to use for the deployment. When using a module path
entrypoint, ensure that the module will be importable in the execution environment.
print_next_steps: Whether or not to print a message with next steps
after deploying the deployment.
ignore_warnings: Whether or not to ignore warnings about the work pool type.
_sla: (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
Returns:
The ID of the created/updated deployment.
Examples:
Deploy a local flow to a work pool:
```python
from prefect import flow
@flow
def my_flow(name):
print(f"hello {name}")
if __name__ == "__main__":
my_flow.deploy(
"example-deployment",
work_pool_name="my-work-pool",
image="my-repository/my-image:dev",
)
```
Deploy a remotely stored flow to a work pool:
```python
from prefect import flow
if __name__ == "__main__":
flow.from_source(
source="https://github.com/org/repo.git",
entrypoint="flows.py:my_flow",
).deploy(
"example-deployment",
work_pool_name="my-work-pool",
image="my-repository/my-image:dev",
)
```
"""
if not (
work_pool_name := work_pool_name or PREFECT_DEFAULT_WORK_POOL_NAME.value()
):
raise ValueError(
"No work pool name provided. Please provide a `work_pool_name` or set the"
" `PREFECT_DEFAULT_WORK_POOL_NAME` environment variable."
)
from prefect.client.orchestration import get_client
try:
async with get_client() as client:
work_pool = await client.read_work_pool(work_pool_name)
active_workers = await client.read_workers_for_work_pool(
work_pool_name,
worker_filter=WorkerFilter(
status=WorkerFilterStatus(any_=["ONLINE"])
),
)
except ObjectNotFound as exc:
raise ValueError(
f"Could not find work pool {work_pool_name!r}. Please create it before"
" deploying this flow."
) from exc
to_deployment_coro = self.to_deployment(
name=name,
interval=interval,
cron=cron,
rrule=rrule,
schedule=schedule,
schedules=schedules,
concurrency_limit=concurrency_limit,
paused=paused,
triggers=triggers,
parameters=parameters,
description=description,
tags=tags,
version=version,
version_type=version_type,
enforce_parameter_schema=enforce_parameter_schema,
work_queue_name=work_queue_name,
job_variables=job_variables,
entrypoint_type=entrypoint_type,
_sla=_sla,
)
if inspect.isawaitable(to_deployment_coro):
deployment = await to_deployment_coro
else:
deployment = to_deployment_coro
from prefect.deployments.runner import deploy
deploy_coro = deploy(
deployment,
work_pool_name=work_pool_name,
image=image,
build=build,
push=push,
print_next_steps_message=False,
ignore_warnings=ignore_warnings,
)
if TYPE_CHECKING:
assert inspect.isawaitable(deploy_coro)
deployment_ids = await deploy_coro
if print_next_steps:
console = Console()
if (
not work_pool.is_push_pool
and not work_pool.is_managed_pool
and not active_workers
):
console.print(
"\nTo execute flow runs from this deployment, start a worker in a"
" separate terminal that pulls work from the"
f" {work_pool_name!r} work pool:"
)
console.print(
f"\n\t$ prefect worker start --pool {work_pool_name!r}",
style="blue",
)
console.print(
"\nTo schedule a run for this deployment, use the following command:"
)
console.print(
f"\n\t$ prefect deployment run '{self.name}/{name}'\n",
style="blue",
)
if PREFECT_UI_URL:
message = (
"\nYou can also run your flow via the Prefect UI:"
f" [blue]{PREFECT_UI_URL.value()}/deployments/deployment/{deployment_ids[0]}[/]\n"
)
console.print(message, soft_wrap=True)
return deployment_ids[0]
@overload
def __call__(self: "Flow[P, NoReturn]", *args: P.args, **kwargs: P.kwargs) -> None:
# `NoReturn` matches if a type can't be inferred for the function which stops a
# sync function from matching the `Coroutine` overload
...
@overload
def __call__(
self: "Flow[P, Coroutine[Any, Any, T]]", *args: P.args, **kwargs: P.kwargs
) -> Coroutine[Any, Any, T]: ...
@overload
def __call__(
self: "Flow[P, T]",
*args: P.args,
**kwargs: P.kwargs,
) -> T: ...
@overload
def __call__(
self: "Flow[P, Coroutine[Any, Any, T]]",
*args: P.args,
return_state: Literal[True],
**kwargs: P.kwargs,
) -> Awaitable[State[T]]: ...
@overload
def __call__(
self: "Flow[P, T]",
*args: P.args,
return_state: Literal[True],
**kwargs: P.kwargs,
) -> State[T]: ...
def __call__(
self,
*args: "P.args",
return_state: bool = False,
wait_for: Optional[Iterable[PrefectFuture[Any]]] = None,
**kwargs: "P.kwargs",
):
"""
Run the flow and return its result.
Flow parameter values must be serializable by Pydantic.
If writing an async flow, this call must be awaited.
This will create a new flow run in the API.
Args:
*args: Arguments to run the flow with.
return_state: Return a Prefect State containing the result of the
flow run.
wait_for: Upstream task futures to wait for before starting the flow if called as a subflow
**kwargs: Keyword arguments to run the flow with.
Returns:
If `return_state` is False, returns the result of the flow run.
If `return_state` is True, returns the result of the flow run
wrapped in a Prefect State which provides error handling.
Examples:
Define a flow
```python
@flow
def my_flow(name):
print(f"hello {name}")
return f"goodbye {name}"
```
Run a flow
```python
my_flow("marvin")
```
Run a flow with additional tags
```python
from prefect import tags
with tags("db", "blue"):
my_flow("foo")
```
"""
from prefect.utilities.visualization import (
get_task_viz_tracker,
track_viz_task,
)
# Convert the call args/kwargs to a parameter dict
parameters = get_call_parameters(self.fn, args, kwargs)
return_type = "state" if return_state else "result"
task_viz_tracker = get_task_viz_tracker()
if task_viz_tracker:
# this is a subflow, for now return a single task and do not go further
# we can add support for exploring subflows for tasks in the future.
return track_viz_task(self.isasync, self.name, parameters)
from prefect.flow_engine import run_flow
return run_flow(
flow=self,
parameters=parameters,
wait_for=wait_for,
return_type=return_type,
)
@sync_compatible
async def visualize(self, *args: "P.args", **kwargs: "P.kwargs"):
"""
Generates a graphviz object representing the current flow. In IPython notebooks,
it's rendered inline, otherwise in a new window as a PNG.
Raises:
- ImportError: If `graphviz` isn't installed.
- GraphvizExecutableNotFoundError: If the `dot` executable isn't found.
- FlowVisualizationError: If the flow can't be visualized for any other reason.
"""
from prefect.utilities.visualization import (
FlowVisualizationError,
GraphvizExecutableNotFoundError,
GraphvizImportError,
TaskVizTracker,
VisualizationUnsupportedError,
build_task_dependencies,
visualize_task_dependencies,
)
if not PREFECT_TESTING_UNIT_TEST_MODE:
warnings.warn(
"`flow.visualize()` will execute code inside of your flow that is not"
" decorated with `@task` or `@flow`."
)
try:
with TaskVizTracker() as tracker:
if self.isasync:
await self.fn(*args, **kwargs) # type: ignore[reportGeneralTypeIssues]
else:
self.fn(*args, **kwargs)
graph = build_task_dependencies(tracker)
visualize_task_dependencies(graph, self.name)
except GraphvizImportError:
raise
except GraphvizExecutableNotFoundError:
raise
except VisualizationUnsupportedError:
raise
except FlowVisualizationError:
raise
except Exception as e:
msg = (
"It's possible you are trying to visualize a flow that contains "
"code that directly interacts with the result of a task"
" inside of the flow. \nTry passing a `viz_return_value` "
"to the task decorator, e.g. `@task(viz_return_value=[1, 2, 3]).`"
)
new_exception = type(e)(str(e) + "\n" + msg)
# Copy traceback information from the original exception
new_exception.__traceback__ = e.__traceback__
raise new_exception
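The hook-registration methods near the end of this class (`on_completion`, `on_failure`, `on_cancellation`, `on_crashed`, `on_running`) append the given callable to the matching hook list and return it, so they work as decorators. A minimal sketch of that pattern, assuming the usual Prefect state-hook signature of `(flow, flow_run, state)`; the hook names are illustrative:

```python
from prefect import flow


@flow
def my_flow(name: str) -> str:
    return f"hello {name}"


# Illustrative hook names; the (flow, flow_run, state) hook signature is assumed.
@my_flow.on_completion
def announce_success(flow, flow_run, state):
    print(f"{flow.name} run finished in state {state.name}")


@my_flow.on_failure
def announce_failure(flow, flow_run, state):
    print(f"{flow.name} run failed: {state.message}")


if __name__ == "__main__":
    my_flow("marvin")  # hooks fire once the run reaches a terminal state
```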
| target: Flow |
| language: python | repo: getsentry__sentry | path: src/sentry/api/event_search.py | class_span: {"start": 23449, "end": 26128} |
class ____[TAllowBoolean: (Literal[True], Literal[False]) = Literal[True]]: # noqa: E251
"""
Configures how the search parser interprets a search query
"""
# <target_name>: [<list of source names>]
key_mappings: Mapping[str, list[str]] = field(default_factory=dict)
# Text keys we allow operators to be used on
text_operator_keys: set[str] = field(default_factory=set)
# Keys which are considered valid for duration filters
duration_keys: set[str] = field(default_factory=set)
# Keys considered valid for numeric filter types
numeric_keys: set[str] = field(default_factory=set)
# Keys considered valid for date filter types
date_keys: set[str] = field(default_factory=set)
# Keys considered valid for boolean filter types
boolean_keys: set[str] = field(default_factory=set)
# A mapping of string values that may be provided to `is:<value>` which
# translates to a pair of SearchKey + SearchValue's. An empty list disables
# this feature for the search
is_filter_translation: Mapping[str, tuple[str, Any]] = field(default_factory=dict)
# Enables boolean filtering (AND / OR)
allow_boolean: TAllowBoolean = True # type: ignore[assignment] # python/mypy#18812
# Allows us to specify an allowlist of keys we will accept for this search.
# If empty, allow all keys.
allowed_keys: set[str] = field(default_factory=set)
# Allows us to specify a list of keys we will not accept for this search.
blocked_keys: set[str] = field(default_factory=set)
# Which key we should return any free text under
free_text_key = "message"
# Whether to wrap free_text_keys in asterisks
wildcard_free_text: bool = False
# Whether to allow the use of the !has filter
allow_not_has_filter: bool = True
@overload
@classmethod
def create_from[TBool: (
Literal[True],
Literal[False],
)](
cls: type[SearchConfig[Any]],
search_config: SearchConfig[Any],
*,
allow_boolean: TBool,
**overrides: Any,
) -> SearchConfig[TBool]: ...
@overload
@classmethod
def create_from[TBool: (
Literal[True],
Literal[False],
)](
cls: type[SearchConfig[Any]],
search_config: SearchConfig[TBool],
**overrides: Any,
) -> SearchConfig[TBool]: ...
@classmethod
def create_from(
cls: type[SearchConfig[Any]], search_config: SearchConfig[Any], **overrides: Any
) -> SearchConfig[Any]:
config = cls(**asdict(search_config))
for key, val in overrides.items():
setattr(config, key, val)
return config
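`create_from` copies an existing config with `asdict` and then applies keyword overrides, which is how a specialized parser config is derived from a base one. A minimal sketch under the assumption that `SearchConfig` is a dataclass (implied by the `field(default_factory=...)` defaults); all key names are illustrative:

```python
# Illustrative key names; assumes SearchConfig is a dataclass.
base_config = SearchConfig(
    duration_keys={"transaction.duration"},
    numeric_keys={"stack.colno"},
)

# Copy the base config and override two fields. The first create_from overload
# narrows the result type to SearchConfig[Literal[False]].
strict_config = SearchConfig.create_from(
    base_config,
    allow_boolean=False,
    blocked_keys={"timestamp"},
)

assert strict_config.duration_keys == {"transaction.duration"}
assert strict_config.allow_boolean is False
```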
| target: SearchConfig |
| language: python | repo: apache__airflow | path: providers/google/src/airflow/providers/google/cloud/operators/cloud_composer.py | class_span: {"start": 2273, "end": 2512} |
class ____(BaseGoogleLink):
"""Helper class for constructing Cloud Composer Environment Link."""
name = "Cloud Composer Environment"
key = "composer_conf"
format_str = CLOUD_COMPOSER_DETAILS_LINK
| target: CloudComposerEnvironmentLink |
| language: python | repo: apache__airflow | path: providers/google/src/airflow/providers/google/ads/transfers/ads_to_gcs.py | class_span: {"start": 1247, "end": 5122} |
class ____(BaseOperator):
"""
Fetch daily results from the Google Ads API for 1-n clients.
Converts and saves the data as a temporary CSV file, then uploads the CSV to
Google Cloud Storage.
.. seealso::
For more information on the Google Ads API, take a look at the API docs:
https://developers.google.com/google-ads/api/docs/start
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleAdsToGcsOperator`
:param client_ids: Google Ads client IDs to query
:param query: Google Ads Query Language API query
:param attributes: List of Google Ads Row attributes to extract
:param bucket: The GCS bucket to upload to
:param obj: GCS path to save the object. Must be the full file path (ex. `path/to/file.txt`)
:param gcp_conn_id: Airflow Google Cloud connection ID
:param google_ads_conn_id: Airflow Google Ads connection ID
:param gzip: Option to compress local file or file data for upload
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param api_version: Optional Google Ads API version to use.
"""
template_fields: Sequence[str] = (
"client_ids",
"query",
"attributes",
"bucket",
"obj",
"impersonation_chain",
)
def __init__(
self,
*,
client_ids: list[str],
query: str,
attributes: list[str],
bucket: str,
obj: str,
gcp_conn_id: str = "google_cloud_default",
google_ads_conn_id: str = "google_ads_default",
gzip: bool = False,
impersonation_chain: str | Sequence[str] | None = None,
api_version: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.client_ids = client_ids
self.query = query
self.attributes = attributes
self.bucket = bucket
self.obj = obj
self.gcp_conn_id = gcp_conn_id
self.google_ads_conn_id = google_ads_conn_id
self.gzip = gzip
self.impersonation_chain = impersonation_chain
self.api_version = api_version
def execute(self, context: Context) -> None:
service = GoogleAdsHook(
gcp_conn_id=self.gcp_conn_id,
google_ads_conn_id=self.google_ads_conn_id,
api_version=self.api_version,
)
rows = service.search(client_ids=self.client_ids, query=self.query)
try:
getter = attrgetter(*self.attributes)
converted_rows = [getter(row) for row in rows]
except Exception as e:
self.log.error("An error occurred in converting the Google Ad Rows. \n Error %s", e)
raise
with NamedTemporaryFile("w", suffix=".csv") as csvfile:
writer = csv.writer(csvfile)
writer.writerows(converted_rows)
csvfile.flush()
hook = GCSHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
hook.upload(
bucket_name=self.bucket,
object_name=self.obj,
filename=csvfile.name,
gzip=self.gzip,
)
self.log.info("%s uploaded to GCS", self.obj)
| target: GoogleAdsToGcsOperator |
| language: python | repo: pypa__hatch | path: tests/env/plugin/test_interface.py | class_span: {"start": 24857, "end": 30079} |
class ____:
def test_default(self, isolation, isolated_data_dir, platform, global_application):
config = {"project": {"name": "my_app", "version": "0.0.1"}}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.dependency_groups == []
def test_not_array(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"dependency-groups": 9000}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
TypeError, match="Field `tool.hatch.envs.default.dependency-groups` must be an array of strings"
):
_ = environment.dependency_groups
def test_correct(self, isolation, isolated_data_dir, platform, temp_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"dependency-groups": {"foo-bar": [], "baz": []},
"tool": {"hatch": {"envs": {"default": {"dependency-groups": ["Foo...Bar", "Baz", "baZ"]}}}},
}
project = Project(isolation, config=config)
project.set_app(temp_application)
temp_application.project = project
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
temp_application,
)
assert environment.dependency_groups == ["baz", "foo-bar"]
def test_group_not_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"dependency-groups": {"foo": [], "bar": []},
"tool": {"hatch": {"envs": {"default": {"dependency-groups": [9000]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
TypeError, match="Group #1 of field `tool.hatch.envs.default.dependency-groups` must be a string"
):
_ = environment.dependency_groups
def test_group_empty_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"dependency-groups": {"foo": [], "bar": []},
"tool": {"hatch": {"envs": {"default": {"dependency-groups": [""]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
ValueError, match="Group #1 of field `tool.hatch.envs.default.dependency-groups` cannot be an empty string"
):
_ = environment.dependency_groups
def test_group_undefined(self, isolation, isolated_data_dir, platform, temp_application):
config = {
"project": {
"name": "my_app",
"version": "0.0.1",
},
"dependency-groups": {"foo": []},
"tool": {"hatch": {"envs": {"default": {"dependency-groups": ["foo", "bar", ""]}}}},
}
project = Project(isolation, config=config)
project.set_app(temp_application)
temp_application.project = project
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
temp_application,
)
with pytest.raises(
ValueError,
match=(
"Group `bar` of field `tool.hatch.envs.default.dependency-groups` is not "
"defined in field `dependency-groups`"
),
):
_ = environment.dependency_groups
| target: TestDependencyGroups |
| language: python | repo: django__django | path: tests/model_inheritance_regress/models.py | class_span: {"start": 2949, "end": 3110} |
class ____(CertificationAudit):
auditing_dept = models.CharField(max_length=20)
# Abstract classes don't get m2m tables autocreated.
| target: InternalCertificationAudit |
| language: python | repo: langchain-ai__langchain | path: libs/core/langchain_core/runnables/base.py | class_span: {"start": 206258, "end": 213944} |
class ____(RunnableBindingBase[Input, Output]): # type: ignore[no-redef]
"""Wrap a `Runnable` with additional functionality.
A `RunnableBinding` can be thought of as a "runnable decorator" that
preserves the essential features of `Runnable`; i.e., batching, streaming,
and async support, while adding additional functionality.
Any class that inherits from `Runnable` can be bound to a `RunnableBinding`.
Runnables expose a standard set of methods for creating `RunnableBindings`
or sub-classes of `RunnableBindings` (e.g., `RunnableRetry`,
`RunnableWithFallbacks`) that add additional functionality.
These methods include:
- `bind`: Bind kwargs to pass to the underlying `Runnable` when running it.
- `with_config`: Bind config to pass to the underlying `Runnable` when running
it.
- `with_listeners`: Bind lifecycle listeners to the underlying `Runnable`.
- `with_types`: Override the input and output types of the underlying
`Runnable`.
- `with_retry`: Bind a retry policy to the underlying `Runnable`.
- `with_fallbacks`: Bind a fallback policy to the underlying `Runnable`.
Example:
`bind`: Bind kwargs to pass to the underlying `Runnable` when running it.
```python
# Create a Runnable binding that invokes the chat model with the
# additional kwarg `stop=['-']` when running it.
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
model.invoke('Say "Parrot-MAGIC"', stop=["-"]) # Should return `Parrot`
# Using it the easy way via `bind` method which returns a new
# RunnableBinding
runnable_binding = model.bind(stop=["-"])
runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
```
Can also be done by instantiating a `RunnableBinding` directly (not
recommended):
```python
from langchain_core.runnables import RunnableBinding
runnable_binding = RunnableBinding(
bound=model,
kwargs={"stop": ["-"]}, # <-- Note the additional kwargs
)
runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
```
"""
@override
def bind(self, **kwargs: Any) -> Runnable[Input, Output]:
"""Bind additional kwargs to a `Runnable`, returning a new `Runnable`.
Args:
**kwargs: The kwargs to bind to the `Runnable`.
Returns:
A new `Runnable` with the same type and config as the original,
but with the additional kwargs bound.
"""
return self.__class__(
bound=self.bound,
config=self.config,
config_factories=self.config_factories,
kwargs={**self.kwargs, **kwargs},
custom_input_type=self.custom_input_type,
custom_output_type=self.custom_output_type,
)
@override
def with_config(
self,
config: RunnableConfig | None = None,
# Sadly Unpack is not well supported by mypy so this will have to be untyped
**kwargs: Any,
) -> Runnable[Input, Output]:
return self.__class__(
bound=self.bound,
kwargs=self.kwargs,
config=cast("RunnableConfig", {**self.config, **(config or {}), **kwargs}),
config_factories=self.config_factories,
custom_input_type=self.custom_input_type,
custom_output_type=self.custom_output_type,
)
@override
def with_listeners(
self,
*,
on_start: Callable[[Run], None]
| Callable[[Run, RunnableConfig], None]
| None = None,
on_end: Callable[[Run], None]
| Callable[[Run, RunnableConfig], None]
| None = None,
on_error: Callable[[Run], None]
| Callable[[Run, RunnableConfig], None]
| None = None,
) -> Runnable[Input, Output]:
"""Bind lifecycle listeners to a `Runnable`, returning a new `Runnable`.
The `Run` object contains information about the run, including its `id`,
`type`, `input`, `output`, `error`, `start_time`, `end_time`, and
any tags or metadata added to the run.
Args:
on_start: Called before the `Runnable` starts running, with the `Run`
object.
on_end: Called after the `Runnable` finishes running, with the `Run`
object.
on_error: Called if the `Runnable` throws an error, with the `Run`
object.
Returns:
A new `Runnable` with the listeners bound.
"""
def listener_config_factory(config: RunnableConfig) -> RunnableConfig:
return {
"callbacks": [
RootListenersTracer(
config=config,
on_start=on_start,
on_end=on_end,
on_error=on_error,
)
],
}
return self.__class__(
bound=self.bound,
kwargs=self.kwargs,
config=self.config,
config_factories=[listener_config_factory, *self.config_factories],
custom_input_type=self.custom_input_type,
custom_output_type=self.custom_output_type,
)
@override
def with_types(
self,
input_type: type[Input] | BaseModel | None = None,
output_type: type[Output] | BaseModel | None = None,
) -> Runnable[Input, Output]:
return self.__class__(
bound=self.bound,
kwargs=self.kwargs,
config=self.config,
config_factories=self.config_factories,
custom_input_type=(
input_type if input_type is not None else self.custom_input_type
),
custom_output_type=(
output_type if output_type is not None else self.custom_output_type
),
)
@override
def with_retry(self, **kwargs: Any) -> Runnable[Input, Output]:
return self.__class__(
bound=self.bound.with_retry(**kwargs),
kwargs=self.kwargs,
config=self.config,
config_factories=self.config_factories,
)
@override
def __getattr__(self, name: str) -> Any: # type: ignore[misc]
attr = getattr(self.bound, name)
if callable(attr) and (
config_param := inspect.signature(attr).parameters.get("config")
):
if config_param.kind == inspect.Parameter.KEYWORD_ONLY:
@wraps(attr)
def wrapper(*args: Any, **kwargs: Any) -> Any:
return attr(
*args,
config=merge_configs(self.config, kwargs.pop("config", None)),
**kwargs,
)
return wrapper
if config_param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
idx = list(inspect.signature(attr).parameters).index("config")
@wraps(attr)
def wrapper(*args: Any, **kwargs: Any) -> Any:
if len(args) >= idx + 1:
argsl = list(args)
argsl[idx] = merge_configs(self.config, argsl[idx])
return attr(*argsl, **kwargs)
return attr(
*args,
config=merge_configs(self.config, kwargs.pop("config", None)),
**kwargs,
)
return wrapper
return attr
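# Hedged usage sketch (not part of the original module): each wrapper above
# returns a *new* RunnableBinding, so the calls compose without mutating the
# original Runnable. `RunnableLambda` is used here purely for illustration.
from langchain_core.runnables import RunnableLambda

_base = RunnableLambda(lambda x: x + 1)
_bound = _base.with_config(tags=["demo"]).with_retry(stop_after_attempt=2)
assert _bound.invoke(1) == 2  # the bound config and retry policy travel along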
|
RunnableBinding
|
python
|
redis__redis-py
|
tests/test_http/test_http_client.py
|
{
"start": 854,
"end": 13343
}
|
class ____:
def test_get_returns_parsed_json_and_uses_timeout(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# Arrange
base_url = "https://api.example.com/"
path = "v1/items"
params = {"limit": 5, "q": "hello world"}
expected_url = f"{base_url}{path}?limit=5&q=hello+world"
payload: Dict[str, Any] = {"items": [1, 2, 3], "ok": True}
content = json.dumps(payload).encode("utf-8")
captured_kwargs = {}
def fake_urlopen(request, *, timeout=None, context=None):
# Capture call details for assertions
captured_kwargs["timeout"] = timeout
captured_kwargs["context"] = context
# Assert the request was constructed correctly
assert getattr(request, "method", "").upper() == "GET"
assert request.full_url == expected_url
# Return a successful response
return FakeResponse(
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
url=expected_url,
content=content,
)
# Patch the urlopen used inside HttpClient
monkeypatch.setattr("redis.http.http_client.urlopen", fake_urlopen)
client = HttpClient(base_url=base_url)
# Act
result = client.get(
path, params=params, timeout=12.34
) # default expect_json=True
# Assert
assert result == payload
assert pytest.approx(captured_kwargs["timeout"], rel=1e-6) == 12.34
# HTTPS -> a context should be provided (created by ssl.create_default_context)
assert captured_kwargs["context"] is not None
def test_get_handles_gzip_response(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Arrange
base_url = "https://api.example.com/"
path = "gzip-endpoint"
expected_url = f"{base_url}{path}"
payload = {"message": "compressed ok"}
raw = json.dumps(payload).encode("utf-8")
gzipped = gzip.compress(raw)
def fake_urlopen(request, *, timeout=None, context=None):
# Return gzipped content with appropriate header
return FakeResponse(
status=200,
headers={
"Content-Type": "application/json; charset=utf-8",
"Content-Encoding": "gzip",
},
url=expected_url,
content=gzipped,
)
monkeypatch.setattr("redis.http.http_client.urlopen", fake_urlopen)
client = HttpClient(base_url=base_url)
# Act
result = client.get(path) # expect_json=True by default
# Assert
assert result == payload
def test_get_retries_on_retryable_http_errors_and_succeeds(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# Arrange: configure limited retries so we can assert attempts
retry_policy = Retry(
backoff=ExponentialWithJitterBackoff(base=0, cap=0), retries=2
) # 2 retries -> up to 3 attempts
base_url = "https://api.example.com/"
path = "sometimes-busy"
expected_url = f"{base_url}{path}"
payload = {"ok": True}
success_content = json.dumps(payload).encode("utf-8")
call_count = {"n": 0}
def make_http_error(url: str, code: int, body: bytes = b"busy"):
# Provide a file-like object for .read() when HttpClient tries to read error content
fp = BytesIO(body)
return HTTPError(
url=url,
code=code,
msg="Service Unavailable",
hdrs={"Content-Type": "text/plain"},
fp=fp,
)
def flaky_urlopen(request, *, timeout=None, context=None):
call_count["n"] += 1
# Fail with a retryable status (503) for the first two calls, then succeed
if call_count["n"] <= 2:
raise make_http_error(expected_url, 503)
return FakeResponse(
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
url=expected_url,
content=success_content,
)
monkeypatch.setattr("redis.http.http_client.urlopen", flaky_urlopen)
client = HttpClient(base_url=base_url, retry=retry_policy)
# Act
result = client.get(path)
# Assert: should have retried twice (total 3 attempts) and finally returned parsed JSON
assert result == payload
assert call_count["n"] == retry_policy.get_retries() + 1
def test_post_sends_json_body_and_parses_response(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# Arrange
base_url = "https://api.example.com/"
path = "v1/create"
expected_url = f"{base_url}{path}"
send_payload = {"a": 1, "b": "x"}
recv_payload = {"id": 10, "ok": True}
recv_content = json.dumps(recv_payload, separators=(",", ":")).encode("utf-8")
def fake_urlopen(request, *, timeout=None, context=None):
# Verify method, URL and headers
assert getattr(request, "method", "").upper() == "POST"
assert request.full_url == expected_url
# Content-Type should be auto-set for string JSON body
assert (
request.headers.get("Content-type") == "application/json; charset=utf-8"
)
# Body should be already UTF-8 encoded JSON with no spaces
assert request.data == json.dumps(
send_payload, ensure_ascii=False, separators=(",", ":")
).encode("utf-8")
return FakeResponse(
status=200,
headers={"Content-Type": "application/json; charset=utf-8"},
url=expected_url,
content=recv_content,
)
monkeypatch.setattr("redis.http.http_client.urlopen", fake_urlopen)
client = HttpClient(base_url=base_url)
# Act
result = client.post(path, json_body=send_payload)
# Assert
assert result == recv_payload
def test_post_with_raw_data_and_custom_headers(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# Arrange
base_url = "https://api.example.com/"
path = "upload"
expected_url = f"{base_url}{path}"
raw_data = b"\x00\x01BINARY"
custom_headers = {"Content-type": "application/octet-stream", "X-extra": "1"}
recv_payload = {"status": "ok"}
def fake_urlopen(request, *, timeout=None, context=None):
assert getattr(request, "method", "").upper() == "POST"
assert request.full_url == expected_url
# Ensure our provided headers are present
assert request.headers.get("Content-type") == "application/octet-stream"
assert request.headers.get("X-extra") == "1"
assert request.data == raw_data
return FakeResponse(
status=200,
headers={"Content-Type": "application/json"},
url=expected_url,
content=json.dumps(recv_payload).encode("utf-8"),
)
monkeypatch.setattr("redis.http.http_client.urlopen", fake_urlopen)
client = HttpClient(base_url=base_url)
# Act
result = client.post(path, data=raw_data, headers=custom_headers)
# Assert
assert result == recv_payload
def test_delete_returns_http_response_when_expect_json_false(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# Arrange
base_url = "https://api.example.com/"
path = "v1/resource/42"
expected_url = f"{base_url}{path}"
body = b"deleted"
def fake_urlopen(request, *, timeout=None, context=None):
assert getattr(request, "method", "").upper() == "DELETE"
assert request.full_url == expected_url
return FakeResponse(
status=204,
headers={"Content-Type": "text/plain"},
url=expected_url,
content=body,
)
monkeypatch.setattr("redis.http.http_client.urlopen", fake_urlopen)
client = HttpClient(base_url=base_url)
# Act
resp = client.delete(path, expect_json=False)
# Assert
assert resp.status == 204
assert resp.url == expected_url
assert resp.content == body
def test_put_raises_http_error_on_non_success(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# Arrange
base_url = "https://api.example.com/"
path = "v1/update/1"
expected_url = f"{base_url}{path}"
def make_http_error(url: str, code: int, body: bytes = b"not found"):
fp = BytesIO(body)
return HTTPError(
url=url,
code=code,
msg="Not Found",
hdrs={"Content-Type": "text/plain"},
fp=fp,
)
def fake_urlopen(request, *, timeout=None, context=None):
raise make_http_error(expected_url, 404)
monkeypatch.setattr("redis.http.http_client.urlopen", fake_urlopen)
client = HttpClient(base_url=base_url)
# Act / Assert
with pytest.raises(HttpError) as exc:
client.put(path, json_body={"x": 1})
assert exc.value.status == 404
assert exc.value.url == expected_url
def test_patch_with_params_encodes_query(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# Arrange
base_url = "https://api.example.com/"
path = "v1/edit"
params = {"tag": ["a", "b"], "q": "hello world"}
captured_url = {"u": None}
def fake_urlopen(request, *, timeout=None, context=None):
captured_url["u"] = request.full_url
return FakeResponse(
status=200,
headers={"Content-Type": "application/json"},
url=request.full_url,
content=json.dumps({"ok": True}).encode("utf-8"),
)
monkeypatch.setattr("redis.http.http_client.urlopen", fake_urlopen)
client = HttpClient(base_url=base_url)
client.patch(path, params=params) # We don't care about response here
# Assert query parameters regardless of ordering
parsed = urlparse(captured_url["u"])
qs = parse_qs(parsed.query)
assert qs["q"] == ["hello world"]
assert qs["tag"] == ["a", "b"]
def test_request_low_level_headers_auth_and_timeout_default(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# Arrange: use plain HTTP to verify no TLS context, and check default timeout used
base_url = "http://example.com/"
path = "ping"
captured = {
"timeout": None,
"context": "unset",
"headers": None,
"method": None,
}
def fake_urlopen(request, *, timeout=None, context=None):
captured["timeout"] = timeout
captured["context"] = context
captured["headers"] = dict(request.headers)
captured["method"] = getattr(request, "method", "").upper()
return FakeResponse(
status=200,
headers={"Content-Type": "application/json"},
url=request.full_url,
content=json.dumps({"pong": True}).encode("utf-8"),
)
monkeypatch.setattr("redis.http.http_client.urlopen", fake_urlopen)
client = HttpClient(base_url=base_url, auth_basic=("user", "pass"))
resp = client.request("GET", path)
# Assert
assert resp.status == 200
assert captured["method"] == "GET"
assert captured["context"] is None # no TLS for http
assert (
pytest.approx(captured["timeout"], rel=1e-6) == client.timeout
) # default used
# Check some default headers and Authorization presence
headers = {k.lower(): v for k, v in captured["headers"].items()}
assert "authorization" in headers and headers["authorization"].startswith(
"Basic "
)
assert headers.get("accept") == "application/json"
assert "gzip" in headers.get("accept-encoding", "").lower()
assert "user-agent" in headers
|
TestHttpClient
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/autotools_config_replacement/package.py
|
{
"start": 239,
"end": 3202
}
|
class ____(AutotoolsPackage):
"""
This package features broken and working config.sub and config.guess files
that should be replaced by the ones provided by gnuconfig. It allows testing
with / without patches and with / without substitutes available.
"""
has_code = False
version("1.0.0")
variant("patch_config_files", default=False)
variant("gnuconfig", default=False)
depends_on("gnuconfig", type="build", when="+gnuconfig")
@property
def patch_config_files(self):
return self.spec.satisfies("+patch_config_files")
def autoreconf(self, spec, prefix):
pass
def configure(self, spec, prefix):
pass
def build(self, spec, prefix):
pass
def install(self, spec, prefix):
broken = os.path.join(self.stage.source_path, "broken")
working = os.path.join(self.stage.source_path, "working")
install_tree(broken, self.prefix.broken)
install_tree(working, self.prefix.working)
@run_before("autoreconf")
def create_the_package_sources(self):
# Creates the following file structure:
# ./broken/config.sub -- not executable
# ./broken/config.guess -- executable & exit code 1
# ./working/config.sub -- executable & exit code 0
# ./working/config.guess -- executable & exit code 0
# Automatic config helper script substitution should replace the two
# broken scripts with those from the gnuconfig package.
broken = os.path.join(self.stage.source_path, "broken")
working = os.path.join(self.stage.source_path, "working")
mkdirp(broken)
mkdirp(working)
# a configure script is required
configure_script = join_path(self.stage.source_path, "configure")
with open(configure_script, "w", encoding="utf-8") as f:
f.write("#!/bin/sh\nexit 0")
os.chmod(configure_script, 0o775)
# broken config.sub (not executable)
broken_config_sub = join_path(broken, "config.sub")
with open(broken_config_sub, "w", encoding="utf-8") as f:
f.write("#!/bin/sh\nexit 0")
# broken config.guess (executable but with error return code)
broken_config_guess = join_path(broken, "config.guess")
with open(broken_config_guess, "w", encoding="utf-8") as f:
f.write("#!/bin/sh\nexit 1")
os.chmod(broken_config_guess, 0o775)
# working config.sub
working_config_sub = join_path(working, "config.sub")
with open(working_config_sub, "w", encoding="utf-8") as f:
f.write("#!/bin/sh\nexit 0")
os.chmod(working_config_sub, 0o775)
# working config.guess
working_config_guess = join_path(working, "config.guess")
with open(working_config_guess, "w", encoding="utf-8") as f:
f.write("#!/bin/sh\nexit 0")
os.chmod(working_config_guess, 0o775)
|
AutotoolsConfigReplacement
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/delete/tutorial001_py310.py
|
{
"start": 71,
"end": 2761
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def update_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Spider-Boy")
results = session.exec(statement)
hero_1 = results.one()
print("Hero 1:", hero_1)
statement = select(Hero).where(Hero.name == "Captain North America")
results = session.exec(statement)
hero_2 = results.one()
print("Hero 2:", hero_2)
hero_1.age = 16
hero_1.name = "Spider-Youngster"
session.add(hero_1)
hero_2.name = "Captain North America Except Canada"
hero_2.age = 110
session.add(hero_2)
session.commit()
session.refresh(hero_1)
session.refresh(hero_2)
print("Updated hero 1:", hero_1)
print("Updated hero 2:", hero_2)
def delete_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Spider-Youngster")
results = session.exec(statement)
hero = results.one()
print("Hero: ", hero)
session.delete(hero)
session.commit()
print("Deleted hero:", hero)
statement = select(Hero).where(Hero.name == "Spider-Youngster")
results = session.exec(statement)
hero = results.first()
if hero is None:
print("There's no hero named Spider-Youngster")
def main():
create_db_and_tables()
create_heroes()
update_heroes()
delete_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
tiangolo__fastapi
|
docs_src/path_operation_configuration/tutorial005_py39.py
|
{
"start": 104,
"end": 736
}
|
class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
tags: set[str] = set()
@app.post(
"/items/",
response_model=Item,
summary="Create an item",
response_description="The created item",
)
async def create_item(item: Item):
"""
Create an item with all the information:
- **name**: each item must have a name
- **description**: a long description
- **price**: required
- **tax**: if the item doesn't have tax, you can omit this
- **tags**: a set of unique tag strings for this item
"""
return item
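# Hedged usage sketch (assumes `fastapi` and `httpx` are installed, and that the
# module above exposes its FastAPI instance as `app`, as in the tutorial):
from fastapi.testclient import TestClient

_client = TestClient(app)
_resp = _client.post("/items/", json={"name": "Hammer", "price": 9.99})
assert _resp.status_code == 200
assert _resp.json()["name"] == "Hammer"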
|
Item
|
python
|
keras-team__keras
|
guides/making_new_layers_and_models_via_subclassing.py
|
{
"start": 15775,
"end": 17460
}
|
class ____(keras.Model):
def __init__(self, num_classes=1000):
super().__init__()
self.block_1 = ResNetBlock()
self.block_2 = ResNetBlock()
self.global_pool = layers.GlobalAveragePooling2D()
self.classifier = Dense(num_classes)
def call(self, inputs):
x = self.block_1(inputs)
x = self.block_2(x)
x = self.global_pool(x)
return self.classifier(x)
resnet = ResNet()
dataset = ...
resnet.fit(dataset, epochs=10)
resnet.save("filepath.keras")
```
"""
"""
## Putting it all together: an end-to-end example
Here's what you've learned so far:
- A `Layer` encapsulates a state (created in `__init__()` or `build()`) and some
computation (defined in `call()`).
- Layers can be recursively nested to create new, bigger computation blocks.
- Layers are backend-agnostic as long as they only use Keras APIs. You can use
backend-native APIs (such as `jax.numpy`, `torch.nn` or `tf.nn`), but then
your layer will only be usable with that specific backend.
- Layers can create and track losses (typically regularization losses)
via `add_loss()`.
- The outer container, the thing you want to train, is a `Model`. A `Model` is
just like a `Layer`, but with added training and serialization utilities.
Let's put all of these things together into an end-to-end example: we're going
to implement a Variational AutoEncoder (VAE) in a backend-agnostic fashion
-- so that it runs the same with TensorFlow, JAX, and PyTorch.
We'll train it on MNIST digits.
Our VAE will be a subclass of `Model`, built as a nested composition of layers
that subclass `Layer`. It will feature a regularization loss (KL divergence).
"""
|
ResNet
|
python
|
openai__gym
|
gym/error.py
|
{
"start": 1607,
"end": 1727
}
|
class ____(Error):
"""When the order enforcing is violated, i.e. step or render is called before reset."""
|
ResetNeeded
|
python
|
openai__openai-python
|
src/openai/_utils/_transform.py
|
{
"start": 887,
"end": 15999
}
|
class ____:
"""Metadata class to be used in Annotated types to provide information about a given type.
For example:
class MyParams(TypedDict):
account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
"""
alias: str | None
format: PropertyFormat | None
format_template: str | None
discriminator: str | None
def __init__(
self,
*,
alias: str | None = None,
format: PropertyFormat | None = None,
format_template: str | None = None,
discriminator: str | None = None,
) -> None:
self.alias = alias
self.format = format
self.format_template = format_template
self.discriminator = discriminator
@override
def __repr__(self) -> str:
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}', discriminator='{self.discriminator}')"
def maybe_transform(
data: object,
expected_type: object,
) -> Any | None:
"""Wrapper over `transform()` that allows `None` to be passed.
See `transform()` for more details.
"""
if data is None:
return None
return transform(data, expected_type)
# Wrapper over _transform_recursive providing fake types
def transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]]
transformed = transform({"card_id": "<my card ID>"}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = _transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
@lru_cache(maxsize=8096)
def _get_annotated_type(type_: type) -> type | None:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
"""
if is_required_type(type_):
# Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
type_ = get_args(type_)[0]
if is_annotated_type(type_):
return type_
return None
def _maybe_transform_key(key: str, type_: type) -> str:
"""Transform the given `data` based on the annotations provided in `type_`.
Note: this function only looks at `Annotated` types that contain `PropertyInfo` metadata.
"""
annotated_type = _get_annotated_type(type_)
if annotated_type is None:
# no `Annotated` definition for this type, no transformation needed
return key
# ignore the first argument as it is the actual type
annotations = get_args(annotated_type)[1:]
for annotation in annotations:
if isinstance(annotation, PropertyInfo) and annotation.alias is not None:
return annotation.alias
return key
def _no_transform_needed(annotation: type) -> bool:
return annotation == float or annotation == int
def _transform_recursive(
data: object,
*,
annotation: type,
inner_type: type | None = None,
) -> object:
"""Transform the given data against the expected type.
Args:
annotation: The direct type annotation given to the particular piece of data.
This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc
inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
the list can be transformed using the metadata from the container type.
Defaults to the same value as the `annotation` argument.
"""
from .._compat import model_dump
if inner_type is None:
inner_type = annotation
stripped_type = strip_annotated_type(inner_type)
origin = get_origin(stripped_type) or stripped_type
if is_typeddict(stripped_type) and is_mapping(data):
return _transform_typeddict(data, stripped_type)
if origin == dict and is_mapping(data):
items_type = get_args(stripped_type)[1]
return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()}
if (
# List[T]
(is_list_type(stripped_type) and is_list(data))
# Iterable[T]
or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
# Sequence[T]
or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
):
# dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
# intended as an iterable, so we don't transform it.
if isinstance(data, dict):
return cast(object, data)
inner_type = extract_type_arg(stripped_type, 0)
if _no_transform_needed(inner_type):
# for some types there is no need to transform anything, so we can get a small
# perf boost from skipping that work.
#
# but we still need to convert to a list to ensure the data is json-serializable
if is_list(data):
return data
return list(data)
return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]
if is_union_type(stripped_type):
# For union types we run the transformation against all subtypes to ensure that everything is transformed.
#
# TODO: there may be edge cases where the same normalized field name will transform to two different names
# in different subtypes.
for subtype in get_args(stripped_type):
data = _transform_recursive(data, annotation=annotation, inner_type=subtype)
return data
if isinstance(data, pydantic.BaseModel):
return model_dump(data, exclude_unset=True, mode="json", exclude=getattr(data, "__api_exclude__", None))
annotated_type = _get_annotated_type(annotation)
if annotated_type is None:
return data
# ignore the first argument as it is the actual type
annotations = get_args(annotated_type)[1:]
for annotation in annotations:
if isinstance(annotation, PropertyInfo) and annotation.format is not None:
return _format_data(data, annotation.format, annotation.format_template)
return data
def _format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object:
if isinstance(data, (date, datetime)):
if format_ == "iso8601":
return data.isoformat()
if format_ == "custom" and format_template is not None:
return data.strftime(format_template)
if format_ == "base64" and is_base64_file_input(data):
binary: str | bytes | None = None
if isinstance(data, pathlib.Path):
binary = data.read_bytes()
elif isinstance(data, io.IOBase):
binary = data.read()
if isinstance(binary, str): # type: ignore[unreachable]
binary = binary.encode()
if not isinstance(binary, bytes):
raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}")
return base64.b64encode(binary).decode("ascii")
return data
def _transform_typeddict(
data: Mapping[str, object],
expected_type: type,
) -> Mapping[str, object]:
result: dict[str, object] = {}
annotations = get_type_hints(expected_type, include_extras=True)
for key, value in data.items():
if not is_given(value):
# we don't need to include omitted values here as they'll
# be stripped out before the request is sent anyway
continue
type_ = annotations.get(key)
if type_ is None:
# we do not have a type annotation for this field, leave it as is
result[key] = value
else:
result[_maybe_transform_key(key, type_)] = _transform_recursive(value, annotation=type_)
return result
async def async_maybe_transform(
data: object,
expected_type: object,
) -> Any | None:
"""Wrapper over `async_transform()` that allows `None` to be passed.
See `async_transform()` for more details.
"""
if data is None:
return None
return await async_transform(data, expected_type)
async def async_transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]]
transformed = await async_transform({"card_id": "<my card ID>"}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = await _async_transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
async def _async_transform_recursive(
data: object,
*,
annotation: type,
inner_type: type | None = None,
) -> object:
"""Transform the given data against the expected type.
Args:
annotation: The direct type annotation given to the particular piece of data.
This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc
inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
the list can be transformed using the metadata from the container type.
Defaults to the same value as the `annotation` argument.
"""
from .._compat import model_dump
if inner_type is None:
inner_type = annotation
stripped_type = strip_annotated_type(inner_type)
origin = get_origin(stripped_type) or stripped_type
if is_typeddict(stripped_type) and is_mapping(data):
return await _async_transform_typeddict(data, stripped_type)
if origin == dict and is_mapping(data):
items_type = get_args(stripped_type)[1]
return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()}
if (
# List[T]
(is_list_type(stripped_type) and is_list(data))
# Iterable[T]
or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
# Sequence[T]
or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
):
# dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
# intended as an iterable, so we don't transform it.
if isinstance(data, dict):
return cast(object, data)
inner_type = extract_type_arg(stripped_type, 0)
if _no_transform_needed(inner_type):
# for some types there is no need to transform anything, so we can get a small
# perf boost from skipping that work.
#
# but we still need to convert to a list to ensure the data is json-serializable
if is_list(data):
return data
return list(data)
return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]
if is_union_type(stripped_type):
# For union types we run the transformation against all subtypes to ensure that everything is transformed.
#
# TODO: there may be edge cases where the same normalized field name will transform to two different names
# in different subtypes.
for subtype in get_args(stripped_type):
data = await _async_transform_recursive(data, annotation=annotation, inner_type=subtype)
return data
if isinstance(data, pydantic.BaseModel):
return model_dump(data, exclude_unset=True, mode="json")
annotated_type = _get_annotated_type(annotation)
if annotated_type is None:
return data
# ignore the first argument as it is the actual type
annotations = get_args(annotated_type)[1:]
for annotation in annotations:
if isinstance(annotation, PropertyInfo) and annotation.format is not None:
return await _async_format_data(data, annotation.format, annotation.format_template)
return data
async def _async_format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object:
if isinstance(data, (date, datetime)):
if format_ == "iso8601":
return data.isoformat()
if format_ == "custom" and format_template is not None:
return data.strftime(format_template)
if format_ == "base64" and is_base64_file_input(data):
binary: str | bytes | None = None
if isinstance(data, pathlib.Path):
binary = await anyio.Path(data).read_bytes()
elif isinstance(data, io.IOBase):
binary = data.read()
if isinstance(binary, str): # type: ignore[unreachable]
binary = binary.encode()
if not isinstance(binary, bytes):
raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}")
return base64.b64encode(binary).decode("ascii")
return data
async def _async_transform_typeddict(
data: Mapping[str, object],
expected_type: type,
) -> Mapping[str, object]:
result: dict[str, object] = {}
annotations = get_type_hints(expected_type, include_extras=True)
for key, value in data.items():
if not is_given(value):
# we don't need to include omitted values here as they'll
# be stripped out before the request is sent anyway
continue
type_ = annotations.get(key)
if type_ is None:
# we do not have a type annotation for this field, leave it as is
result[key] = value
else:
result[_maybe_transform_key(key, type_)] = await _async_transform_recursive(value, annotation=type_)
return result
@lru_cache(maxsize=8096)
def get_type_hints(
obj: Any,
globalns: dict[str, Any] | None = None,
localns: Mapping[str, Any] | None = None,
include_extras: bool = False,
) -> dict[str, Any]:
return _get_type_hints(obj, globalns=globalns, localns=localns, include_extras=include_extras)
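# Hedged sketch (illustration only; `_EventParams` below is made up): combining
# an alias with an `iso8601` format via the `PropertyInfo` and `transform`
# helpers defined above.
from datetime import date
from typing_extensions import Annotated, TypedDict

class _EventParams(TypedDict, total=False):
    starts_on: Annotated[date, PropertyInfo(alias="startsOn", format="iso8601")]

assert transform({"starts_on": date(2024, 1, 2)}, _EventParams) == {
    "startsOn": "2024-01-02"
}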
|
PropertyInfo
|
python
|
huggingface__transformers
|
src/transformers/models/xlm/modeling_xlm.py
|
{
"start": 26987,
"end": 29247
}
|
class ____(PreTrainedModel):
config: XLMConfig
base_model_prefix = "transformer"
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
@property
def dummy_inputs(self):
inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
if self.config.use_lang_emb and self.config.n_langs > 1:
langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
else:
langs_list = None
return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Embedding):
if self.config is not None and self.config.embed_init_std is not None:
init.normal_(module.weight, mean=0, std=self.config.embed_init_std)
# Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it loses the flag
if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False):
init.zeros_(module.weight[module.padding_idx])
if isinstance(module, nn.Linear):
if self.config is not None and self.config.init_std is not None:
init.normal_(module.weight, mean=0, std=self.config.init_std)
if module.bias is not None:
init.constant_(module.bias, 0.0)
if isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
if isinstance(module, XLMModel) and self.config.sinusoidal_embeddings:
init.copy_(
module.position_embeddings.weight,
create_sinusoidal_embeddings(
self.config.max_position_embeddings,
self.config.emb_dim,
out=torch.empty_like(module.position_embeddings.weight),
),
)
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of question answering models using a `XLMSQuADHead`.
"""
)
|
XLMPreTrainedModel
|
python
|
run-llama__llama_index
|
llama-index-integrations/graph_stores/llama-index-graph-stores-nebula/llama_index/graph_stores/nebula/nebula_property_graph.py
|
{
"start": 3347,
"end": 30315
}
|
class ____(PropertyGraphStore):
"""
NebulaGraph Property Graph Store.
This class implements a NebulaGraph property graph store.
You can run NebulaGraph-lite for free on Google Colab:
- https://github.com/nebula-contrib/nebulagraph-lite
Or install it with the Docker Extension (search the Docker Extension marketplace) on your local machine.
Examples:
`pip install llama-index-graph-stores-nebula`
`pip install jupyter-nebulagraph`
Create a new NebulaGraph Space with Basic Schema:
```jupyter
%load_ext ngql
%ngql --address 127.0.0.1 --port 9669 --user root --password nebula
%ngql CREATE SPACE IF NOT EXISTS llamaindex_nebula_property_graph(vid_type=FIXED_STRING(256));
```
"""
_space: str
_client: BaseExecutor
sanitize_query_output: bool
enhanced_schema: bool
def __init__(
self,
space: str,
client: Optional[BaseExecutor] = None,
username: str = "root",
password: str = "nebula",
url: str = "nebula://localhost:9669",
overwrite: bool = False,
props_schema: str = DEFAULT_PROPS_SCHEMA,
refresh_schema: bool = True,
sanitize_query_output: bool = False, # We don't put Embedding-Like values as Properties
enhanced_schema: bool = False,
) -> None:
self.sanitize_query_output = sanitize_query_output
self.enhanced_schema = enhanced_schema
self._space = space
if client:
self._client = client
else:
session_pool = SessionPool(
username,
password,
self._space,
[url_scheme_parse(url)],
)
session_pool.init()
self._client = session_pool
self._client.execute(DDL.render(props_schema=props_schema))
self._client.execute(INDEX_DDL)
if overwrite:
self._client.execute(f"CLEAR SPACE {self._space};")
self.structured_schema = {}
if refresh_schema:
try:
self.refresh_schema()
except Exception:
# fails to refresh for the first time
pass
self.supports_structured_queries = True
@property
def client(self):
"""Client of NebulaGraph."""
return self._client
def _execute(self, query: str) -> ResultSet:
return self._client.execute(query)
def refresh_schema(self) -> None:
"""
Refresh schema.
Example data of self.structured_schema
{
"node_props": {
"Person": [
{"property": "name", "type": "STRING", "comment": "The name of the person"},
{"property": "age", "type": "INTEGER", "comment": "The age of the person"},
{"property": "dob", "type": "DATE", "comment": "The date of birth of the person"}
],
"Company": [
{"property": "name", "type": "STRING", "comment": "The name of the company"},
{"property": "founded", "type": "DATE", "comment": "The date of foundation of the company"}
]
},
"rel_props": {
"WORKS_AT": [
{"property": "since", "type": "DATE", "comment": "The date when the person started working at the company"}
],
"MANAGES": [
{"property": "since", "type": "DATE", "comment": "The date when the person started managing the company"}
]
},
"relationships": [
{"start": "Person", "type": "WORKS_AT", "end": "Company"},
{"start": "Person", "type": "MANAGES", "end": "Company"}
]
}
"""
tags_schema = {}
edge_types_schema = {}
relationships = []
for node_label in self.structured_query(
"MATCH ()-[node_label:`__meta__node_label__`]->() "
"RETURN node_label.label AS name, "
"JSON_EXTRACT(node_label.props_json) AS props"
):
tags_schema[node_label["name"]] = []
# TODO: add properties to tags_schema
for rel_label in self.structured_query(
"MATCH ()-[rel_label:`__meta__rel_label__`]->() "
"RETURN rel_label.label AS name, "
"src(rel_label) AS src, dst(rel_label) AS dst, "
"JSON_EXTRACT(rel_label.props_json) AS props"
):
edge_types_schema[rel_label["name"]] = []
# TODO: add properties to edge_types_schema
relationships.append(
{
"start": rel_label["src"],
"type": rel_label["name"],
"end": rel_label["dst"],
}
)
self.structured_schema = {
"node_props": tags_schema,
"rel_props": edge_types_schema,
"relationships": relationships,
# TODO: need to check necessity of metadata here
}
def upsert_nodes(self, nodes: List[LabelledNode]) -> None:
# meta tag Entity__ is used to store the entity name
# meta tag Chunk__ is used to store the chunk text
# other labels are used to store the entity properties
# which must be created before upserting the nodes
# Lists to hold separated types
entity_list: List[EntityNode] = []
chunk_list: List[ChunkNode] = []
other_list: List[LabelledNode] = []
# Sort by type
for item in nodes:
if isinstance(item, EntityNode):
entity_list.append(item)
elif isinstance(item, ChunkNode):
chunk_list.append(item)
else:
other_list.append(item)
if chunk_list:
# TODO: need to double check other properties if any(it seems for now only text is there)
# model chunk as tag and perform upsert
# i.e. INSERT VERTEX `Chunk__` (`text`) VALUES "foo":("hello world"), "baz":("lorem ipsum");
insert_query = "INSERT VERTEX `Chunk__` (`text`) VALUES "
for i, chunk in enumerate(chunk_list):
insert_query += f'"{chunk.id}":($chunk_{i}),'
insert_query = insert_query[:-1] # Remove trailing comma
self.structured_query(
insert_query,
param_map={
f"chunk_{i}": chunk.text for i, chunk in enumerate(chunk_list)
},
)
if entity_list:
# model with tag Entity__ and other tags (labels) if applicable
# need to add properties as well; for extractors like SchemaLLMPathExtractor there are no properties
# NebulaGraph is schema-full, so we need a strong-schema mindset to abstract this.
# i.e.
# INSERT VERTEX Entity__ (name) VALUES "foo":("bar"), "baz":("qux");
# INSERT VERTEX Person (name) VALUES "foo":("bar"), "baz":("qux");
# The meta tag Entity__ is used to store the entity name
insert_query = "INSERT VERTEX `Entity__` (`name`) VALUES "
for i, entity in enumerate(entity_list):
insert_query += f'"{entity.id}":($entity_{i}),'
insert_query = insert_query[:-1] # Remove trailing comma
self.structured_query(
insert_query,
param_map={
f"entity_{i}": entity.name for i, entity in enumerate(entity_list)
},
)
# Create tags for each LabelledNode
# This could be revisited, if we don't have any properties for labels, mapping labels to
# Properties of tag: Entity__ is also feasible.
schema_ensurence_cache = set()
for i, entity in enumerate(nodes):
keys, values_k, values_params = self._construct_property_query(
entity.properties
)
stmt = f'INSERT VERTEX Props__ ({keys}) VALUES "{entity.id}":({values_k});'
self.structured_query(
stmt,
param_map=values_params,
)
stmt = (
f'INSERT VERTEX Node__ (label) VALUES "{entity.id}":("{entity.label}");'
)
# if entity.label not in schema_ensurence_cache:
# if ensure_node_meta_schema(
# entity.label, self.structured_schema, self.client, entity.properties
# ):
# self.refresh_schema()
# schema_ensurence_cache.add(entity.label)
self.structured_query(stmt)
def _construct_property_query(self, properties: Dict[str, Any]):
keys = ",".join([f"`{k}`" for k in properties])
values_k = ""
values_params: Dict[str, Any] = {}
for idx, v in enumerate(properties.values()):
values_k += f"$kv_{idx},"
values_params[f"kv_{idx}"] = v
values_k = values_k[:-1]
return keys, values_k, values_params
def upsert_relations(self, relations: List[Relation]) -> None:
"""Add relations."""
schema_ensurence_cache = set()
for relation in relations:
keys, values_k, values_params = self._construct_property_query(
relation.properties
)
stmt = f'INSERT EDGE `Relation__` (`label`,{keys}) VALUES "{relation.source_id}"->"{relation.target_id}":("{relation.label}",{values_k});'
# if relation.label not in schema_ensurence_cache:
# if ensure_relation_meta_schema(
# relation.source_id,
# relation.target_id,
# relation.label,
# self.structured_schema,
# self.client,
# relation.properties,
# ):
# self.refresh_schema()
# schema_ensurence_cache.add(relation.label)
self.structured_query(stmt, param_map=values_params)
def get(
self,
properties: Optional[dict] = None,
ids: Optional[List[str]] = None,
) -> List[LabelledNode]:
"""Get nodes."""
if not (properties or ids):
return []
else:
return self._get(properties, ids)
def _get(
self,
properties: Optional[dict] = None,
ids: Optional[List[str]] = None,
) -> List[LabelledNode]:
"""Get nodes."""
cypher_statement = "MATCH (e:Node__) "
if properties or ids:
cypher_statement += "WHERE "
params = {}
if ids:
cypher_statement += f"id(e) in $all_id "
params[f"all_id"] = ids
if properties:
for i, prop in enumerate(properties):
cypher_statement += f"e.Props__.`{prop}` == $property_{i} AND "
params[f"property_{i}"] = properties[prop]
cypher_statement = cypher_statement[:-5] # Remove trailing AND
return_statement = """
RETURN id(e) AS name,
e.Node__.label AS type,
properties(e.Props__) AS properties,
properties(e) AS all_props
"""
cypher_statement += return_statement
cypher_statement = cypher_statement.replace("\n", " ")
response = self.structured_query(cypher_statement, param_map=params)
nodes = []
for record in response:
if "text" in record["all_props"]:
node = ChunkNode(
id_=record["name"],
label=record["type"],
text=record["all_props"]["text"],
properties=remove_empty_values(record["properties"]),
)
elif "name" in record["all_props"]:
node = EntityNode(
id_=record["name"],
label=record["type"],
name=record["all_props"]["name"],
properties=remove_empty_values(record["properties"]),
)
else:
node = EntityNode(
name=record["name"],
type=record["type"],
properties=remove_empty_values(record["properties"]),
)
nodes.append(node)
return nodes
def get_all_nodes(self) -> List[LabelledNode]:
return self._get()
def get_triplets(
self,
entity_names: Optional[List[str]] = None,
relation_names: Optional[List[str]] = None,
properties: Optional[dict] = None,
ids: Optional[List[str]] = None,
) -> List[Triplet]:
cypher_statement = "MATCH (e:`Entity__`)-[r:`Relation__`]->(t:`Entity__`) "
if not (entity_names or relation_names or properties or ids):
return []
else:
cypher_statement += "WHERE "
params = {}
if entity_names:
cypher_statement += (
f"e.Entity__.name in $entities OR t.Entity__.name in $entities"
)
params[f"entities"] = entity_names
if relation_names:
cypher_statement += f"r.label in $relations "
params[f"relations"] = relation_names
if properties:
pass
if ids:
cypher_statement += f"id(e) in $all_id OR id(t) in $all_id"
params[f"all_id"] = ids
if properties:
v0_matching = ""
v1_matching = ""
edge_matching = ""
for i, prop in enumerate(properties):
v0_matching += f"e.Props__.`{prop}` == $property_{i} AND "
v1_matching += f"t.Props__.`{prop}` == $property_{i} AND "
edge_matching += f"r.`{prop}` == $property_{i} AND "
params[f"property_{i}"] = properties[prop]
v0_matching = v0_matching[:-5] # Remove trailing AND
v1_matching = v1_matching[:-5] # Remove trailing AND
edge_matching = edge_matching[:-5] # Remove trailing AND
cypher_statement += (
f"({v0_matching}) OR ({edge_matching}) OR ({v1_matching})"
)
return_statement = f"""
RETURN id(e) AS source_id, e.Node__.label AS source_type,
properties(e.Props__) AS source_properties,
r.label AS type,
properties(r) AS rel_properties,
id(t) AS target_id, t.Node__.label AS target_type,
properties(t.Props__) AS target_properties
"""
cypher_statement += return_statement
cypher_statement = cypher_statement.replace("\n", " ")
data = self.structured_query(cypher_statement, param_map=params)
triples = []
for record in data:
source = EntityNode(
name=record["source_id"],
label=record["source_type"],
properties=remove_empty_values(record["source_properties"]),
)
target = EntityNode(
name=record["target_id"],
label=record["target_type"],
properties=remove_empty_values(record["target_properties"]),
)
rel_properties = remove_empty_values(record["rel_properties"])
rel_properties.pop("label")
rel = Relation(
source_id=record["source_id"],
target_id=record["target_id"],
label=record["type"],
properties=rel_properties,
)
triples.append((source, rel, target))
return triples
def get_rel_map(
self,
graph_nodes: List[LabelledNode],
depth: int = 2,
limit: int = 30,
ignore_rels: Optional[List[str]] = None,
) -> List[Triplet]:
"""Get depth-aware rel map."""
triples = []
ids = [node.id for node in graph_nodes]
# Needs some optimization
response = self.structured_query(
f"""
MATCH (e:`Entity__`)
WHERE id(e) in $ids
MATCH p=(e)-[r*1..{depth}]-(other)
WHERE ALL(rel in relationships(p) WHERE rel.`label` <> 'MENTIONS')
UNWIND relationships(p) AS rel
WITH distinct rel
WITH startNode(rel) AS source,
rel.`label` AS type,
endNode(rel) AS endNode
MATCH (v) WHERE id(v)==id(source) WITH v AS source, type, endNode
MATCH (v) WHERE id(v)==id(endNode) WITH source, type, v AS endNode
RETURN id(source) AS source_id, source.`Node__`.`label` AS source_type,
properties(source.`Props__`) AS source_properties,
type,
id(endNode) AS target_id, endNode.`Node__`.`label` AS target_type,
properties(endNode.`Props__`) AS target_properties
LIMIT {limit}
""",
param_map={"ids": ids},
)
ignore_rels = ignore_rels or []
for record in response:
if record["type"] in ignore_rels:
continue
source = EntityNode(
name=record["source_id"],
label=record["source_type"],
properties=remove_empty_values(record["source_properties"]),
)
target = EntityNode(
name=record["target_id"],
label=record["target_type"],
properties=remove_empty_values(record["target_properties"]),
)
rel = Relation(
source_id=record["source_id"],
target_id=record["target_id"],
label=record["type"],
)
triples.append([source, rel, target])
return triples
def structured_query(
self, query: str, param_map: Optional[Dict[str, Any]] = None
) -> Any:
if not param_map:
result = self._client.execute(query)
else:
result = self._client.execute_parameter(query, build_param_map(param_map))
if not result.is_succeeded():
raise Exception(
"NebulaGraph query failed:",
result.error_msg(),
"Statement:",
query,
"Params:",
param_map,
)
full_result = [
{
key: result.row_values(row_index)[i].cast_primitive()
for i, key in enumerate(result.keys())
}
for row_index in range(result.row_size())
]
if self.sanitize_query_output:
# Not applicable for NebulaGraph for now though
return value_sanitize(full_result)
return full_result
def delete(
self,
entity_names: Optional[List[str]] = None,
relation_names: Optional[List[str]] = None,
properties: Optional[dict] = None,
ids: Optional[List[str]] = None,
) -> None:
"""Delete matching data."""
ans_ids: List[str] = []
if entity_names:
trips = self.get_triplets(
entity_names=entity_names,
)
for trip in trips:
if isinstance(trip[0], EntityNode) and trip[0].name in entity_names:
ans_ids.append(trip[0].id)
if isinstance(trip[2], EntityNode) and trip[2].name in entity_names:
ans_ids.append(trip[2].id)
if relation_names:
trips = self.get_triplets(
relation_names=relation_names,
)
for trip in trips:
ans_ids += [trip[0].id, trip[2].id, trip[1].source_id]
if properties:
nodes = self.get(properties=properties)
ans_ids += [node.id for node in nodes]
if ids:
nodes = self.get(ids=ids)
ans_ids += [node.id for node in nodes]
ans_ids = list(set(ans_ids))
for id in ans_ids or []:
self.structured_query(f'DELETE VERTEX "{id}" WITH EDGE;')
def _enhanced_schema_cypher(
self,
label_or_type: str,
properties: List[Dict[str, Any]],
exhaustive: bool,
is_relationship: bool = False,
) -> str:
"""Get enhanced schema information."""
def get_schema(self, refresh: bool = False) -> Any:
if refresh:
self.refresh_schema()
return self.structured_schema
def get_schema_str(self, refresh: bool = False) -> str:
schema = self.get_schema(refresh=refresh)
formatted_node_props = []
formatted_rel_props = []
if self.enhanced_schema:
# Enhanced formatting for nodes
for node_type, properties in schema["node_props"].items():
formatted_node_props.append(f"- **{node_type}**")
for prop in properties:
example = ""
if prop["type"] == "string" and prop.get("values"):
if prop.get("distinct_count", 11) > DISTINCT_VALUE_LIMIT:
example = (
f'Example: "{clean_string_values(prop["values"][0])}"'
if prop["values"]
else ""
)
else: # If less than 10 possible values return all
example = (
(
"Available options: "
f"{[clean_string_values(el) for el in prop['values']]}"
)
if prop["values"]
else ""
)
elif prop["type"] in [
# TODO: Add all numeric types
"int64",
"int32",
"int16",
"int8",
"uint64",
"uint32",
"uint16",
"uint8",
"date",
"datetime",
"timestamp",
"float",
"double",
]:
if prop.get("min") is not None:
example = f"Min: {prop['min']}, Max: {prop['max']}"
else:
example = (
f'Example: "{prop["values"][0]}"'
if prop.get("values")
else ""
)
formatted_node_props.append(
f" - `{prop['property']}`: {prop['type']} {example}"
)
# Enhanced formatting for relationships
for rel_type, properties in schema["rel_props"].items():
formatted_rel_props.append(f"- **{rel_type}**")
for prop in properties:
example = ""
if prop["type"] == "string":
if prop.get("distinct_count", 11) > DISTINCT_VALUE_LIMIT:
example = (
f'Example: "{clean_string_values(prop["values"][0])}"'
if prop.get("values")
else ""
)
else: # If less than 10 possible values return all
example = (
(
"Available options: "
f"{[clean_string_values(el) for el in prop['values']]}"
)
if prop.get("values")
else ""
)
elif prop["type"] in [
"int",
"int64",
"int32",
"int16",
"int8",
"uint64",
"uint32",
"uint16",
"uint8",
"float",
"double",
"date",
"datetime",
"timestamp",
]:
if prop.get("min"): # If we have min/max
example = f"Min: {prop['min']}, Max: {prop['max']}"
else: # return a single value
example = (
f'Example: "{prop["values"][0]}"'
if prop.get("values")
else ""
)
elif prop["type"] == "LIST":
# Skip embeddings
if prop["min_size"] > LIST_LIMIT:
continue
example = f"Min Size: {prop['min_size']}, Max Size: {prop['max_size']}"
formatted_rel_props.append(
f" - `{prop['property']}: {prop['type']}` {example}"
)
else:
# Format node properties
for label, props in schema["node_props"].items():
props_str = ", ".join(
[f"{prop['property']}: {prop['type']}" for prop in props]
)
formatted_node_props.append(f"{label} {{{props_str}}}")
# Format relationship properties using structured_schema
for type, props in schema["rel_props"].items():
props_str = ", ".join(
[f"{prop['property']}: {prop['type']}" for prop in props]
)
formatted_rel_props.append(f"{type} {{{props_str}}}")
# Format relationships
formatted_rels = [
f"(:{el['start']})-[:{el['type']}]->(:{el['end']})"
for el in schema["relationships"]
]
return "\n".join(
[
"Node properties:",
"\n".join(formatted_node_props),
"Relationship properties:",
"\n".join(formatted_rel_props),
"The relationships:",
"\n".join(formatted_rels),
]
)
def vector_query(
self, query: VectorStoreQuery, **kwargs: Any
) -> Tuple[List[LabelledNode], List[float]]:
raise NotImplementedError(
"Vector query not implemented for NebulaPropertyGraphStore."
)
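# Hedged usage sketch (assumes a reachable NebulaGraph service and a space
# created as shown in the class docstring above):
graph_store = NebulaPropertyGraphStore(
    space="llamaindex_nebula_property_graph",
    username="root",
    password="nebula",
    url="nebula://localhost:9669",
)
print(graph_store.get_schema_str(refresh=True))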
|
NebulaPropertyGraphStore
|
python
|
django__django
|
tests/postgres_tests/__init__.py
|
{
"start": 442,
"end": 719
}
|
class ____(SimpleTestCase):
pass
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests")
# To register type handlers and locate the widget's template.
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
|
PostgreSQLSimpleTestCase
|
python
|
catalyst-team__catalyst
|
catalyst/extras/frozen_class.py
|
{
"start": 0,
"end": 588
}
|
class ____:
"""Class which prohibit ``__setattr__`` on existing attributes.
Examples:
>>> class IRunner(FrozenClass):
"""
__is_frozen = False
def __setattr__(self, key, value):
"""@TODO: Docs. Contribution is welcome."""
if self.__is_frozen and not hasattr(self, key):
raise TypeError("%r is a frozen class for key %s" % (self, key))
object.__setattr__(self, key, value)
def _freeze(self):
self.__is_frozen = True
def _unfreeze(self):
self.__is_frozen = False
__all__ = ["FrozenClass"]
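# Hedged usage sketch (illustration only) of the frozen-attribute pattern above:
# after `_freeze()`, assigning to an unknown attribute raises, while existing
# attributes can still be updated.
class _Config(FrozenClass):
    def __init__(self):
        self.lr = 0.1
        self._freeze()

_cfg = _Config()
_cfg.lr = 0.01  # fine: the attribute already exists
try:
    _cfg.epochs = 10  # new attribute on a frozen instance
except TypeError:
    pass  # expected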
|
FrozenClass
|
python
|
pytorch__pytorch
|
torch/_functorch/_aot_autograd/runtime_wrappers.py
|
{
"start": 108328,
"end": 111529
}
|
class ____(CompilerWrapper):
flat_requires_grad: list[Optional[bool]] = field(default_factory=list)
def post_compile(
self,
compiled_fn,
aot_config: AOTConfig,
*,
runtime_metadata: ViewAndMutationMeta,
):
@wraps(compiled_fn)
def debug_compiled_function(args: list[Any]):
# TODO: Check aliasing relationships
# TODO: Check strides for metadata mutation
# (NB: ideally, this logic is factored out of this function and
# you move these debug checks there)
# Check requires grad. Bad case is when we compiled with
# requires_grad = False, but input requires_grad = True
# (vice versa is OK; we compute a gradient and then throw
# it away when it hits the input.)
for i, a in enumerate(args):
can_require_grad = self.flat_requires_grad[i]
if can_require_grad is None:
assert not isinstance(a, Tensor)
elif not can_require_grad:
assert not a.requires_grad, format_guard_bug_msg(
aot_config,
f"{describe_input(i, aot_config)} would not require grad",
)
return compiled_fn(args)
return debug_compiled_function
def pre_compile(
wrappers: list[CompilerWrapper],
flat_fn: TraceFn,
flat_args: list[FxValue],
flat_args_descs: list[AOTInput],
aot_config: AOTConfig,
*,
fw_metadata: ViewAndMutationMeta,
) -> tuple[TraceFn, list[FxValue], list[AOTInput], ViewAndMutationMeta]:
"""
Runs a sequence of wrappers on the given function and arguments.
Mutates wrappers in place.
"""
for wrapper in wrappers:
flat_fn, flat_args, flat_args_descs, fw_metadata = wrapper.pre_compile(
flat_fn, flat_args, flat_args_descs, aot_config, fw_metadata=fw_metadata
)
return flat_fn, flat_args, flat_args_descs, fw_metadata
def post_compile(
wrappers: list[CompilerWrapper],
compiled_fn: Callable,
aot_config: AOTConfig,
*,
runtime_metadata: ViewAndMutationMeta,
) -> tuple[Callable, ViewAndMutationMeta]:
"""
Runs a sequence of wrappers on the given function. Should be called after pre_compile()
"""
for wrapper in reversed(wrappers):
compiled_fn = wrapper.post_compile(
compiled_fn, aot_config, runtime_metadata=runtime_metadata
)
return compiled_fn, runtime_metadata
def make_runtime_safe(
fw_metadata: ViewAndMutationMeta,
maybe_subclass_meta: Optional[SubclassMeta],
):
"""
Calls make_runtime_safe on all ViewAndMutationMetas.
Modifies both arguments. Allows ViewAndMutationMetas to
be safely cached in AOTAutogradCache.
"""
fw_metadata.make_runtime_safe()
if maybe_subclass_meta is not None:
maybe_subclass_meta.fw_metadata.make_runtime_safe()
if maybe_subclass_meta.grad_input_metas:
for meta in maybe_subclass_meta.grad_input_metas:
if isinstance(meta, SubclassCreationMeta):
meta.make_runtime_safe()
|
DebugAssertWrapper
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config_vector_index.py
|
{
"start": 8285,
"end": 8466
}
|
class ____(_QuantizerConfigUpdate):
enabled: Optional[bool]
rescoreLimit: Optional[int]
@staticmethod
def quantizer_name() -> str:
return "bq"
|
_BQConfigUpdate
|
python
|
pytorch__pytorch
|
torch/ao/nn/qat/modules/conv.py
|
{
"start": 317,
"end": 3939
}
|
class ____(nn.modules.conv._ConvNd):
_FLOAT_MODULE: ClassVar[type[nn.modules.conv._ConvNd]]
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: tuple[int, ...],
stride: tuple[int, ...],
padding: str | tuple[int, ...],
dilation: tuple[int, ...],
transposed: bool,
output_padding: tuple[int, ...],
groups: int,
bias: bool,
padding_mode: Literal["zeros", "reflect", "replicate", "circular"],
qconfig=None,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
nn.modules.conv._ConvNd.__init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode,
**factory_kwargs,
)
assert qconfig, "qconfig must be provided for QAT module"
self.qconfig = qconfig
self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
def forward(self, input):
return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
@staticmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
r"""Create a qat module from a float module
Args:
`mod`: a float module, either produced by torch.ao.quantization utilities
or directly from user
"""
assert type(mod) is cls._FLOAT_MODULE, (
"qat."
+ cls.__name__
+ ".from_float only works for "
+ cls._FLOAT_MODULE.__name__
)
assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
assert mod.qconfig, "Input float module must have a valid qconfig"
if issubclass(type(mod), _FusedModule):
mod = mod[0]
qconfig = mod.qconfig
qat_conv = cls(
mod.in_channels,
mod.out_channels,
mod.kernel_size,
stride=mod.stride,
padding=mod.padding,
dilation=mod.dilation,
groups=mod.groups,
bias=mod.bias is not None,
padding_mode=mod.padding_mode,
qconfig=qconfig,
)
qat_conv.weight = mod.weight
qat_conv.bias = mod.bias
return qat_conv
def to_float(self):
"""This works for both single qat conv, and the qat conv - relu modules
to convert the qat module to a floating point module
"""
cls = type(self)
conv = cls._FLOAT_CONV_MODULE( # type: ignore[attr-defined]
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
self.dilation,
self.groups,
self.bias is not None,
self.padding_mode,
)
conv.weight = torch.nn.Parameter(self.weight.detach())
if self.bias is not None:
conv.bias = torch.nn.Parameter(self.bias.detach())
# conv relu
if issubclass(cls, _FusedModule):
modules = [conv]
assert hasattr(cls, "_FLOAT_RELU_MODULE")
relu = cls._FLOAT_RELU_MODULE()
modules.append(relu)
# pyrefly: ignore [missing-attribute]
fused = cls._FLOAT_MODULE(*modules)
fused.train(self.training)
return fused
else:
return conv
|
_ConvNd
|
python
|
pytorch__pytorch
|
torch/distributions/transforms.py
|
{
"start": 22767,
"end": 23091
}
|
class ____(Transform):
r"""Transform via the mapping :math:`y = |x|`."""
domain = constraints.real
codomain = constraints.positive
def __eq__(self, other):
return isinstance(other, AbsTransform)
def _call(self, x):
return x.abs()
def _inverse(self, y):
return y
|
AbsTransform
|
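Usage sketch for the AbsTransform row above (assumes a standard PyTorch install; not part of the dataset row). Calling the transform applies _call, and .inv applies the non-injective _inverse, which simply returns its argument:
import torch
from torch.distributions.transforms import AbsTransform

t = AbsTransform()
x = torch.tensor([-3.0, 0.5, 2.0])
print(t(x))                        # tensor([3.0000, 0.5000, 2.0000])
print(t.inv(torch.tensor([4.0])))  # inverse passes y through: tensor([4.])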
python
|
wandb__wandb
|
wandb/sdk/artifacts/_generated/input_types.py
|
{
"start": 234,
"end": 1429
}
|
class ____(GQLInput):
name: Optional[str] = Field(default=None, max_length=128)
description: Optional[str] = None
id: Optional[str] = None
framework: Optional[str] = None
entity_name: Optional[str] = Field(alias="entityName", default=None)
docker_image: Optional[str] = Field(
alias="dockerImage", default=None, max_length=512
)
repo: Optional[str] = Field(default=None, max_length=256)
access: Optional[str] = None
views: Optional[str] = None
is_benchmark: Optional[bool] = Field(alias="isBenchmark", default=None)
linked_benchmark: Optional[GQLId] = Field(alias="linkedBenchmark", default=None)
is_published: Optional[bool] = Field(alias="isPublished", default=None)
owner: Optional[GQLId] = None
allow_all_artifact_types_in_registry: Optional[bool] = Field(
alias="allowAllArtifactTypesInRegistry", default=None
)
rate_limits: Optional[RateLimitsInput] = Field(alias="rateLimits", default=None)
client_mutation_id: Optional[str] = Field(alias="clientMutationId", default=None)
artifact_types: Optional[List[ArtifactTypeInput]] = Field(
alias="artifactTypes", default=None
)
|
UpsertModelInput
|
python
|
huggingface__transformers
|
src/transformers/models/florence2/modeling_florence2.py
|
{
"start": 5677,
"end": 6381
}
|
class ____(nn.Module):
def __init__(self, config: Florence2VisionConfig, stage_idx: int):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.activation_function]
self.fc1 = nn.Linear(config.embed_dim[stage_idx], int(config.embed_dim[stage_idx] * config.mlp_ratio))
self.fc2 = nn.Linear(int(config.embed_dim[stage_idx] * config.mlp_ratio), config.embed_dim[stage_idx])
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
Florence2VisionMLP
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-sum-of-subsequence-with-non-adjacent-elements.py
|
{
"start": 52,
"end": 2681
}
|
class ____(object):
def maximumSumSubsequence(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
L0R0, L1R0, L0R1, L1R1 = range(4)
# Template:
# https://github.com/kamyu104/LeetCode-Solutions/blob/master/Python/block-placement-queries.py
class SegmentTree(object):
def __init__(self, N,
build_fn=lambda _: None,
query_fn=lambda x, y: y if x is None else x if y is None else max(x, y),
update_fn=lambda x: x):
self.tree = [None]*(1<<((N-1).bit_length()+1))
self.base = len(self.tree)>>1
self.query_fn = query_fn
self.update_fn = update_fn
for i in xrange(self.base, self.base+N):
self.tree[i] = build_fn(i-self.base)
for i in reversed(xrange(1, self.base)):
self.tree[i] = query_fn(self.tree[i<<1], self.tree[(i<<1)+1])
def update(self, i, h):
x = self.base+i
self.tree[x] = self.update_fn(h)
while x > 1:
x >>= 1
self.tree[x] = self.query_fn(self.tree[x<<1], self.tree[(x<<1)+1])
def query(self, L, R):
L += self.base
R += self.base
left = right = None
while L <= R:
if L & 1:
left = self.query_fn(left, self.tree[L])
L += 1
if R & 1 == 0:
right = self.query_fn(self.tree[R], right)
R -= 1
L >>= 1
R >>= 1
return self.query_fn(left, right)
def build(i):
return [max(nums[i], 0), 0, 0, 0]
def query(x, y):
if x is None:
return y
if y is None:
return x
return [max(x[L0R1]+y[L1R0], x[L0R0]+y[L1R0], x[L0R1]+y[L0R0]),
max(x[L1R1]+y[L1R0], x[L1R0]+y[L1R0], x[L1R1]+y[L0R0]),
max(x[L0R1]+y[L1R1], x[L0R0]+y[L1R1], x[L0R1]+y[L0R1]),
max(x[L1R1]+y[L1R1], x[L1R0]+y[L1R1], x[L1R1]+y[L0R1])]
st = SegmentTree(len(nums), build_fn=build, query_fn=query)
result = 0
for i, x in queries:
st.update(i, [max(x, 0), 0, 0, 0])
result = (result+max(st.tree[1]))%MOD
return result
|
Solution
|
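The Solution row above is Python 2 code (xrange) that answers each query in O(log n) with a segment tree keyed on whether the left/right boundary element is taken. As a correctness reference only, here is a Python 3 sketch that recomputes the non-adjacent-sum DP in O(n) per query (function names are illustrative):
def max_non_adjacent_sum(nums):
    # take: best sum that uses the current element; skip: best sum that does not
    take, skip = 0, 0
    for x in nums:
        take, skip = skip + max(x, 0), max(take, skip)
    return max(take, skip)

def maximum_sum_subsequence(nums, queries):
    MOD = 10**9 + 7
    nums = list(nums)
    total = 0
    for i, x in queries:
        nums[i] = x
        total = (total + max_non_adjacent_sum(nums)) % MOD
    return total

assert maximum_sum_subsequence([3, 5, 9], [[1, -2], [0, -3]]) == 21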
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 633289,
"end": 634359
}
|
class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"edges",
"nodes",
"page_info",
"total_count",
"total_recurring_monthly_price_in_cents",
"total_recurring_monthly_price_in_dollars",
)
edges = sgqlc.types.Field(
sgqlc.types.list_of("SponsorshipEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(sgqlc.types.list_of("Sponsorship"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
total_recurring_monthly_price_in_cents = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalRecurringMonthlyPriceInCents"
)
total_recurring_monthly_price_in_dollars = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalRecurringMonthlyPriceInDollars"
)
|
SponsorshipConnection
|
python
|
sympy__sympy
|
sympy/functions/special/error_functions.py
|
{
"start": 29959,
"end": 35564
}
|
class ____(DefinedFunction):
r"""
The classical exponential integral.
Explanation
===========
For use in SymPy, this function is defined as
.. math:: \operatorname{Ei}(x) = \sum_{n=1}^\infty \frac{x^n}{n\, n!}
+ \log(x) + \gamma,
where $\gamma$ is the Euler-Mascheroni constant.
If $x$ is a polar number, this defines an analytic function on the
Riemann surface of the logarithm. Otherwise this defines an analytic
function in the cut plane $\mathbb{C} \setminus (-\infty, 0]$.
**Background**
The name exponential integral comes from the following statement:
.. math:: \operatorname{Ei}(x) = \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t
If the integral is interpreted as a Cauchy principal value, this statement
holds for $x > 0$ and $\operatorname{Ei}(x)$ as defined above.
Examples
========
>>> from sympy import Ei, polar_lift, exp_polar, I, pi
>>> from sympy.abc import x
>>> Ei(-1)
Ei(-1)
This yields a real value:
>>> Ei(-1).n(chop=True)
-0.219383934395520
On the other hand the analytic continuation is not real:
>>> Ei(polar_lift(-1)).n(chop=True)
-0.21938393439552 + 3.14159265358979*I
The exponential integral has a logarithmic branch point at the origin:
>>> Ei(x*exp_polar(2*I*pi))
Ei(x) + 2*I*pi
Differentiation is supported:
>>> Ei(x).diff(x)
exp(x)/x
The exponential integral is related to many other special functions.
For example:
>>> from sympy import expint, Shi
>>> Ei(x).rewrite(expint)
-expint(1, x*exp_polar(I*pi)) - I*pi
>>> Ei(x).rewrite(Shi)
Chi(x) + Shi(x)
See Also
========
expint: Generalised exponential integral.
E1: Special case of the generalised exponential integral.
li: Logarithmic integral.
Li: Offset logarithmic integral.
Si: Sine integral.
Ci: Cosine integral.
Shi: Hyperbolic sine integral.
Chi: Hyperbolic cosine integral.
uppergamma: Upper incomplete gamma function.
References
==========
.. [1] https://dlmf.nist.gov/6.6
.. [2] https://en.wikipedia.org/wiki/Exponential_integral
.. [3] Abramowitz & Stegun, section 5: https://web.archive.org/web/20201128173312/http://people.math.sfu.ca/~cbm/aands/page_228.htm
"""
@classmethod
def eval(cls, z):
if z.is_zero:
return S.NegativeInfinity
elif z is S.Infinity:
return S.Infinity
elif z is S.NegativeInfinity:
return S.Zero
if z.is_zero:
return S.NegativeInfinity
nz, n = z.extract_branch_factor()
if n:
return Ei(nz) + 2*I*pi*n
def fdiff(self, argindex=1):
arg = unpolarify(self.args[0])
if argindex == 1:
return exp(arg)/arg
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
if (self.args[0]/polar_lift(-1)).is_positive:
return super()._eval_evalf(prec) + (I*pi)._eval_evalf(prec)
return super()._eval_evalf(prec)
def _eval_rewrite_as_uppergamma(self, z, **kwargs):
from sympy.functions.special.gamma_functions import uppergamma
# XXX this does not currently work usefully because uppergamma
# immediately turns into expint
return -uppergamma(0, polar_lift(-1)*z) - I*pi
def _eval_rewrite_as_expint(self, z, **kwargs):
return -expint(1, polar_lift(-1)*z) - I*pi
def _eval_rewrite_as_li(self, z, **kwargs):
if isinstance(z, log):
return li(z.args[0])
# TODO:
# Actually it only holds that:
# Ei(z) = li(exp(z))
# for -pi < imag(z) <= pi
return li(exp(z))
def _eval_rewrite_as_Si(self, z, **kwargs):
if z.is_negative:
return Shi(z) + Chi(z) - I*pi
else:
return Shi(z) + Chi(z)
_eval_rewrite_as_Ci = _eval_rewrite_as_Si
_eval_rewrite_as_Chi = _eval_rewrite_as_Si
_eval_rewrite_as_Shi = _eval_rewrite_as_Si
def _eval_rewrite_as_tractable(self, z, limitvar=None, **kwargs):
return exp(z) * _eis(z)
def _eval_rewrite_as_Integral(self, z, **kwargs):
from sympy.integrals.integrals import Integral
t = Dummy(uniquely_named_symbol('t', [z]).name)
return Integral(S.Exp1**t/t, (t, S.NegativeInfinity, z))
def _eval_as_leading_term(self, x, logx, cdir):
from sympy import re
x0 = self.args[0].limit(x, 0)
arg = self.args[0].as_leading_term(x, cdir=cdir)
cdir = arg.dir(x, cdir)
if x0.is_zero:
c, e = arg.as_coeff_exponent(x)
logx = log(x) if logx is None else logx
return log(c) + e*logx + EulerGamma - (
I*pi if re(cdir).is_negative else S.Zero)
return super()._eval_as_leading_term(x, logx=logx, cdir=cdir)
def _eval_nseries(self, x, n, logx, cdir=0):
x0 = self.args[0].limit(x, 0)
if x0.is_zero:
f = self._eval_rewrite_as_Si(*self.args)
return f._eval_nseries(x, n, logx)
return super()._eval_nseries(x, n, logx)
def _eval_aseries(self, n, args0, x, logx):
from sympy.series.order import Order
point = args0[0]
if point in (S.Infinity, S.NegativeInfinity):
z = self.args[0]
s = [factorial(k) / (z)**k for k in range(n)] + \
[Order(1/z**n, x)]
return (exp(z)/z) * Add(*s)
return super(Ei, self)._eval_aseries(n, args0, x, logx)
|
Ei
|
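A quick numerical check of the series definition quoted in the Ei docstring above (a throwaway sketch, truncated at n = 40; not part of the SymPy sources):
from sympy import Ei, EulerGamma, N, Rational, factorial, log

x = Rational(1, 2)
series = sum(x**n / (n * factorial(n)) for n in range(1, 41))
print(N(series + log(x) + EulerGamma))  # ≈ 0.4542199
print(N(Ei(x)))                         # ≈ 0.4542199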
python
|
django-extensions__django-extensions
|
django_extensions/management/commands/merge_model_instances.py
|
{
"start": 2963,
"end": 9753
}
|
class ____(BaseCommand):
help = """
Removes duplicate model instances based on a specified
model and field name(s).
Makes sure that any OneToOne, ForeignKey, or ManyToMany relationships
attached to a deleted model(s) get reattached to the remaining model.
Based on the following:
https://djangosnippets.org/snippets/2283/
https://stackoverflow.com/a/41291137/2532070
https://gist.github.com/edelvalle/01886b6f79ba0c4dce66
"""
@signalcommand
def handle(self, *args, **options):
model = get_model_to_deduplicate()
field_names = get_field_names(model)
first_or_last = keep_first_or_last_instance()
total_deleted_objects_count = 0
for instance in model.objects.all():
kwargs = {}
for field_name in field_names:
instance_field_value = instance.__getattribute__(field_name)
kwargs.update({field_name: instance_field_value})
try:
model.objects.get(**kwargs)
except model.MultipleObjectsReturned:
instances = model.objects.filter(**kwargs)
if first_or_last == "first":
primary_object = instances.first()
alias_objects = instances.exclude(pk=primary_object.pk)
elif first_or_last == "last":
primary_object = instances.last()
alias_objects = instances.exclude(pk=primary_object.pk)
primary_object, deleted_objects, deleted_objects_count = (
self.merge_model_instances(primary_object, alias_objects)
)
total_deleted_objects_count += deleted_objects_count
print(
"Successfully deleted {} model instances.".format(
total_deleted_objects_count
)
)
@transaction.atomic()
def merge_model_instances(self, primary_object, alias_objects):
"""
Merge several model instances into one, the `primary_object`.
Use this function to merge model objects and migrate all of the related
        fields from the alias objects to the primary object.
"""
generic_fields = get_generic_fields()
# get related fields
related_fields = list(
filter(lambda x: x.is_relation is True, primary_object._meta.get_fields())
)
many_to_many_fields = list(
filter(lambda x: x.many_to_many is True, related_fields)
)
related_fields = list(filter(lambda x: x.many_to_many is False, related_fields))
# Loop through all alias objects and migrate their references to the
# primary object
deleted_objects = []
deleted_objects_count = 0
for alias_object in alias_objects:
# Migrate all foreign key references from alias object to primary
# object.
for many_to_many_field in many_to_many_fields:
alias_varname = many_to_many_field.name
related_objects = getattr(alias_object, alias_varname)
for obj in related_objects.all():
try:
# Handle regular M2M relationships.
getattr(alias_object, alias_varname).remove(obj)
getattr(primary_object, alias_varname).add(obj)
except AttributeError:
# Handle M2M relationships with a 'through' model.
                        # This does not delete the 'through' model.
# TODO: Allow the user to delete a duplicate 'through' model.
through_model = getattr(alias_object, alias_varname).through
kwargs = {
many_to_many_field.m2m_reverse_field_name(): obj,
many_to_many_field.m2m_field_name(): alias_object,
}
through_model_instances = through_model.objects.filter(**kwargs)
for instance in through_model_instances:
# Re-attach the through model to the primary_object
setattr(
instance,
many_to_many_field.m2m_field_name(),
primary_object,
)
instance.save()
# TODO: Here, try to delete duplicate instances that are
# disallowed by a unique_together constraint
for related_field in related_fields:
if related_field.one_to_many:
alias_varname = related_field.get_accessor_name()
related_objects = getattr(alias_object, alias_varname)
for obj in related_objects.all():
field_name = related_field.field.name
setattr(obj, field_name, primary_object)
obj.save()
elif related_field.one_to_one or related_field.many_to_one:
alias_varname = related_field.name
related_object = getattr(alias_object, alias_varname)
primary_related_object = getattr(primary_object, alias_varname)
if primary_related_object is None:
setattr(primary_object, alias_varname, related_object)
primary_object.save()
elif related_field.one_to_one:
self.stdout.write(
"Deleted {} with id {}\n".format(
related_object, related_object.id
)
)
related_object.delete()
for field in generic_fields:
filter_kwargs = {}
filter_kwargs[field.fk_field] = alias_object._get_pk_val()
filter_kwargs[field.ct_field] = field.get_content_type(alias_object)
related_objects = field.model.objects.filter(**filter_kwargs)
for generic_related_object in related_objects:
setattr(generic_related_object, field.name, primary_object)
generic_related_object.save()
if alias_object.id:
deleted_objects += [alias_object]
self.stdout.write(
"Deleted {} with id {}\n".format(alias_object, alias_object.id)
)
alias_object.delete()
deleted_objects_count += 1
return primary_object, deleted_objects, deleted_objects_count
|
Command
|
python
|
apache__airflow
|
airflow-core/src/airflow/jobs/triggerer_job_runner.py
|
{
"start": 6350,
"end": 7480
}
|
class ____:
class StartTriggerer(BaseModel):
"""Tell the async trigger runner process to start, and where to send status update messages."""
type: Literal["StartTriggerer"] = "StartTriggerer"
class TriggerStateChanges(BaseModel):
"""
Report state change about triggers back to the TriggerRunnerSupervisor.
The supervisor will respond with a TriggerStateSync message.
"""
type: Literal["TriggerStateChanges"] = "TriggerStateChanges"
events: Annotated[
list[tuple[int, events.DiscrimatedTriggerEvent]] | None,
# We have to specify a default here, as otherwise Pydantic struggles to deal with the discriminated
# union :shrug:
Field(default=None),
]
# Format of list[str] is the exc traceback format
failures: list[tuple[int, list[str] | None]] | None = None
finished: list[int] | None = None
class TriggerStateSync(BaseModel):
type: Literal["TriggerStateSync"] = "TriggerStateSync"
to_create: list[workloads.RunTrigger]
to_cancel: set[int]
|
messages
|
python
|
lazyprogrammer__machine_learning_examples
|
rl2/cartpole/dqn_tf.py
|
{
"start": 808,
"end": 1277
}
|
class ____:
def __init__(self, M1, M2, f=tf.nn.tanh, use_bias=True):
self.W = tf.Variable(tf.random_normal(shape=(M1, M2)))
self.params = [self.W]
self.use_bias = use_bias
if use_bias:
self.b = tf.Variable(np.zeros(M2).astype(np.float32))
self.params.append(self.b)
self.f = f
def forward(self, X):
if self.use_bias:
a = tf.matmul(X, self.W) + self.b
else:
a = tf.matmul(X, self.W)
return self.f(a)
|
HiddenLayer
|
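The HiddenLayer row above targets TensorFlow 1.x graph mode (tf.random_normal, tf.Variable, no eager execution). A usage sketch under that assumption, stacking two of the layers above into a tiny Q-network with illustrative shapes:
import numpy as np
import tensorflow as tf  # assumes TF 1.x, matching the snippet's API

X = tf.placeholder(tf.float32, shape=(None, 4), name="state")
hidden = HiddenLayer(4, 32)              # tanh hidden layer
out = HiddenLayer(32, 2, f=lambda a: a)  # linear output layer (Q-values)
Q = out.forward(hidden.forward(X))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(Q, feed_dict={X: np.random.randn(1, 4).astype(np.float32)}))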
python
|
langchain-ai__langchain
|
libs/text-splitters/langchain_text_splitters/html.py
|
{
"start": 11753,
"end": 18658
}
|
class ____:
"""Splitting HTML files based on specified tag and font sizes.
Requires `lxml` package.
"""
def __init__(
self,
headers_to_split_on: list[tuple[str, str]],
**kwargs: Any,
) -> None:
"""Create a new `HTMLSectionSplitter`.
Args:
headers_to_split_on: list of tuples of headers we want to track mapped to
(arbitrary) keys for metadata. Allowed header values: `h1`, `h2`, `h3`,
`h4`, `h5`, `h6` e.g. `[("h1", "Header 1"), ("h2", "Header 2"]`.
**kwargs: Additional optional arguments for customizations.
"""
self.headers_to_split_on = dict(headers_to_split_on)
self.xslt_path = (
pathlib.Path(__file__).parent / "xsl/converting_to_header.xslt"
).absolute()
self.kwargs = kwargs
def split_documents(self, documents: Iterable[Document]) -> list[Document]:
"""Split documents."""
texts, metadatas = [], []
for doc in documents:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
results = self.create_documents(texts, metadatas=metadatas)
text_splitter = RecursiveCharacterTextSplitter(**self.kwargs)
return text_splitter.split_documents(results)
def split_text(self, text: str) -> list[Document]:
"""Split HTML text string.
Args:
text: HTML text
"""
return self.split_text_from_file(StringIO(text))
def create_documents(
self, texts: list[str], metadatas: list[dict[Any, Any]] | None = None
) -> list[Document]:
"""Create a list of `Document` objects from a list of texts."""
metadatas_ = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
for chunk in self.split_text(text):
metadata = copy.deepcopy(metadatas_[i])
for key in chunk.metadata:
if chunk.metadata[key] == "#TITLE#":
chunk.metadata[key] = metadata["Title"]
metadata = {**metadata, **chunk.metadata}
new_doc = Document(page_content=chunk.page_content, metadata=metadata)
documents.append(new_doc)
return documents
def split_html_by_headers(self, html_doc: str) -> list[dict[str, str | None]]:
"""Split an HTML document into sections based on specified header tags.
This method uses BeautifulSoup to parse the HTML content and divides it into
sections based on headers defined in `headers_to_split_on`. Each section
contains the header text, content under the header, and the tag name.
Args:
html_doc: The HTML document to be split into sections.
Returns:
A list of dictionaries representing sections. Each dictionary contains:
* `'header'`: The header text or a default title for the first section.
* `'content'`: The content under the header.
* `'tag_name'`: The name of the header tag (e.g., `h1`, `h2`).
"""
if not _HAS_BS4:
msg = "Unable to import BeautifulSoup/PageElement, \
please install with `pip install \
bs4`."
raise ImportError(msg)
soup = BeautifulSoup(html_doc, "html.parser")
header_names = list(self.headers_to_split_on.keys())
sections: list[dict[str, str | None]] = []
headers = _find_all_tags(soup, name=["body", *header_names])
for i, header in enumerate(headers):
if i == 0:
current_header = "#TITLE#"
current_header_tag = "h1"
section_content: list[str] = []
else:
current_header = header.text.strip()
current_header_tag = header.name
section_content = []
for element in header.next_elements:
if i + 1 < len(headers) and element == headers[i + 1]:
break
if isinstance(element, str):
section_content.append(element)
content = " ".join(section_content).strip()
if content:
sections.append(
{
"header": current_header,
"content": content,
"tag_name": current_header_tag,
}
)
return sections
def convert_possible_tags_to_header(self, html_content: str) -> str:
"""Convert specific HTML tags to headers using an XSLT transformation.
This method uses an XSLT file to transform the HTML content, converting
certain tags into headers for easier parsing. If no XSLT path is provided,
the HTML content is returned unchanged.
Args:
html_content: The HTML content to be transformed.
Returns:
The transformed HTML content as a string.
"""
if not _HAS_LXML:
msg = "Unable to import lxml, please install with `pip install lxml`."
raise ImportError(msg)
# use lxml library to parse html document and return xml ElementTree
# Create secure parsers to prevent XXE attacks
html_parser = etree.HTMLParser(no_network=True)
xslt_parser = etree.XMLParser(
resolve_entities=False, no_network=True, load_dtd=False
)
# Apply XSLT access control to prevent file/network access
# DENY_ALL is a predefined access control that blocks all file/network access
# Type ignore needed due to incomplete lxml type stubs
ac = etree.XSLTAccessControl.DENY_ALL # type: ignore[attr-defined]
tree = etree.parse(StringIO(html_content), html_parser)
xslt_tree = etree.parse(self.xslt_path, xslt_parser)
transform = etree.XSLT(xslt_tree, access_control=ac)
result = transform(tree)
return str(result)
def split_text_from_file(self, file: StringIO) -> list[Document]:
"""Split HTML content from a file into a list of `Document` objects.
Args:
file: A file path or a file-like object containing HTML content.
Returns:
A list of split Document objects.
"""
file_content = file.getvalue()
file_content = self.convert_possible_tags_to_header(file_content)
sections = self.split_html_by_headers(file_content)
return [
Document(
cast("str", section["content"]),
metadata={
self.headers_to_split_on[str(section["tag_name"])]: section[
"header"
]
},
)
for section in sections
]
@beta()
|
HTMLSectionSplitter
|
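A minimal usage sketch for the HTMLSectionSplitter row above (assumes langchain-text-splitters plus lxml and bs4 are installed; the HTML string is illustrative):
from langchain_text_splitters import HTMLSectionSplitter

html = """
<html><body>
<h1>Intro</h1><p>Opening paragraph.</p>
<h2>Details</h2><p>More text here.</p>
</body></html>
"""

splitter = HTMLSectionSplitter([("h1", "Header 1"), ("h2", "Header 2")])
for doc in splitter.split_text(html):
    print(doc.metadata, doc.page_content)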
python
|
run-llama__llama_index
|
llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser.py
|
{
"start": 296,
"end": 828
}
|
class ____:
@patch.dict(os.environ, {"AWS_REGION": "us-east-1"})
def test_get_aws_region_from_aws_region(self):
assert get_aws_region() == "us-east-1"
@patch.dict(
os.environ, {"AWS_DEFAULT_REGION": "us-west-1", "AWS_REGION": ""}, clear=True
)
def test_get_aws_region_from_aws_default_region(self):
assert get_aws_region() == "us-west-1"
@patch.dict(os.environ, {}, clear=True)
def test_get_aws_region_default(self):
assert get_aws_region() == "us-west-2"
|
TestGetAwsRegion
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mssql/base.py
|
{
"start": 41424,
"end": 41518
}
|
class ____(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = "SMALLDATETIME"
|
SMALLDATETIME
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/kernel_tests/reduce_test.py
|
{
"start": 1547,
"end": 9117
}
|
class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testSum(self):
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce(np.int64(0), lambda x, y: x + y)
self.assertEqual(((i + 1) * i) // 2, self.evaluate(result))
@combinations.generate(test_base.default_test_combinations())
def testSumTuple(self):
def reduce_fn(state, value):
v1, v2 = value
return state + v1 + v2
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
ds = dataset_ops.Dataset.zip((ds, ds))
result = ds.reduce(constant_op.constant(0, dtype=dtypes.int64), reduce_fn)
self.assertEqual(((i + 1) * i), self.evaluate(result))
@combinations.generate(test_base.default_test_combinations())
def testSumAndCount(self):
def reduce_fn(state, value):
s, c = state
return s + value, c + 1
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce((constant_op.constant(0, dtype=dtypes.int64),
constant_op.constant(0, dtype=dtypes.int64)),
reduce_fn)
s, c = self.evaluate(result)
self.assertEqual(((i + 1) * i) // 2, s)
self.assertEqual(i, c)
@combinations.generate(combinations.combine(tf_api_version=1, mode="graph"))
def testSquareUsingPlaceholder(self):
delta = array_ops.placeholder(dtype=dtypes.int64)
def reduce_fn(state, _):
return state + delta
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce(np.int64(0), reduce_fn)
with self.cached_session() as sess:
square = sess.run(result, feed_dict={delta: i})
self.assertEqual(i * i, square)
@combinations.generate(test_base.default_test_combinations())
def testSparse(self):
def reduce_fn(_, value):
return value
def make_sparse_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
for i in range(10):
ds = dataset_ops.Dataset.from_tensors(make_sparse_fn(i+1))
result = ds.reduce(make_sparse_fn(0), reduce_fn)
self.assertValuesEqual(make_sparse_fn(i + 1), self.evaluate(result))
@combinations.generate(test_base.default_test_combinations())
def testNested(self):
def reduce_fn(state, value):
state["dense"] += value["dense"]
state["sparse"] = value["sparse"]
return state
def make_sparse_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def map_fn(i):
return {"dense": math_ops.cast(i, dtype=dtypes.int64),
"sparse": make_sparse_fn(math_ops.cast(i, dtype=dtypes.int64))}
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1).map(map_fn)
result = ds.reduce(map_fn(0), reduce_fn)
result = self.evaluate(result)
self.assertEqual(((i + 1) * i) // 2, result["dense"])
self.assertValuesEqual(make_sparse_fn(i), result["sparse"])
@combinations.generate(test_base.default_test_combinations())
def testDatasetSideEffect(self):
counter_var = variables.Variable(0)
def increment_fn(x):
counter_var.assign_add(1)
return x
def dataset_fn():
return dataset_ops.Dataset.range(10).map(increment_fn)
def reduce_fn(state, value):
return state + value
@def_function.function
def fn():
_ = dataset_fn().reduce(np.int64(0), reduce_fn)
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 10)
@combinations.generate(test_base.default_test_combinations())
def testSideEffect(self):
counter_var = variables.Variable(0)
def dataset_fn():
return dataset_ops.Dataset.range(10)
def reduce_fn(state, value):
counter_var.assign_add(1)
return state + value
@def_function.function
def fn():
_ = dataset_fn().reduce(np.int64(0), reduce_fn)
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 10)
@combinations.generate(test_base.default_test_combinations())
def testAutomaticControlDependencies(self):
counter_var = variables.Variable(1)
def dataset_fn():
return dataset_ops.Dataset.range(1)
def reduce1_fn(state, value):
counter_var.assign(counter_var + 1)
return state + value
def reduce2_fn(state, value):
counter_var.assign(counter_var * 2)
return state + value
@def_function.function
def fn():
_ = dataset_fn().reduce(np.int64(0), reduce1_fn)
_ = dataset_fn().reduce(np.int64(0), reduce2_fn)
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 4)
@combinations.generate(test_base.default_test_combinations())
def testNestedAutomaticControlDependencies(self):
counter_var = variables.Variable(0)
def map_fn(x):
counter_var.assign_add(1)
return x
def dataset_fn():
return dataset_ops.Dataset.range(10).map(map_fn)
@def_function.function
def fn():
for _ in dataset_fn():
pass
return counter_var
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), 10)
@combinations.generate(test_base.default_test_combinations())
def testStateOnGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPUs available.")
state = constant_op.constant(0, dtype=dtypes.int64)
def reduce_fn(state, value):
with ops.device("/gpu:0"):
return state + value
for i in range(10):
ds = dataset_ops.Dataset.range(1, i + 1)
result = ds.reduce(state, reduce_fn)
self.assertEqual(((i + 1) * i) // 2, self.evaluate(result))
@combinations.generate(combinations.combine(tf_api_version=1, mode="graph"))
def testCancellation(self):
ds = dataset_ops.Dataset.from_tensors(1).repeat()
result = ds.reduce(0, lambda x, y: x + y)
with self.cached_session() as sess:
# The `result` op is guaranteed to not complete before cancelled because
# the dataset that is being reduced is infinite.
thread = self.checkedThread(self.assert_op_cancelled, args=(result,))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
@combinations.generate(test_base.default_test_combinations())
def testInvalidFunction(self):
ds = dataset_ops.Dataset.range(5)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(ds.reduce(0, lambda _, __: ()))
@combinations.generate(test_base.default_test_combinations())
def testOptions(self):
dataset = dataset_ops.Dataset.range(5)
dataset = dataset.apply(testing.assert_next(["MapAndBatch"]))
dataset = dataset.map(lambda x: x * 2).batch(5)
self.evaluate(dataset.reduce(0, lambda state, value: state))
@combinations.generate(test_base.default_test_combinations())
def testName(self):
dataset = dataset_ops.Dataset.from_tensors(42)
self.assertEqual(
self.evaluate(
dataset.reduce(0, lambda state, value: value, name="reduce")), 42)
if __name__ == "__main__":
test.main()
|
ReduceTest
|
python
|
ZoranPandovski__al-go-rithms
|
data_structures/Graphs/graph/Python/Disjoint_Set.py
|
{
"start": 0,
"end": 129
}
|
class ____:
def __init__(self):
self.parent=None
self.rank=0
self.element=None
self.e=[]
|
Disjoint
|
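The Disjoint row above only declares node fields; in practice it is paired with find/union helpers. A sketch with path compression and union by rank (the helper names and the root-points-to-itself convention are illustrative, not part of the repository file):
def make_set(element):
    node = Disjoint()
    node.element = element
    node.parent = node  # convention here: a root points to itself
    return node

def find(node):
    if node.parent is not node:
        node.parent = find(node.parent)  # path compression
    return node.parent

def union(a, b):
    ra, rb = find(a), find(b)
    if ra is rb:
        return
    if ra.rank < rb.rank:  # union by rank
        ra, rb = rb, ra
    rb.parent = ra
    if ra.rank == rb.rank:
        ra.rank += 1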
python
|
keras-team__keras
|
integration_tests/model_visualization_test.py
|
{
"start": 136,
"end": 2395
}
|
class ____(keras.models.Model):
def __init__(self, name):
super().__init__(name=name)
def call(self, x):
return x
def parse_text_from_html(html):
pattern = r"<font[^>]*>(.*?)</font>"
matches = re.findall(pattern, html)
for match in matches:
clean_text = re.sub(r"<[^>]*>", "", match)
return clean_text
return ""
def get_node_text(node):
attributes = node.get_attributes()
if "label" in attributes:
html = node.get_attributes()["label"]
return parse_text_from_html(html)
else:
return None
def get_edge_dict(dot):
def get_node_dict(graph, path=""):
nodes = {
node.get_name(): path + get_node_text(node)
for node in graph.get_nodes()
if node.get_name() != "node" # Dummy node inserted by pydot?
}
for subgraph in graph.get_subgraphs():
sub_nodes = get_node_dict(
subgraph, path=f"{path}{subgraph.get_label()} > "
)
nodes.update(sub_nodes)
return nodes
node_dict = get_node_dict(dot)
def get_edges(graph):
edges = list(graph.get_edges())
for subgraph in graph.get_subgraphs():
edges.extend(get_edges(subgraph))
return edges
edge_dict = dict()
dangling_edges = []
for edge in get_edges(dot):
source_node = node_dict.get(edge.get_source(), None)
destination_node = node_dict.get(edge.get_destination(), None)
if source_node is None or destination_node is None:
dangling_edges.append(
f"from '{source_node}'/'{edge.get_source()}' "
f"to '{destination_node}'/'{edge.get_destination()}'"
)
if source_node in edge_dict:
destination_nodes = edge_dict[source_node]
if not isinstance(destination_nodes, set):
destination_nodes = set([destination_nodes])
edge_dict[source_node] = destination_nodes
destination_nodes.add(destination_node)
else:
edge_dict[source_node] = destination_node
if dangling_edges:
raise ValueError(f"Dangling edges found: {dangling_edges}")
return edge_dict
|
SubclassModel
|
python
|
getsentry__sentry
|
tests/sentry/models/test_release.py
|
{
"start": 45756,
"end": 47874
}
|
class ____(TestCase):
def run_test(self, operator, build, expected_releases, organization_id=None, projects=None):
organization_id = organization_id if organization_id else self.organization.id
project_ids = [p.id for p in projects] if projects else None
assert set(
Release.objects.filter_by_semver_build(
organization_id, operator, build, project_ids=project_ids
)
) == set(expected_releases)
def test_no_build(self) -> None:
self.create_release(version="test@1.2.3")
self.create_release(version="test@1.2.4")
self.run_test("gt", "100", [])
self.run_test("exact", "105aab", [])
def test_numeric(self) -> None:
release_1 = self.create_release(version="test@1.2.3+123")
release_2 = self.create_release(version="test@1.2.4+456")
self.create_release(version="test@1.2.4+123abc")
self.run_test("gt", "123", [release_2])
self.run_test("lte", "123", [release_1])
self.run_test("exact", "123", [release_1])
def test_large_numeric(self) -> None:
release_1 = self.create_release(version="test@1.2.3+9223372036854775808")
self.create_release(version="test@1.2.3+9223372036854775809")
# This should only return `release_1`, since this exceeds the max size for a bigint and
# so should fall back to an exact string match instead.
self.run_test("gt", "9223372036854775808", [release_1])
def test_text(self) -> None:
release_1 = self.create_release(version="test@1.2.3+123")
release_2 = self.create_release(version="test@1.2.4+1234")
release_3 = self.create_release(version="test@1.2.4+123abc")
self.run_test("exact", "", [release_1, release_2, release_3])
self.run_test("exact", "*", [release_1, release_2, release_3])
self.run_test("exact", "123*", [release_1, release_2, release_3])
self.run_test("exact", "123a*", [release_3])
self.run_test("exact", "123ab", [])
self.run_test("exact", "123abc", [release_3])
|
ReleaseFilterBySemverBuildTest
|
python
|
weaviate__weaviate-python-client
|
weaviate/exceptions.py
|
{
"start": 12952,
"end": 13226
}
|
class ____(UnexpectedStatusCodeError):
"""Is raised when a request to Weaviate fails due to insufficient permissions."""
def __init__(self, res: Union[httpx.Response, AioRpcError, Call]) -> None:
super().__init__("forbidden", res)
|
InsufficientPermissionsError
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axes_grid1/axes_size.py
|
{
"start": 5045,
"end": 5253
}
|
class ____(MaxExtent):
"""
Size whose absolute part is the largest width of the given *artist_list*.
"""
def __init__(self, artist_list):
super().__init__(artist_list, "width")
|
MaxWidth
|
python
|
pypa__pipenv
|
pipenv/vendor/click/types.py
|
{
"start": 13735,
"end": 14251
}
|
class ____(ParamType):
_number_class: t.ClassVar[t.Type[t.Any]]
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
try:
return self._number_class(value)
except ValueError:
self.fail(
_("{value!r} is not a valid {number_type}.").format(
value=value, number_type=self.name
),
param,
ctx,
)
|
_NumberParamTypeBase
|
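Concrete number types only need to set name and _number_class on top of the base in the row above; a sketch in that style (the subclass name is illustrative, not click's actual class):
class _IntType(_NumberParamTypeBase):
    name = "integer"
    _number_class = int

# _IntType().convert("42", None, None) -> 42, while "4x" goes through self.fail(...)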
python
|
scrapy__scrapy
|
scrapy/core/engine.py
|
{
"start": 1721,
"end": 2935
}
|
class ____:
def __init__(
self,
close_if_idle: bool,
nextcall: CallLaterOnce[None],
scheduler: BaseScheduler,
) -> None:
self.closing: Deferred[None] | None = None
self.inprogress: set[Request] = set()
self.close_if_idle: bool = close_if_idle
self.nextcall: CallLaterOnce[None] = nextcall
self.scheduler: BaseScheduler = scheduler
self.heartbeat: AsyncioLoopingCall | LoopingCall = create_looping_call(
nextcall.schedule
)
def add_request(self, request: Request) -> None:
self.inprogress.add(request)
def remove_request(self, request: Request) -> None:
self.inprogress.remove(request)
self._maybe_fire_closing()
async def close(self) -> None:
self.closing = Deferred()
self._maybe_fire_closing()
await maybe_deferred_to_future(self.closing)
def _maybe_fire_closing(self) -> None:
if self.closing is not None and not self.inprogress:
if self.nextcall:
self.nextcall.cancel()
if self.heartbeat.running:
self.heartbeat.stop()
self.closing.callback(None)
|
_Slot
|
python
|
ray-project__ray
|
python/ray/serve/tests/unit/test_constants_utils.py
|
{
"start": 7849,
"end": 8723
}
|
class ____:
@pytest.mark.parametrize(
"name",
[
"RAY_SERVE_FOO",
"RAY_SERVE__DOUBLE_UNDERSCORE",
"RAY_SERVE_123",
"RAY_SERVE_VAR_NAME",
],
)
def test_validate_name_accepts_valid_prefix(self, name):
# Should not raise
assert _validate_name(name) is None
@pytest.mark.parametrize(
"name",
[
"",
"RAY_SERVE", # missing trailing underscore and name
"SERVE_VAR",
"ray_SERVE_BAR",
"RAY_service_VAR",
],
)
def test_validate_name_rejects_invalid_prefix(self, name):
with pytest.raises(ValueError, match=".*require prefix `RAY_SERVE_`*"):
_validate_name(name)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
TestValidation
|
python
|
PyCQA__pylint
|
doc/data/messages/s/super-init-not-called/bad.py
|
{
"start": 0,
"end": 118
}
|
class ____:
def __init__(self, name="fruit"):
self.name = name
print("Creating a {self.name}")
|
Fruit
|
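The bad.py row above is the base class from pylint's super-init-not-called example; the message fires when a subclass overrides __init__ without delegating to it. A sketch of the corresponding fix (illustrative, not the repository's good.py):
class Apple(Fruit):
    def __init__(self):
        super().__init__(name="apple")  # delegating to Fruit.__init__ avoids the warning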
python
|
django__django
|
tests/gis_tests/tests.py
|
{
"start": 1564,
"end": 3160
}
|
class ____(unittest.TestCase):
"""
    The PostGIS version check correctly parses the version numbers
"""
def test_get_version(self):
expect = "1.0.0"
ops = FakePostGISOperations(expect)
actual = ops.postgis_lib_version()
self.assertEqual(expect, actual)
def test_version_classic_tuple(self):
expect = ("1.2.3", 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_version_dev_tuple(self):
expect = ("1.2.3dev", 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_version_loose_tuple(self):
expect = ("1.2.3b1.dev0", 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_valid_version_numbers(self):
versions = [
("1.3.0", 1, 3, 0),
("2.1.1", 2, 1, 1),
("2.2.0dev", 2, 2, 0),
]
for version in versions:
with self.subTest(version=version):
ops = FakePostGISOperations(version[0])
actual = ops.spatial_version
self.assertEqual(version[1:], actual)
def test_no_version_number(self):
ops = FakePostGISOperations()
with self.assertRaises(ImproperlyConfigured):
ops.spatial_version
@unittest.skipUnless(HAS_POSTGRES, "PostGIS-specific tests.")
|
TestPostGISVersionCheck
|
python
|
facebook__pyre-check
|
client/commands/launch_and_subscribe_handler.py
|
{
"start": 1332,
"end": 17432
}
|
class ____(background_tasks.Task):
server_options_reader: PyreServerOptionsReader
remote_logging: Optional[backend_arguments.RemoteLogging]
server_state: ServerState
client_status_message_handler: status_message_handler.ClientStatusMessageHandler
client_type_error_handler: type_error_handler.ClientTypeErrorHandler
subscription_response_parser: PyreSubscriptionResponseParser
def __init__(
self,
server_options_reader: PyreServerOptionsReader,
server_state: ServerState,
client_status_message_handler: status_message_handler.ClientStatusMessageHandler,
client_type_error_handler: type_error_handler.ClientTypeErrorHandler,
subscription_response_parser: PyreSubscriptionResponseParser,
remote_logging: Optional[backend_arguments.RemoteLogging] = None,
) -> None:
self.server_options_reader = server_options_reader
self.remote_logging = remote_logging
self.server_state = server_state
self.client_status_message_handler = client_status_message_handler
self.client_type_error_handler = client_type_error_handler
self.subscription_response_parser = subscription_response_parser
@abc.abstractmethod
async def handle_type_error_event(
self,
type_error_subscription: subscription.TypeErrors,
) -> None:
pass
@abc.abstractmethod
async def handle_status_update_event(
self,
status_update_subscription: subscription.StatusUpdate,
) -> None:
pass
async def handle_error_event(self, error_subscription: subscription.Error) -> None:
message = error_subscription.message
LOG.info(f"Received error from subscription channel: {message}")
raise PyreDaemonShutdown(message)
@abc.abstractmethod
async def _subscribe(
self,
server_input_channel: connections.AsyncTextReader,
server_output_channel: connections.AsyncTextWriter,
) -> None:
pass
@abc.abstractmethod
async def client_setup(self) -> None:
pass
@abc.abstractmethod
async def client_teardown(self) -> None:
pass
def get_type_errors_availability(self) -> features.TypeErrorsAvailability:
return self.server_state.server_options.language_server_features.type_errors
@staticmethod
def _auxiliary_logging_info(
server_options: pyre_server_options.PyreServerOptions,
) -> Dict[str, Optional[str]]:
relative_local_root = (
server_options.start_arguments.base_arguments.relative_local_root
)
return {
"binary": str(
server_options.server_start_command.get_pyre_binary_location()
),
"log_path": server_options.start_arguments.base_arguments.log_path,
"global_root": (server_options.start_arguments.base_arguments.global_root),
**(
{}
if relative_local_root is None
else {"local_root": relative_local_root}
),
}
@staticmethod
async def _read_server_response(
server_input_channel: connections.AsyncTextReader,
) -> str:
return await server_input_channel.read_until(separator="\n")
async def _handle_subscription_body(
self, subscription_body: subscription.Body
) -> None:
if isinstance(subscription_body, subscription.TypeErrors):
await self.handle_type_error_event(subscription_body)
elif isinstance(subscription_body, subscription.StatusUpdate):
await self.handle_status_update_event(subscription_body)
elif isinstance(subscription_body, subscription.Error):
await self.handle_error_event(subscription_body)
elif isinstance(subscription_body, subscription.IncrementalTelemetry):
pass
async def _run_subscription_loop(
self,
subscription_name: str,
server_input_channel: connections.AsyncTextReader,
server_output_channel: connections.AsyncTextWriter,
) -> None:
while True:
raw_subscription_response = await self._read_server_response(
server_input_channel
)
subscription_response = self.subscription_response_parser.parse_response(
raw_subscription_response
)
await self._handle_subscription_body(subscription_response.body)
async def subscribe(
self,
server_input_channel: connections.AsyncTextReader,
server_output_channel: connections.AsyncTextWriter,
) -> None:
try:
await self._subscribe(
server_input_channel,
server_output_channel,
)
finally:
await self.client_status_message_handler.show_status_message_to_client(
"Lost connection to the background Pyre Server. "
"This usually happens when Pyre detect changes in project which "
"it was not able to handle incrementally. "
"A new Pyre server will be started next time you open or save "
"a .py file",
short_message=f"{self.server_state.server_options.flavor.simple_name()} Stopped",
level=lsp.MessageType.ERROR,
fallback_to_notification=True,
)
await self.client_type_error_handler.clear_type_errors_for_client()
self.server_state.diagnostics = {}
async def connect_and_subscribe(
self,
server_options: pyre_server_options.PyreServerOptions,
socket_path: Path,
connection_timer: timer.Timer,
is_preexisting: bool,
) -> None:
project_identifier = server_options.project_identifier
async with connections.connect_async(socket_path) as (
input_channel,
output_channel,
):
if is_preexisting:
await self.client_status_message_handler.log_and_show_status_message_to_client(
"Established connection with existing Pyre server at "
f"`{project_identifier}`.",
short_message=f"{server_options.flavor.simple_name()} Ready",
level=lsp.MessageType.INFO,
fallback_to_notification=True,
)
else:
await self.client_status_message_handler.log_and_show_status_message_to_client(
f"Pyre server at `{project_identifier}` has been initialized.",
short_message=f"{server_options.flavor.simple_name()} Ready",
level=lsp.MessageType.INFO,
fallback_to_notification=True,
)
self.server_state.consecutive_start_failure = 0
self.server_state.is_user_notified_on_buck_failure = False
log_lsp_event.log(
remote_logging=self.remote_logging,
event=log_lsp_event.LSPEvent.CONNECTED,
integers={"duration": int(connection_timer.stop_in_millisecond())},
normals={
"connected_to": (
"already_running_server"
if is_preexisting
else "newly_started_server"
),
**self._auxiliary_logging_info(server_options),
},
)
self.server_state.status_tracker.set_status(state.ConnectionStatus.READY)
await self.subscribe(input_channel, output_channel)
async def launch_and_subscribe(
self,
server_options: pyre_server_options.PyreServerOptions,
) -> None:
project_identifier = server_options.project_identifier
start_arguments = server_options.start_arguments
socket_path = server_options.get_socket_path()
flavor = server_options.flavor
connection_timer = timer.Timer()
try:
await self.client_setup()
if self.server_state.client_register_event is not None:
self.server_state.client_register_event.set()
await self.connect_and_subscribe(
server_options,
socket_path,
connection_timer,
is_preexisting=True,
)
except connections.ConnectionFailure:
if self.server_state.client_register_event is not None:
self.server_state.client_register_event.clear()
await self.client_status_message_handler.log_and_show_status_message_to_client(
f"Starting a new Pyre server at `{project_identifier}` in "
"the background.",
short_message=f"Starting {flavor.simple_name()}...",
level=lsp.MessageType.WARNING,
fallback_to_notification=True,
)
self.server_state.status_tracker.set_status(state.ConnectionStatus.STARTING)
start_status = await async_start_pyre_server(
server_options.server_start_command,
start_arguments,
flavor,
)
if isinstance(start_status, StartSuccess):
await self.client_setup()
if self.server_state.client_register_event is not None:
self.server_state.client_register_event.set()
await self.connect_and_subscribe(
server_options,
socket_path,
connection_timer,
is_preexisting=False,
)
elif isinstance(start_status, BuckStartFailure):
# Buck start failures are intentionally not counted towards
# `consecutive_start_failure` -- they happen far too often in practice
# so we do not want them to trigger suspensions.
log_lsp_event.log(
remote_logging=self.remote_logging,
event=log_lsp_event.LSPEvent.NOT_CONNECTED,
integers={"duration": int(connection_timer.stop_in_millisecond())},
normals={
**self._auxiliary_logging_info(server_options),
"exception": str(start_status.message),
},
)
if not self.server_state.is_user_notified_on_buck_failure:
await self.client_status_message_handler.show_notification_message_to_client(
f"Cannot start a new Pyre server at `{project_identifier}` "
"due to Buck failure. If you added or changed a target, "
"make sure the target file is parsable and the owning "
"targets are buildable by Buck. If you removed a target, "
"make sure that target is not explicitly referenced from the "
"Pyre configuration file of the containing project.",
level=lsp.MessageType.ERROR,
)
self.server_state.is_user_notified_on_buck_failure = True
await self.client_status_message_handler.show_status_message_to_client(
f"Cannot start a new Pyre server at `{project_identifier}`. "
f"{start_status.message}",
short_message=f"{server_options.flavor.simple_name()} Stopped",
level=lsp.MessageType.INFO,
fallback_to_notification=False,
)
elif isinstance(start_status, OtherStartFailure):
self.server_state.consecutive_start_failure += 1
if (
self.server_state.consecutive_start_failure
< CONSECUTIVE_START_ATTEMPT_THRESHOLD
):
log_lsp_event.log(
remote_logging=self.remote_logging,
event=log_lsp_event.LSPEvent.NOT_CONNECTED,
integers={"duration": int(connection_timer.stop_in_millisecond())},
normals={
**self._auxiliary_logging_info(server_options),
"exception": str(start_status.detail),
},
)
await self.client_status_message_handler.show_status_message_to_client(
f"Cannot start a new Pyre server at `{project_identifier}`. "
f"{start_status.message}",
short_message=f"{server_options.flavor.simple_name()} Stopped",
level=lsp.MessageType.INFO,
fallback_to_notification=True,
)
else:
log_lsp_event.log(
remote_logging=self.remote_logging,
event=log_lsp_event.LSPEvent.SUSPENDED,
integers={"duration": int(connection_timer.stop_in_millisecond())},
normals={
**self._auxiliary_logging_info(server_options),
"exception": str(start_status.detail),
},
)
await self.client_status_message_handler.show_status_message_to_client(
f"Pyre server restart at `{project_identifier}` has been "
"failing repeatedly. Disabling The Pyre plugin for now.",
short_message=f"{server_options.flavor.simple_name()} Disabled",
level=lsp.MessageType.ERROR,
fallback_to_notification=True,
)
else:
raise RuntimeError("Impossible type for `start_status`")
async def run(self) -> None:
"""
Reread the server start options, which can change due to configuration
reloading, and run with error logging.
"""
server_options = pyre_server_options.read_server_options(
self.server_options_reader, self.remote_logging
)
# Update the server options, which can change if the config is modified
self.server_state.server_options = server_options
session_timer = timer.Timer()
error_message: Optional[str] = None
try:
LOG.info(f"Starting Pyre server from configuration: {server_options}")
await self.launch_and_subscribe(server_options)
except asyncio.CancelledError as error:
error_message = f"Explicit termination request: {error}"
self.server_state.status_tracker.set_status(
state.ConnectionStatus.DISCONNECTED
)
raise
except PyreDaemonShutdown as error:
error_message = f"Pyre server shutdown: {error}"
self.server_state.status_tracker.set_status(
state.ConnectionStatus.DISCONNECTED
)
except BaseException as error:
error_message = traceback.format_exc()
if isinstance(error, asyncio.IncompleteReadError):
error_message += f"\nIncompleteReadError partial message: `{error.partial.decode('utf-8')}`"
self.server_state.status_tracker.set_status(
state.ConnectionStatus.DISCONNECTED
)
# we have this here and down below since we need to stop allowing
# requests to be sent before client_teardown
if self.server_state.client_register_event is not None:
self.server_state.client_register_event.clear()
await self.client_teardown()
raise
finally:
if self.server_state.client_register_event is not None:
self.server_state.client_register_event.clear()
if error_message is not None:
log_lsp_event.log(
remote_logging=self.remote_logging,
event=log_lsp_event.LSPEvent.DISCONNECTED,
integers={"duration": int(session_timer.stop_in_millisecond())},
normals={
**self._auxiliary_logging_info(server_options),
"exception": error_message,
},
)
|
PyreDaemonLaunchAndSubscribeHandler
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 64418,
"end": 65173
}
|
class ____(str, Enum):
ARABIC = "arabic"
AZERBAIJANI = "azerbaijani"
BASQUE = "basque"
BENGALI = "bengali"
CATALAN = "catalan"
CHINESE = "chinese"
DANISH = "danish"
DUTCH = "dutch"
ENGLISH = "english"
FINNISH = "finnish"
FRENCH = "french"
GERMAN = "german"
GREEK = "greek"
HEBREW = "hebrew"
HINGLISH = "hinglish"
HUNGARIAN = "hungarian"
INDONESIAN = "indonesian"
ITALIAN = "italian"
JAPANESE = "japanese"
KAZAKH = "kazakh"
NEPALI = "nepali"
NORWEGIAN = "norwegian"
PORTUGUESE = "portuguese"
ROMANIAN = "romanian"
RUSSIAN = "russian"
SLOVENE = "slovene"
SPANISH = "spanish"
SWEDISH = "swedish"
TAJIK = "tajik"
TURKISH = "turkish"
|
Language
|
python
|
ansible__ansible
|
test/units/module_utils/facts/test_collectors.py
|
{
"start": 12490,
"end": 12718
}
|
class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'platform']
valid_subsets = ['platform']
fact_namespace = 'ansible_platform'
collector_class = PlatformFactCollector
|
TestPlatformFactCollector
|
python
|
spyder-ide__spyder
|
spyder/utils/stylesheet.py
|
{
"start": 18641,
"end": 19755
}
|
class ____(BaseTabBarStyleSheet):
"""Base style for dockwidget tabbars."""
SCROLL_BUTTONS_BORDER_WIDTH = '2px'
SCROLL_BUTTONS_PADDING = 7 if WIN else 9
def set_stylesheet(self):
super().set_stylesheet()
# Main constants
css = self.get_stylesheet()
# Center tabs to differentiate them from the regular ones.
# See spyder-ide/spyder#9763 for details.
css.QTabBar.setValues(
alignment='center'
)
css['QTabWidget::tab-bar'].setValues(
alignment='center'
)
# Style for selected tabs
css['QTabBar::tab:selected'].setValues(
color=(
SpyderPalette.COLOR_TEXT_1 if is_dark_interface() else
SpyderPalette.COLOR_BACKGROUND_1
),
backgroundColor=SpyderPalette.SPECIAL_TABS_SELECTED,
)
# Make scroll button icons smaller on Windows and Mac
if WIN or MAC:
css['QTabBar QToolButton'].setValues(
padding=f'{self.SCROLL_BUTTONS_PADDING}px',
)
|
BaseDockTabBarStyleSheet
|
python
|
vyperlang__vyper
|
vyper/venom/basicblock.py
|
{
"start": 6675,
"end": 15299
}
|
class ____:
"""
IRInstruction represents an instruction in IR. Each instruction has an opcode,
operands, and return value. For example, the following IR instruction:
%1 = add %0, 1
has opcode "add", operands ["%0", "1"], and return value "%1".
Convention: the rightmost value is the top of the stack.
"""
opcode: str
operands: list[IROperand]
_outputs: list[IRVariable]
parent: IRBasicBlock
annotation: Optional[str]
ast_source: Optional[IRnode]
error_msg: Optional[str]
def __init__(
self,
opcode: str,
operands: list[IROperand] | Iterator[IROperand],
outputs: Optional[list[IRVariable]] = None,
):
assert isinstance(opcode, str), "opcode must be an str"
assert isinstance(operands, list | Iterator), "operands must be a list"
self.opcode = opcode
self.operands = list(operands) # in case we get an iterator
self._outputs = list(outputs) if outputs is not None else []
self.annotation = None
self.ast_source = None
self.error_msg = None
@property
def is_volatile(self) -> bool:
return self.opcode in VOLATILE_INSTRUCTIONS
@property
def is_commutative(self) -> bool:
return self.opcode in COMMUTATIVE_INSTRUCTIONS
@property
def is_comparator(self) -> bool:
return self.opcode in COMPARATOR_INSTRUCTIONS
@property
def flippable(self) -> bool:
return self.is_commutative or self.is_comparator
@property
def is_bb_terminator(self) -> bool:
return self.opcode in BB_TERMINATORS
@property
def is_phi(self) -> bool:
return self.opcode == "phi"
@property
def is_param(self) -> bool:
return self.opcode == "param"
@property
def is_pseudo(self) -> bool:
"""
Check if instruction is pseudo, i.e. not an actual instruction but
a construct for intermediate representation like phi and param.
"""
# do not reorder `source` instructions in dft pass - for testing
return self.is_phi or self.is_param or self.opcode == "source"
def get_read_effects(self) -> effects.Effects:
return effects.reads.get(self.opcode, effects.EMPTY)
def get_write_effects(self) -> effects.Effects:
return effects.writes.get(self.opcode, effects.EMPTY)
def get_label_operands(self) -> Iterator[IRLabel]:
"""
Get all labels in instruction.
"""
return (op for op in self.operands if isinstance(op, IRLabel))
def get_non_label_operands(self) -> Iterator[IROperand]:
"""
Get input operands for instruction which are not labels
"""
return (op for op in self.operands if not isinstance(op, IRLabel))
def get_input_variables(self) -> Iterator[IRVariable]:
"""
Get all input operands for instruction.
"""
return (op for op in self.operands if isinstance(op, IRVariable))
def get_outputs(self) -> list[IRVariable]:
"""
Get the outputs of the instruction.
Makes a copy to prevent external mutation, so
keep that in mind when performance matters.
"""
return list(self._outputs)
@property
def num_outputs(self) -> int:
"""
Return how many outputs this instruction produces.
"""
return len(self._outputs)
@property
def output(self) -> IRVariable:
"""
Return the single output for instructions with exactly one.
"""
assert len(self._outputs) == 1, f"expected single output for {self}"
return self._outputs[0]
@property
def has_outputs(self) -> bool:
"""
Check whether this instruction produces any outputs.
"""
return len(self._outputs) > 0
def set_outputs(self, outputs: list[IRVariable]) -> None:
"""
Replace all outputs for this instruction.
"""
self._outputs = list(outputs)
def make_nop(self):
self.annotation = str(self) # Keep original instruction as annotation for debugging
self.opcode = "nop"
self._outputs = []
self.operands = []
def flip(self):
"""
Flip operands for commutative or comparator opcodes
"""
assert self.flippable
self.operands.reverse()
if self.is_commutative:
return
assert self.opcode in COMPARATOR_INSTRUCTIONS # sanity
self.opcode = flip_comparison_opcode(self.opcode)
def replace_operands(self, replacements: dict) -> None:
"""
Update operands with replacements.
replacements are represented using a dict: "key" is replaced by "value".
"""
for i, operand in enumerate(self.operands):
if operand in replacements:
self.operands[i] = replacements[operand]
def replace_label_operands(self, replacements: dict) -> None:
"""
Update label operands with replacements.
replacements are represented using a dict: "key" is replaced by "value".
"""
replacements = {k.value: v for k, v in replacements.items()}
for i, operand in enumerate(self.operands):
if isinstance(operand, IRLabel) and operand.value in replacements:
self.operands[i] = replacements[operand.value]
@property
def phi_operands(self) -> Iterator[tuple[IRLabel, IROperand]]:
"""
Get phi operands for instruction.
"""
assert self.opcode == "phi", "instruction must be a phi"
for i in range(0, len(self.operands), 2):
label = self.operands[i]
var = self.operands[i + 1]
assert isinstance(label, IRLabel), f"not a label: {label} (at `{self}`)"
assert isinstance(var, IRVariable), f"not a variable: {var} (at `{self}`)"
yield label, var
def remove_phi_operand(self, label: IRLabel) -> None:
"""
Remove a phi operand from the instruction.
"""
assert self.opcode == "phi", "instruction must be a phi"
for i in range(0, len(self.operands), 2):
if self.operands[i] == label:
del self.operands[i : i + 2]
return
@property
def code_size_cost(self) -> int:
if self.opcode in ("ret", "param"):
return 0
if self.opcode in ("assign", "palloca", "alloca", "calloca"):
return 1
return 2
def get_ast_source(self) -> Optional[IRnode]:
if self.ast_source:
return self.ast_source
idx = self.parent.instructions.index(self)
for inst in reversed(self.parent.instructions[:idx]):
if inst.ast_source:
return inst.ast_source
return self.parent.parent.ast_source
def copy(self) -> IRInstruction:
ret = IRInstruction(self.opcode, self.operands.copy(), self.get_outputs())
ret.annotation = self.annotation
ret.ast_source = self.ast_source
ret.error_msg = self.error_msg
return ret
def str_short(self) -> str:
s = ""
outs = self.get_outputs()
if len(outs) > 0:
s += f"{', '.join(map(str, outs))} = "
opcode = f"{self.opcode} " if self.opcode != "assign" else ""
s += opcode
operands = self.operands
        if self.opcode not in ["jmp", "jnz", "djmp", "invoke"]:
operands = list(reversed(operands))
s += ", ".join([(f"@{op}" if isinstance(op, IRLabel) else str(op)) for op in operands])
return s
def __repr__(self) -> str:
s = ""
outs = self.get_outputs()
if len(outs) > 0:
s += f"{', '.join(map(str, outs))} = "
opcode = f"{self.opcode} " if self.opcode != "assign" else ""
s += opcode
operands = self.operands
if self.opcode == "invoke":
operands = [operands[0]] + list(reversed(operands[1:]))
elif self.opcode not in ("jmp", "jnz", "djmp", "phi"):
operands = reversed(operands) # type: ignore
s += ", ".join([(f"@{op}" if isinstance(op, IRLabel) else str(op)) for op in operands])
if self.annotation:
s = f"{s: <30} ; {self.annotation}"
# debug:
# if self.error_msg:
# s += f" ;>>> {self.error_msg}"
return f"{s: <30}"
def _ir_operand_from_value(val: Any) -> IROperand:
if isinstance(val, IROperand):
return val
assert isinstance(val, int), val
return IRLiteral(val)
|
IRInstruction
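A standalone sketch of the flip() behaviour above, with no vyper imports; the opcode sets and the comparison-flip table are illustrative stand-ins for COMMUTATIVE_INSTRUCTIONS, COMPARATOR_INSTRUCTIONS, and flip_comparison_opcode.

COMMUTATIVE = {"add", "mul", "and", "or", "xor"}
FLIP_COMPARISON = {"gt": "lt", "lt": "gt", "sgt": "slt", "slt": "sgt"}

def flip(opcode: str, operands: list) -> tuple[str, list]:
    # Reverse the operand list; comparators also need a mirrored opcode.
    operands = list(reversed(operands))
    if opcode in COMMUTATIVE:
        return opcode, operands
    assert opcode in FLIP_COMPARISON, f"{opcode} is not flippable"
    return FLIP_COMPARISON[opcode], operands

print(flip("add", ["%0", 1]))    # ('add', [1, '%0'])
print(flip("gt", ["%0", "%1"]))  # ('lt', ['%1', '%0'])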
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backends/backend_webagg.py
|
{
"start": 1807,
"end": 1904
}
|
class ____(core.FigureCanvasWebAggCore):
manager_class = FigureManagerWebAgg
|
FigureCanvasWebAgg
|
python
|
apache__thrift
|
lib/py/src/protocol/TCompactProtocol.py
|
{
"start": 13217,
"end": 13690
}
|
class ____(TProtocolFactory):
def __init__(self,
string_length_limit=None,
container_length_limit=None):
self.string_length_limit = string_length_limit
self.container_length_limit = container_length_limit
def getProtocol(self, trans):
return TCompactProtocol(trans,
self.string_length_limit,
self.container_length_limit)
|
TCompactProtocolFactory
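A hedged usage sketch, assuming the standard Apache Thrift Python package layout (thrift.transport.TTransport, thrift.protocol.TCompactProtocol) is installed; the limit values are arbitrary.

from thrift.transport import TTransport
from thrift.protocol import TCompactProtocol

# The length limits guard against oversized strings/containers when reading
# untrusted input; None (the default) means unlimited.
factory = TCompactProtocol.TCompactProtocolFactory(
    string_length_limit=1024,
    container_length_limit=1024,
)
transport = TTransport.TMemoryBuffer()   # any TTransport implementation works
protocol = factory.getProtocol(transport)
# `protocol` would normally be handed to Thrift-generated client/processor code.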
|
python
|
pydata__xarray
|
xarray/computation/arithmetic.py
|
{
"start": 3781,
"end": 4043
}
|
class ____(
ImplementsArrayReduce,
IncludeNumpySameMethods,
SupportsArithmetic,
DataArrayOpsMixin,
):
__slots__ = ()
# priority must be higher than Variable to properly work with binary ufuncs
__array_priority__ = 60
|
DataArrayArithmetic
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/partitions/definition/time_window_subclasses.py
|
{
"start": 540,
"end": 4533
}
|
class ____(TimeWindowPartitionsDefinition):
"""A set of hourly partitions.
The first partition in the set will start on the start_date at midnight. The last partition
in the set will end before the current time, unless the end_offset argument is set to a
positive number. If minute_offset is provided, the start and end times of each partition
will be minute_offset past the hour.
Args:
start_date (Union[datetime.datetime, str]): The first date in the set of partitions. Can
provide in either a datetime or string format.
        end_date (Union[datetime.datetime, str, None]): The last date (exclusive) in the set of partitions.
Default is None. Can provide in either a datetime or string format.
minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults
to 0.
        fmt (Optional[str]): The date format to use. Defaults to the standard hourly format
            (``DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE``). Note that if a non-UTC
timezone is used, the date format must include a timezone offset to disambiguate between
multiple instances of the same time before and after the Fall DST transition. If the
format does not contain this offset, the second instance of the ambiguous time partition
key will have the UTC offset automatically appended to it.
timezone (Optional[str]): The timezone in which each date should exist.
Supported strings for timezones are the ones provided by the
`IANA time zone database <https://www.iana.org/time-zones>`_ - e.g. "America/Los_Angeles".
end_offset (int): Extends the partition set by a number of partitions equal to the value
passed. If end_offset is 0 (the default), the last partition ends before the current
time. If end_offset is 1, the second-to-last partition ends before the current time,
and so on.
exclusions (Optional[Sequence[Union[str, datetime]]]): Specifies a sequence of cron strings
or datetime objects that should be excluded from the partition set. Every tick of the
cron schedule that matches an excluded datetime or matches the tick of an excluded
cron string will be excluded from the partition set.
.. code-block:: python
from datetime import datetime
from dagster import HourlyPartitionsDefinition
# Basic hourly partitions starting at midnight
hourly_partitions = HourlyPartitionsDefinition(start_date=datetime(2022, 3, 12))
# Hourly partitions with 15-minute offset
offset_partitions = HourlyPartitionsDefinition(
start_date=datetime(2022, 3, 12),
minute_offset=15
)
"""
    # mapping for fields defined on TimeWindowPartitionsDefinition to this subclass's __new__
__field_remap__ = {
"start_ts": "start_date",
"end_ts": "end_date",
}
def __new__(
cls,
start_date: Union[datetime, str],
end_date: Union[datetime, str, None] = None,
minute_offset: int = 0,
timezone: Optional[str] = None,
fmt: Optional[str] = None,
end_offset: int = 0,
exclusions: Optional[Sequence[Union[str, datetime, TimestampWithTimezone]]] = None,
**kwargs,
):
_fmt = fmt or DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE
schedule_type = ScheduleType.HOURLY
# We accept cron_schedule "hidden" via kwargs to support record copy()
cron_schedule = kwargs.get("cron_schedule")
if cron_schedule:
schedule_type = None
return super().__new__(
cls,
schedule_type=schedule_type,
start=start_date,
end=end_date,
minute_offset=minute_offset,
timezone=timezone,
fmt=_fmt,
end_offset=end_offset,
cron_schedule=cron_schedule,
exclusions=exclusions,
)
@public
|
HourlyPartitionsDefinition
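A short construction sketch based on the docstring examples above; it assumes the `dagster` package is installed and exposes HourlyPartitionsDefinition as public API.

from datetime import datetime
from dagster import HourlyPartitionsDefinition

# Hourly partitions starting at 2024-01-01 00:30, 01:30, ..., shifted 30
# minutes past the hour, plus one extra trailing partition via end_offset.
hourly = HourlyPartitionsDefinition(
    start_date=datetime(2024, 1, 1),
    minute_offset=30,
    end_offset=1,
)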
|
python
|
tiangolo__fastapi
|
scripts/label_approved.py
|
{
"start": 386,
"end": 2245
}
|
class ____(BaseSettings):
github_repository: str
token: SecretStr
debug: bool | None = False
config: dict[str, LabelSettings] | Literal[""] = default_config
settings = Settings()
if settings.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.debug(f"Using config: {settings.model_dump_json()}")
g = Github(settings.token.get_secret_value())
repo = g.get_repo(settings.github_repository)
for pr in repo.get_pulls(state="open"):
logging.info(f"Checking PR: #{pr.number}")
pr_labels = list(pr.get_labels())
pr_label_by_name = {label.name: label for label in pr_labels}
reviews = list(pr.get_reviews())
review_by_user: dict[str, PullRequestReview] = {}
for review in reviews:
if review.user.login in review_by_user:
stored_review = review_by_user[review.user.login]
if review.submitted_at >= stored_review.submitted_at:
review_by_user[review.user.login] = review
else:
review_by_user[review.user.login] = review
approved_reviews = [
review for review in review_by_user.values() if review.state == "APPROVED"
]
config = settings.config or default_config
for approved_label, conf in config.items():
logging.debug(f"Processing config: {conf.model_dump_json()}")
if conf.await_label is None or (conf.await_label in pr_label_by_name):
logging.debug(f"Processable PR: {pr.number}")
if len(approved_reviews) >= conf.number:
logging.info(f"Adding label to PR: {pr.number}")
pr.add_to_labels(approved_label)
if conf.await_label:
logging.info(f"Removing label from PR: {pr.number}")
pr.remove_from_labels(conf.await_label)
logging.info("Finished")
|
Settings
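A standalone sketch of the review-deduplication step in the script above: keep only each user's most recent review, then count the approvals. The Review dataclass is a stand-in for PyGithub's PullRequestReview; no GitHub calls are made.

from dataclasses import dataclass
from datetime import datetime

@dataclass
class Review:                      # stand-in for PullRequestReview
    user: str
    state: str
    submitted_at: datetime

def approved_count(reviews: list[Review]) -> int:
    latest: dict[str, Review] = {}
    for review in reviews:
        stored = latest.get(review.user)
        if stored is None or review.submitted_at >= stored.submitted_at:
            latest[review.user] = review
    return sum(1 for r in latest.values() if r.state == "APPROVED")

reviews = [
    Review("alice", "APPROVED", datetime(2024, 1, 1)),
    Review("alice", "CHANGES_REQUESTED", datetime(2024, 1, 2)),  # supersedes the approval
    Review("bob", "APPROVED", datetime(2024, 1, 3)),
]
print(approved_count(reviews))  # -> 1 (only bob's latest review is an approval)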
|
python
|
spack__spack
|
lib/spack/spack/vendor/altgraph/Dot.py
|
{
"start": 3727,
"end": 9966
}
|
class ____(object):
"""
A class providing a **graphviz** (dot language) representation
allowing a fine grained control over how the graph is being
displayed.
If the :command:`dot` and :command:`dotty` programs are not in the current
    system path, their locations need to be specified in the constructor.
"""
def __init__(
self,
graph=None,
nodes=None,
edgefn=None,
nodevisitor=None,
edgevisitor=None,
name="G",
dot="dot",
dotty="dotty",
neato="neato",
graphtype="digraph",
):
"""
Initialization.
"""
self.name, self.attr = name, {}
assert graphtype in ["graph", "digraph"]
self.type = graphtype
self.temp_dot = "tmp_dot.dot"
self.temp_neo = "tmp_neo.dot"
self.dot, self.dotty, self.neato = dot, dotty, neato
# self.nodes: node styles
# self.edges: edge styles
self.nodes, self.edges = {}, {}
if graph is not None and nodes is None:
nodes = graph
if graph is not None and edgefn is None:
def edgefn(node, graph=graph):
return graph.out_nbrs(node)
if nodes is None:
nodes = ()
seen = set()
for node in nodes:
if nodevisitor is None:
style = {}
else:
style = nodevisitor(node)
if style is not None:
self.nodes[node] = {}
self.node_style(node, **style)
seen.add(node)
if edgefn is not None:
for head in seen:
for tail in (n for n in edgefn(head) if n in seen):
if edgevisitor is None:
edgestyle = {}
else:
edgestyle = edgevisitor(head, tail)
if edgestyle is not None:
if head not in self.edges:
self.edges[head] = {}
self.edges[head][tail] = {}
self.edge_style(head, tail, **edgestyle)
def style(self, **attr):
"""
Changes the overall style
"""
self.attr = attr
def display(self, mode="dot"):
"""
Displays the current graph via dotty
"""
if mode == "neato":
self.save_dot(self.temp_neo)
neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
os.system(neato_cmd)
else:
self.save_dot(self.temp_dot)
plot_cmd = "%s %s" % (self.dotty, self.temp_dot)
os.system(plot_cmd)
def node_style(self, node, **kwargs):
"""
Modifies a node style to the dot representation.
"""
if node not in self.edges:
self.edges[node] = {}
self.nodes[node] = kwargs
def all_node_style(self, **kwargs):
"""
Modifies all node styles
"""
for node in self.nodes:
self.node_style(node, **kwargs)
def edge_style(self, head, tail, **kwargs):
"""
Modifies an edge style to the dot representation.
"""
if tail not in self.nodes:
raise GraphError("invalid node %s" % (tail,))
try:
if tail not in self.edges[head]:
self.edges[head][tail] = {}
self.edges[head][tail] = kwargs
except KeyError:
raise GraphError("invalid edge %s -> %s " % (head, tail))
def iterdot(self):
# write graph title
if self.type == "digraph":
yield "digraph %s {\n" % (self.name,)
elif self.type == "graph":
yield "graph %s {\n" % (self.name,)
else:
raise GraphError("unsupported graphtype %s" % (self.type,))
# write overall graph attributes
for attr_name, attr_value in sorted(self.attr.items()):
yield '%s="%s";' % (attr_name, attr_value)
yield "\n"
# some reusable patterns
cpatt = '%s="%s",' # to separate attributes
epatt = "];\n" # to end attributes
# write node attributes
for node_name, node_attr in sorted(self.nodes.items()):
yield '\t"%s" [' % (node_name,)
for attr_name, attr_value in sorted(node_attr.items()):
yield cpatt % (attr_name, attr_value)
yield epatt
# write edge attributes
for head in sorted(self.edges):
for tail in sorted(self.edges[head]):
if self.type == "digraph":
yield '\t"%s" -> "%s" [' % (head, tail)
else:
yield '\t"%s" -- "%s" [' % (head, tail)
for attr_name, attr_value in sorted(self.edges[head][tail].items()):
yield cpatt % (attr_name, attr_value)
yield epatt
# finish file
yield "}\n"
def __iter__(self):
return self.iterdot()
def save_dot(self, file_name=None):
"""
Saves the current graph representation into a file
"""
if not file_name:
            warnings.warn("always pass a file_name", DeprecationWarning)
file_name = self.temp_dot
with open(file_name, "w") as fp:
for chunk in self.iterdot():
fp.write(chunk)
def save_img(self, file_name=None, file_type="gif", mode="dot"):
"""
Saves the dot file as an image file
"""
if not file_name:
            warnings.warn("always pass a file_name", DeprecationWarning)
file_name = "out"
if mode == "neato":
self.save_dot(self.temp_neo)
neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
os.system(neato_cmd)
plot_cmd = self.dot
else:
self.save_dot(self.temp_dot)
plot_cmd = self.dot
file_name = "%s.%s" % (file_name, file_type)
create_cmd = "%s -T%s %s -o %s" % (
plot_cmd,
file_type,
self.temp_dot,
file_name,
)
os.system(create_cmd)
|
Dot
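A usage sketch driven entirely by the constructor and methods shown above. Upstream this class ships as altgraph.Dot.Dot (here it is vendored inside Spack), so the import path is an assumption.

from altgraph.Dot import Dot

adjacency = {"a": ["b", "c"], "b": ["c"], "c": []}

dot = Dot(
    nodes=adjacency.keys(),
    edgefn=lambda node: adjacency[node],
    name="demo",
)
dot.style(rankdir="LR")            # overall graph attribute
dot.node_style("a", shape="box")   # per-node attribute

print("".join(dot))                # iterdot() yields the dot-language source
# dot.save_dot("demo.dot")         # or write it to a file instead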
|
python
|
neetcode-gh__leetcode
|
python/0416-partition-equal-subset-sum.py
|
{
"start": 0,
"end": 481
}
|
class ____:
def canPartition(self, nums: List[int]) -> bool:
if sum(nums) % 2:
return False
dp = set()
dp.add(0)
target = sum(nums) // 2
for i in range(len(nums) - 1, -1, -1):
nextDP = set()
for t in dp:
if (t + nums[i]) == target:
return True
nextDP.add(t + nums[i])
nextDP.add(t)
dp = nextDP
return False
|
Solution
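A standalone restatement of the subset-sum DP above, handy for a quick sanity check of the expected outputs.

def can_partition(nums: list[int]) -> bool:
    total = sum(nums)
    if total % 2:
        return False
    target = total // 2
    reachable = {0}                          # subset sums reachable so far
    for n in nums:
        reachable |= {s + n for s in reachable}
        if target in reachable:
            return True
    return False

print(can_partition([1, 5, 11, 5]))  # True  (11 == 1 + 5 + 5)
print(can_partition([1, 2, 3, 5]))   # False (total 11 is odd)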
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_frontend_version.py
|
{
"start": 115,
"end": 1100
}
|
class ____(APITestCase):
def test_returns_frontend_commit_sha(self) -> None:
url = reverse("sentry-api-0-internal-frontend-version")
with patch("sentry.api.endpoints.frontend_version.get_frontend_commit_sha") as mock_get_sha:
mock_get_sha.return_value = "abc123def456"
response = self.client.get(url)
assert response.status_code == 200
assert response.data == {"version": "abc123def456"}
mock_get_sha.assert_called_once()
def test_returns_none_when_no_commit_sha(self) -> None:
url = reverse("sentry-api-0-internal-frontend-version")
with patch("sentry.api.endpoints.frontend_version.get_frontend_commit_sha") as mock_get_sha:
mock_get_sha.return_value = None
response = self.client.get(url)
assert response.status_code == 200
assert response.data == {"version": None}
mock_get_sha.assert_called_once()
|
FrontendVersionTest
|
python
|
pytorch__pytorch
|
test/inductor/test_flex_attention.py
|
{
"start": 11337,
"end": 150808
}
|
class ____(InductorTestCase):
def setUp(self):
super().setUp()
skipCPUIf(
LONG_COMPILATION_ON_CPU,
"skip UT for CPU due to long compilation time found in CI",
)
def _check_equal(
self,
golden_out: torch.Tensor,
ref_out: torch.Tensor,
compiled_out: torch.Tensor,
fudge_factor: float,
tensor_name: Optional[str] = None,
fudge_atol: float = 0,
):
compiled_error = (golden_out - compiled_out).abs().mean()
ref_error = (golden_out - ref_out).abs().mean()
if torch.isnan(compiled_error).any() or torch.isnan(ref_error).any():
self.fail("Output/Grad with NaN")
name = tensor_name if tensor_name is not None else ""
msg = f"{name} Compiled error {compiled_error} is greater than ref error {ref_error} by more than {fudge_factor}X."
torch.testing.assert_close(
compiled_error, ref_error, rtol=fudge_factor, atol=1e-7, msg=msg
)
def _check_out(
self,
golden_out: torch.Tensor,
ref_out: torch.Tensor,
compiled_out: torch.Tensor,
is_paged_attention: bool = False,
):
dtype = ref_out.dtype
with torch.no_grad():
# Note, it seems like we really are less accurate than the float32
# computation, likely due to the online softmax
if dtype == torch.float32:
fudge_factor = 10.0
if is_paged_attention:
# paged attention is less accurate since it may reorder
# the blocks from block mask
fudge_factor = 20.0
else:
fudge_factor = 1.1
            # Check output
self._check_equal(golden_out, ref_out, compiled_out, fudge_factor, "Out")
def _check_out_and_grad(
self,
golden_out: torch.Tensor,
ref_out: torch.Tensor,
compiled_out: torch.Tensor,
q_gold: torch.Tensor,
q_ref: torch.Tensor,
q: torch.Tensor,
k_gold: torch.Tensor,
k_ref: torch.Tensor,
k: torch.Tensor,
v_gold: torch.Tensor,
v_ref: torch.Tensor,
v: torch.Tensor,
):
dtype = ref_out.dtype
with torch.no_grad():
# Note, it seems like we really are less accurate than the float32
# computation, likely due to the online softmax
if dtype == torch.float32:
fudge_factor = 10.0
else:
fudge_factor = 1.1
            # Check output
self._check_equal(golden_out, ref_out, compiled_out, fudge_factor, "Out")
# Check gradients
q_fudge_factor = 1.0 * fudge_factor
self._check_equal(
q_gold.grad, q_ref.grad, q.grad, q_fudge_factor, "Grad_Query"
)
k_fudge_factor = 1.0 * fudge_factor
self._check_equal(
k_gold.grad, k_ref.grad, k.grad, k_fudge_factor, "Grad_Key"
)
v_fudge_factor = 1.0 * fudge_factor
self._check_equal(
v_gold.grad, v_ref.grad, v.grad, v_fudge_factor, "Grad_Value"
)
def run_test(
self,
score_mod: _score_mod_signature,
dtype: torch.dtype,
device: str,
Q_B: int = B,
Q_H: int = H,
Q_S: int = S,
Q_D: int = D,
KV_B: Optional[int] = None,
KV_H: Optional[int] = None,
KV_S: Optional[int] = None,
V_D: Optional[int] = None,
block_mask: Optional[BlockMask] = None,
):
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
if KV_B is None:
KV_B = Q_B
if KV_H is None:
KV_H = Q_H
if KV_S is None:
KV_S = Q_S
if V_D is None:
V_D = Q_D
if device == "cpu" and dtype is torch.float16:
dtype = torch.float32
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
q = torch.randn(
(Q_B, Q_H, Q_S, Q_D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
k = torch.randn(
(KV_B, KV_H, KV_S, Q_D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
v = torch.randn(
(KV_B, KV_H, KV_S, V_D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
if block_mask is None:
block_mask = create_block_mask(
noop_mask, Q_B, Q_H, Q_S, KV_S, device=device
)
q_ref, k_ref, v_ref = query_key_value_clones(q, k, v)
q_gold, k_gold, v_gold = query_key_value_clones(q, k, v, torch.float64)
sdpa_partial = create_attention(score_mod, block_mask, enable_gqa=(Q_H != KV_H))
compiled_sdpa = torch.compile(sdpa_partial)
golden_out = sdpa_partial(q_gold, k_gold, v_gold)
ref_out = sdpa_partial(q_ref, k_ref, v_ref)
compiled_out = compiled_sdpa(q, k, v)
assert isinstance(golden_out, torch.Tensor)
assert isinstance(ref_out, torch.Tensor)
assert isinstance(compiled_out, torch.Tensor)
if not requires_grad:
self._check_out(
golden_out,
ref_out,
compiled_out,
is_paged_attention=False,
)
else:
backward_grad = torch.randn(
(Q_B, Q_H, Q_S, V_D), dtype=dtype, device=device
)
golden_out.backward(backward_grad.to(torch.float64))
ref_out.backward(backward_grad)
compiled_out.backward(backward_grad)
self._check_out_and_grad(
golden_out,
ref_out,
compiled_out,
q_gold,
q_ref,
q,
k_gold,
k_ref,
k,
v_gold,
v_ref,
v,
)
def preprocess_paged_attention(
self,
score_mod: Optional[Callable],
q: Tensor,
k: Tensor,
v: Tensor,
block_mask,
dtype: torch.dtype,
device: str,
page_size: int = 128,
) -> tuple[Tensor, Tensor, BlockMask, _score_mod_signature]:
assert block_mask is not None, "Must provide block_mask"
Q_B, Q_H, Q_S, _ = q.shape
KV_B, KV_H, KV_S, QK_D = k.shape
_, _, _, V_D = v.shape
# test with different batch size
max_batch_size = max(Q_B, KV_B) + 3
n_pages = (KV_S + page_size - 1) // page_size * max_batch_size
# allocate cache
MAX_CACHED_SEQ_LEN = n_pages * page_size
k_cache = torch.zeros(
1,
KV_H,
MAX_CACHED_SEQ_LEN,
QK_D,
device=device,
dtype=dtype,
)
v_cache = torch.zeros(
1,
KV_H,
MAX_CACHED_SEQ_LEN,
V_D,
device=device,
dtype=dtype,
)
# For testing purposes, we randomly initialize the page table, which maps
# (batch_idx, logical_block_idx) to physical_block_idx. Specifically, PagedAttention
# maintains a stack empty_pages of unused physical_block_idx. The `batch_reserve`
# function grabs physical_block_idx from the top of empty_pages until there are enough
# pages for each batch index (i.e., num pages for batch_idx >= target_seq_len[batch_idx]).
# For example, at the first batch_reserve call, physical block indices (1,...,KV_S//4)
# are allocated to batch index 0, and physical block indices
# (KV_S//4+1, ..., KV_S//4 + KV_S//2) are allocated to batch index 1, etc.
# Thus, kv tensors of batch index 1 will be scattered in the kv cache, simulating
# a real use case of paged attention.
paged_attention = PagedAttention(
n_pages, page_size, max_batch_size, device=device
)
batch_reserve(
paged_attention,
torch.tensor([KV_S // 4, KV_S // 2, KV_S // 4, KV_S // 3], device=device),
)
batch_reserve(
paged_attention,
torch.tensor([KV_S // 4, KV_S // 2, KV_S // 2, KV_S // 2], device=device),
)
batch_reserve(
paged_attention,
torch.tensor([KV_S // 2, KV_S, KV_S // 2, KV_S], device=device),
)
batch_reserve(
paged_attention, torch.tensor([KV_S, KV_S, KV_S, KV_S], device=device)
)
# update cache with k and v
input_pos = (
torch.arange(KV_S, device=device, dtype=torch.int32)
.unsqueeze(0)
.expand(KV_B, KV_S)
)
batch_idx = torch.arange(KV_B, device=device, dtype=torch.int32)
paged_attention.assign(batch_idx, input_pos, k, v, k_cache, v_cache)
# convert block mask and score mod
kv_len_tensor = torch.full((KV_B,), KV_S, device=device, dtype=torch.int64)
converted_block_mask = paged_attention.convert_logical_block_mask(
block_mask, kv_len=kv_len_tensor
)
converted_score_mod = paged_attention.get_score_mod(
score_mod, kv_len=kv_len_tensor
)
return k_cache, v_cache, converted_block_mask, converted_score_mod
def run_paged_attention(
self,
score_mod: Optional[Callable],
q: Tensor,
k: Tensor,
v: Tensor,
dtype: torch.dtype,
device: str,
block_mask: Optional[BlockMask] = None,
kernel_options: Optional[dict] = None,
) -> tuple[Tensor, Tensor]:
B, Q_H, Q_S, KV_H, KV_S = (
q.shape[0],
q.shape[1],
q.shape[2],
k.shape[1],
k.shape[2],
)
if block_mask is None:
block_mask = create_block_mask(noop_mask, B, 1, Q_S, KV_S, device=device)
(
k_cache,
v_cache,
converted_block_mask,
converted_score_mod,
) = self.preprocess_paged_attention(
score_mod, q, k, v, block_mask, dtype, device, block_mask.BLOCK_SIZE[1]
)
compiled_sdpa = torch.compile(flex_attention)
# compute
return_lse = True
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
if requires_grad:
compiled_out, compiled_lse = compiled_sdpa(
q,
k_cache,
v_cache,
return_lse=return_lse,
block_mask=converted_block_mask,
score_mod=converted_score_mod,
enable_gqa=(Q_H != KV_H),
kernel_options=kernel_options,
)
else:
return_lse = False
compiled_lse = None
compiled_out = compiled_sdpa(
q,
k_cache,
v_cache,
return_lse=return_lse,
block_mask=converted_block_mask,
score_mod=converted_score_mod,
enable_gqa=(Q_H != KV_H),
kernel_options=kernel_options,
)
return compiled_out, compiled_lse
def run_test_with_paged_attention(
self,
score_mod: Optional[Callable],
dtype: torch.dtype,
device,
Q_B: int = B,
Q_H: int = H,
Q_S: int = S,
QK_D: int = D,
KV_B: int = B,
KV_H: int = H,
KV_S: int = S,
V_D: int = D,
block_mask: Optional[BlockMask] = None,
):
assert Q_H % KV_H == 0
if device == "cpu" and dtype is torch.float16:
dtype = torch.float32
q = torch.randn(
(Q_B, Q_H, Q_S, QK_D), dtype=dtype, device=device, requires_grad=False
)
k = torch.randn(
(KV_B, KV_H, KV_S, QK_D),
dtype=dtype,
device=device,
requires_grad=False,
)
v = torch.randn(
(KV_B, KV_H, KV_S, V_D),
dtype=dtype,
device=device,
requires_grad=False,
)
q_ref, k_ref, v_ref = query_key_value_clones(q, k, v)
q_gold, k_gold, v_gold = query_key_value_clones(q, k, v, torch.float64)
if block_mask is None:
block_mask = create_block_mask(noop_mask, Q_B, 1, Q_S, KV_S, device=device)
sdpa_partial = create_attention(score_mod, block_mask, enable_gqa=(Q_H != KV_H))
golden_out, golden_lse = sdpa_partial(q_gold, k_gold, v_gold, return_lse=True)
ref_out, ref_lse = sdpa_partial(q_ref, k_ref, v_ref, return_lse=True)
compiled_out, compiled_lse = self.run_paged_attention(
score_mod, q, k, v, dtype, device, block_mask
)
self._check_out(
golden_out,
ref_out,
compiled_out,
is_paged_attention=True,
)
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
if requires_grad:
self._check_out(
golden_lse,
ref_lse,
compiled_lse,
is_paged_attention=True,
)
def run_test_with_call(
self,
sdpa_call: Callable,
dtype: torch.dtype,
device: str,
Q_B: int = B,
Q_H: int = H,
Q_S: int = S,
Q_D: int = D,
KV_B: int = B,
KV_H: int = H,
KV_S: int = S,
V_D: int = D,
):
if device == "cpu" and dtype is torch.float16:
dtype = torch.float32
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
q = torch.randn(
(Q_B, Q_H, Q_S, Q_D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
k = torch.randn(
(KV_B, KV_H, KV_S, Q_D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
v = torch.randn(
(KV_B, KV_H, KV_S, V_D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
q_ref, k_ref, v_ref = query_key_value_clones(q, k, v)
q_gold, k_gold, v_gold = query_key_value_clones(q, k, v, torch.float64)
compiled_sdpa = torch.compile(sdpa_call)
golden_out = sdpa_call(q_gold, k_gold, v_gold)
ref_out = sdpa_call(q_ref, k_ref, v_ref)
compiled_out = compiled_sdpa(q, k, v)
if not requires_grad:
self._check_out(
golden_out,
ref_out,
compiled_out,
is_paged_attention=False,
)
else:
backward_grad = torch.randn(
(Q_B, Q_H, Q_S, V_D), dtype=dtype, device=device
)
golden_out.backward(backward_grad.to(torch.float64))
ref_out.backward(backward_grad)
compiled_out.backward(backward_grad)
self._check_out_and_grad(
golden_out,
ref_out,
compiled_out,
q_gold,
q_ref,
q,
k_gold,
k_ref,
k,
v_gold,
v_ref,
v,
)
def run_dynamic_test(
self,
score_mask_mod: tuple[Callable, Callable],
dtype: torch.dtype,
device,
B: int = B,
H: int = H,
S: int = S,
D: int = D,
):
if device == "cpu" and dtype is torch.float16:
dtype = torch.float32
score_mod, mask_mod = score_mask_mod
# First batch with original dimensions (B, H, S, D)
block_mask1 = create_block_mask(mask_mod, 1, 1, S, S, device=device)
sdpa_partial1 = create_attention(score_mod, block_mask=block_mask1)
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
q1 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
k1 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
v1 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
q1_ref, k1_ref, v1_ref = query_key_value_clones(q1, k1, v1)
q1_gold, k1_gold, v1_gold = query_key_value_clones(q1, k1, v1, torch.float64)
ref_out1 = sdpa_partial1(q1_ref, k1_ref, v1_ref)
golden_out1 = sdpa_partial1(q1_gold, k1_gold, v1_gold)
if requires_grad:
backward_grad1 = torch.randn((B, H, S, D), dtype=dtype, device=device)
golden_out1.backward(backward_grad1.to(torch.float64))
ref_out1.backward(backward_grad1)
# Second batch with modified dimensions (B * 2, H, S / 2, D)
B = int(B * 2)
S = int(S / 2)
block_mask2 = create_block_mask(mask_mod, 1, 1, S, S, device=device)
sdpa_partial2 = create_attention(score_mod, block_mask=block_mask2)
q2 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
k2 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
v2 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
q2_ref, k2_ref, v2_ref = query_key_value_clones(q2, k2, v2)
q2_gold, k2_gold, v2_gold = query_key_value_clones(q2, k2, v2, torch.float64)
ref_out2 = sdpa_partial2(q2_ref, k2_ref, v2_ref)
golden_out2 = sdpa_partial2(q2_gold, k2_gold, v2_gold)
if requires_grad:
backward_grad2 = torch.randn((B, H, S, D), dtype=dtype, device=device)
golden_out2.backward(backward_grad2.to(torch.float64))
ref_out2.backward(backward_grad2)
# Third batch with modified dimensions (B * 2, H, S / 4, D)
S = int(S / 2)
block_mask3 = create_block_mask(mask_mod, 1, 1, S, S, device=device)
sdpa_partial3 = create_attention(score_mod, block_mask=block_mask3)
q3 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
k3 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
v3 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
q3_ref, k3_ref, v3_ref = query_key_value_clones(q3, k3, v3)
q3_gold, k3_gold, v3_gold = query_key_value_clones(q3, k3, v3, torch.float64)
ref_out3 = sdpa_partial3(q3_ref, k3_ref, v3_ref)
golden_out3 = sdpa_partial3(q3_gold, k3_gold, v3_gold)
if requires_grad:
backward_grad3 = torch.randn((B, H, S, D), dtype=dtype, device=device)
golden_out3.backward(backward_grad3.to(torch.float64))
ref_out3.backward(backward_grad3)
# Clear dynamo counters
torch._dynamo.reset()
# First compilation with original dimensions
backend = torch._dynamo.testing.CompileCounterWithBackend("inductor")
compiled_sdpa1 = torch.compile(sdpa_partial1, backend=backend, dynamic=True)
compiled_out1 = compiled_sdpa1(q1, k1, v1)
if requires_grad:
compiled_out1.backward(backward_grad1)
self._check_out_and_grad(
golden_out1,
ref_out1,
compiled_out1,
q1_gold,
q1_ref,
q1,
k1_gold,
k1_ref,
k1,
v1_gold,
v1_ref,
v1,
)
else:
self._check_out(golden_out1, ref_out1, compiled_out1)
self.assertEqual(backend.frame_count, 1)
# Second compilation with new dimensions
compiled_sdpa2 = torch.compile(sdpa_partial2, backend=backend, dynamic=True)
compiled_out2 = compiled_sdpa2(q2, k2, v2)
if requires_grad:
compiled_out2.backward(backward_grad2)
self._check_out_and_grad(
golden_out2,
ref_out2,
compiled_out2,
q2_gold,
q2_ref,
q2,
k2_gold,
k2_ref,
k2,
v2_gold,
v2_ref,
v2,
)
else:
self._check_out(golden_out2, ref_out2, compiled_out2)
self.assertEqual(backend.frame_count, 1)
# Third compilation with new dimensions
compiled_sdpa3 = torch.compile(sdpa_partial3, backend=backend, dynamic=True)
compiled_out3 = compiled_sdpa3(q3, k3, v3)
if requires_grad:
compiled_out3.backward(backward_grad3)
self._check_out_and_grad(
golden_out3,
ref_out3,
compiled_out3,
q3_gold,
q3_ref,
q3,
k3_gold,
k3_ref,
k3,
v3_gold,
v3_ref,
v3,
)
else:
self._check_out(golden_out3, ref_out3, compiled_out3)
self.assertEqual(backend.frame_count, 1)
def run_automatic_dynamic_test(
self,
score_mod: Callable,
dtype: torch.dtype,
device: str,
B: int = B,
H: int = H,
S: int = S,
D: int = D,
):
if device == "cpu" and dtype is torch.float16:
dtype = torch.float32
block_mask1 = create_block_mask(noop_mask, 1, 1, S, S, device=device)
sdpa_partial1 = create_attention(score_mod, block_mask=block_mask1)
# The first eager batch, shape (B, H, S, D)
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
q1 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
k1 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
v1 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
golden_out1 = sdpa_partial1(
q1.to(torch.float64), k1.to(torch.float64), v1.to(torch.float64)
)
ref_out1 = sdpa_partial1(q1, k1, v1)
# The second eager batch, shape (B * 2, H, S / 2, D)
B = int(B * 2)
S = int(S / 2)
block_mask2 = create_block_mask(noop_mask, 1, 1, S, S, device=device)
sdpa_partial2 = create_attention(score_mod, block_mask=block_mask2)
q2 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
k2 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
v2 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
golden_out2 = sdpa_partial2(
q2.to(torch.float64), k2.to(torch.float64), v2.to(torch.float64)
)
ref_out2 = sdpa_partial2(q2, k2, v2)
# The third eager batch, shape (B * 4, H, S / 4, D)
B = int(B * 2)
S = int(S / 2)
block_mask3 = create_block_mask(noop_mask, 1, 1, S, S, device=device)
sdpa_partial3 = create_attention(score_mod, block_mask=block_mask3)
q3 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
k3 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
v3 = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
golden_out3 = sdpa_partial3(
q3.to(torch.float64), k3.to(torch.float64), v3.to(torch.float64)
)
ref_out3 = sdpa_partial3(q3, k3, v3)
# Need to clear dynamo counters, since flex attention eager mode also uses dynamo tracing.
# We check dynamo counters["frames"]["ok"] to ensure:
# 1, the first batch is compiled with static shape
# 2, the second batch is compiled with dynamic shape
# 3, no re-compilation in the third batch
torch._dynamo.reset()
# Note, it seems like we really are less accurate than the float32
# computation, likely due to the online softmax
if dtype == torch.float32:
fudge_factor = 10.0
else:
fudge_factor = 1.1
# The first batch.
backend = torch._dynamo.testing.CompileCounterWithBackend("inductor")
compiled_out1 = torch.compile(sdpa_partial1, backend=backend, fullgraph=True)(
q1, k1, v1
)
self._check_equal(golden_out1, ref_out1, compiled_out1, fudge_factor)
self.assertEqual(backend.frame_count, 1)
# The second batch (automatic dynamic).
compiled_out2 = torch.compile(sdpa_partial2, backend=backend, fullgraph=True)(
q2, k2, v2
)
self._check_equal(golden_out2, ref_out2, compiled_out2, fudge_factor)
self.assertEqual(backend.frame_count, 2)
# The third batch (no re-compilation).
compiled_out3 = torch.compile(sdpa_partial3, backend=backend, fullgraph=True)(
q3, k3, v3
)
self._check_equal(golden_out3, ref_out3, compiled_out3, fudge_factor)
self.assertEqual(backend.frame_count, 2)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes)
@dtypesIfCUDA(*device_configs["cuda"].dtypes)
@dtypesIfXPU(*device_configs["xpu"].dtypes)
@common_utils.parametrize("score_mod", test_score_mods)
def test_builtin_score_mods(self, device, dtype, score_mod: Callable):
self.run_test(score_mod, dtype, device=device)
self.run_test_with_paged_attention(score_mod, dtype, device=device)
@running_on_a100_only
@common_utils.parametrize("score_mod", test_score_mods)
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_builtin_score_mods_seqlen_lt_default_sparse_block_size(
self, device, dtype, score_mod: Callable
):
# _DEFAULT_SPARSE_BLOCK_SIZE is 128
attention = functools.partial(
flex_attention,
score_mod=score_mod,
kernel_options={"FORCE_USE_FLEX_ATTENTION": True},
)
self.run_test_with_call(attention, dtype, device, B, H, 64, D, B, H, 64, D)
@running_on_a100_only
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@common_utils.parametrize("score_mod", test_score_mods)
def test_builtin_score_mods_seqlen_lt_custom_sparse_block_size(
self, device, dtype: torch.dtype, score_mod: Callable
):
def causal_mask(b, h, q, kv):
return q >= kv
block_mask = create_block_mask(
causal_mask, 1, 1, 64, 64, BLOCK_SIZE=256, device=device
)
attention = functools.partial(
flex_attention,
score_mod=score_mod,
block_mask=block_mask,
kernel_options={"FORCE_USE_FLEX_ATTENTION": True},
)
self.run_test_with_call(
attention,
dtype,
device,
B,
H,
64,
D,
B,
H,
64,
D,
)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@common_utils.parametrize("score_mask_mod", test_score_mask_mod_map.items())
def test_builtin_score_mods_dynamic(
self, device, dtype: torch.dtype, score_mask_mod: tuple[Callable, Callable]
):
self.run_dynamic_test(score_mask_mod, dtype, S=1024, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@common_utils.parametrize("score_mod", test_score_mods)
def test_builtin_score_mods_automatic_dynamic(
self, device, dtype: torch.dtype, score_mod: Callable
):
self.run_automatic_dynamic_test(score_mod, dtype, S=1024, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@common_utils.parametrize("score_mod", test_score_mods)
def test_builtin_score_mods_different_seqlen(
self, device, dtype: torch.dtype, score_mod: Callable
):
inputs = (
score_mod,
dtype,
device,
B,
H,
S // 2, # Seqlen of Q is different from seqlen of K/V
D,
B,
H,
S,
D,
)
self.run_test(*inputs)
self.run_test_with_paged_attention(*inputs)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes)
@dtypesIfCUDA(*device_configs["cuda"].dtypes)
@dtypesIfXPU(*device_configs["xpu"].dtypes)
@common_utils.parametrize("score_mod", test_score_mods)
@common_utils.parametrize("BLOCK_SIZE", test_block_size)
def test_builtin_score_mods_different_block_size(
self,
device,
dtype: torch.dtype,
score_mod: Callable,
BLOCK_SIZE: Union[int, tuple[int, int]],
):
block_mask = create_block_mask(
noop_mask, B, H, S, S, BLOCK_SIZE=BLOCK_SIZE, device=device
)
self.run_test(score_mod, dtype, block_mask=block_mask, device=device)
self.run_test_with_paged_attention(
score_mod, dtype, block_mask=block_mask, device=device
)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@common_utils.parametrize("batch_dims", test_Bq_Bkv)
@common_utils.parametrize("head_dims", test_Hq_Hkv)
@common_utils.parametrize("score_mod", test_score_mods)
def test_kv_batch_broadcast(
self,
device,
dtype: torch.dtype,
batch_dims: tuple[int, int],
head_dims: tuple[int, int],
score_mod: Callable,
):
Hq, Hkv = head_dims
assert Hq % Hkv == 0
Bq, Bkv = batch_dims
assert Bq > 1 and Bkv == 1
block_mask = create_block_mask(noop_mask, Bq, 1, S, S, device=device)
self.run_test(
score_mod, dtype, device, Bq, Hq, S, D, Bkv, Hkv, S, D, block_mask
)
@supported_platform
@skip_on_cpu
def test_small_block_mask(self, device):
compiled_create_block_mask = torch.compile(create_block_mask)
def create_block_mask_from_seqlens(
q_batch: torch.Tensor,
kv_batch: torch.Tensor,
) -> BlockMask:
B, H = None, None
Q_LEN = q_batch.size(0)
KV_LEN = kv_batch.size(0)
def batch_mask_mod(
b: torch.Tensor,
h: torch.Tensor,
q_idx: torch.Tensor,
kv_idx: torch.Tensor,
):
q_idx_batch = q_batch[q_idx]
kv_idx_batch = kv_batch[kv_idx]
batch_mask = (
(q_idx_batch == kv_idx_batch)
& (q_idx_batch != -1)
& (kv_idx_batch != -1)
)
return batch_mask
return compiled_create_block_mask(
batch_mask_mod,
B=B,
H=H,
Q_LEN=Q_LEN,
KV_LEN=KV_LEN,
device=device,
)
a = torch.tensor([2, 42, 18, 21, 4, 2, 7, 1, 1], device=device)
b = torch.tensor([57, 21, 16, 8], device=device)
for seqlen in [a, b]:
create_block_mask_from_seqlens(seqlen, seqlen)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@common_utils.parametrize("batch_dims", test_Bq_Bkv)
@common_utils.parametrize("head_dims", test_Hq_Hkv)
@common_utils.parametrize("score_mod", test_score_mods)
def test_kv_batch_broadcast_causal_mask(
self,
device,
dtype: torch.dtype,
batch_dims: tuple[int, int],
head_dims: tuple[int, int],
score_mod: Callable,
):
Hq, Hkv = head_dims
assert Hq % Hkv == 0
Bq, Bkv = batch_dims
assert Bq > 1 and Bkv == 1
def mask_mod(b, h, q, kv):
return q >= kv
block_mask = create_block_mask(mask_mod, Bq, 1, S, S, device=device)
attention = functools.partial(
flex_attention, block_mask=block_mask, enable_gqa=(Hq != Hkv)
)
self.run_test_with_call(attention, dtype, device, Bq, Hq, S, D, Bkv, Hkv, S, D)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@common_utils.parametrize("score_mod", test_score_mods)
@skip_on_rocm # TODO: NaNs on ROCM
@skip_on_xpu # TODO: NaNs on XPU like ROCM, need another PR to fix.
def test_GQA(self, device, dtype: torch.dtype, score_mod: Callable):
inputs = (
score_mod,
dtype,
device,
B,
H * 4, # Hq = 4*Hkv.
S // 8,
D,
B,
H,
S,
D,
)
self.run_test(*inputs)
self.run_test_with_paged_attention(*inputs)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@common_utils.parametrize(
"q_s", test_strides[:2]
    )  # TODO: fix layout for query broadcasting
@common_utils.parametrize(
"k_s,v_s",
[
(test_strides[0], test_strides[0]),
(test_strides[0], test_strides[1]),
(test_strides[2], test_strides[3]),
(test_strides[3], test_strides[1]),
# (test_strides[2], test_strides[4]), # TODO: Doesn't work for
# broadcasting reasons i think
],
)
@common_utils.parametrize("do_s", test_strides[:3])
def test_strided_inputs(self, device, dtype: torch.dtype, q_s, k_s, v_s, do_s):
q1 = torch.randn((B * H * S * D * 2), dtype=dtype, device=device)
k1 = torch.randn((B * H * S * D * 2), dtype=dtype, device=device)
v1 = torch.randn((B * H * S * D * 2), dtype=dtype, device=device)
do1 = torch.randn((B * H * S * D * 2), dtype=dtype, device=device)
q_shape = (B, H, S // 2, D)
k_shape = (B, H, S, D)
v_shape = (B, H, S, D)
do_shape = (B, H, S // 2, D)
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
def coerce_to_strides(val, shape, strides):
strides, offset = strides
val_max = [x * (y - 1) for x, y in zip(strides, shape)]
assert sum(val_max) + offset < B * H * S * D * 2
assert strides[-1] == 1
return torch.as_strided(val, shape, strides, offset).requires_grad_(
requires_grad
)
q = coerce_to_strides(q1, q_shape, q_s)
k = coerce_to_strides(k1, k_shape, k_s)
v = coerce_to_strides(v1, v_shape, v_s)
do = coerce_to_strides(do1, do_shape, do_s)
kernel_options = {"USE_TMA": True}
block_mask = _create_empty_block_mask(q, k)
score_mod = _generate_alibi_bias(8)
sdpa_partial = create_attention(
score_mod=score_mod, block_mask=block_mask, kernel_options=kernel_options
)
compiled_sdpa = torch.compile(sdpa_partial, fullgraph=True)
ref_out = sdpa_partial(q, k, v)
compiled_out = compiled_sdpa(q, k, v)
tolerance = Tolerances(atol=2e-1, rtol=2e-1)
torch.testing.assert_close(
ref_out, compiled_out, atol=tolerance.atol, rtol=tolerance.rtol
)
if requires_grad:
ref_out.backward(do)
ref_grads = [q.grad, k.grad, v.grad]
q.grad = None
k.grad = None
v.grad = None
compiled_out.backward(do)
compiled_grads = [q.grad, k.grad, v.grad]
q.grad = None
k.grad = None
v.grad = None
torch.testing.assert_close(
compiled_grads[0],
ref_grads[0],
atol=tolerance.atol,
rtol=tolerance.rtol,
)
torch.testing.assert_close(
compiled_grads[1],
ref_grads[1],
atol=tolerance.atol,
rtol=tolerance.rtol,
)
torch.testing.assert_close(
compiled_grads[2],
ref_grads[2],
atol=tolerance.atol,
rtol=tolerance.rtol,
)
# test paged attention which does not support backward
q.requires_grad, k.requires_grad, v.requires_grad = False, False, False
paged_compiled_out, _ = self.run_paged_attention(
score_mod, q, k, v, dtype, device=device, kernel_options=kernel_options
)
torch.testing.assert_close(
ref_out, paged_compiled_out, atol=tolerance.atol, rtol=tolerance.rtol
)
@supported_platform
def test_doc_mask_sparse(self, device):
document_id = torch.zeros(S, dtype=torch.int, device=device)
for i in range(0, S, 256):
document_id[i : i + 256] = i // 256
def document_masking_causal(score, b, h, q_idx, kv_idx):
causal_mask = q_idx >= kv_idx
document_mask = document_id[q_idx] == document_id[kv_idx]
return torch.where(causal_mask & document_mask, score, -float("inf"))
self.run_test(document_masking_causal, torch.float16, device=device)
self.run_test_with_paged_attention(
document_masking_causal, torch.float16, device=device
)
@supported_platform
def test_index_multiple(self, device):
bias = torch.randn(B, S, device=device)
def index_multiple(score, b, h, q_idx, kv_idx):
return score + bias[b][q_idx]
self.run_test(index_multiple, torch.float16, device=device)
self.run_test_with_paged_attention(index_multiple, torch.float16, device=device)
@supported_platform
def test_index_weird1(self, device):
bias = torch.randn(4, B, H, S, device=device)
def index_weird1(score, b, h, q_idx, kv_idx):
return score + bias[0][b, h][q_idx]
self.run_test(index_weird1, torch.float16, device=device)
self.run_test_with_paged_attention(index_weird1, torch.float16, device=device)
@supported_platform
def test_index_weird2(self, device):
bias = torch.randn(B, H, 4, S, device=device)
which_bias = torch.tensor(0, device=device)
def index_weird2(score, b, h, q_idx, kv_idx):
return score + bias[b][h][which_bias, q_idx]
self.run_test(index_weird2, torch.float16, device=device)
self.run_test_with_paged_attention(index_weird2, torch.float16, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes)
@dtypesIfCUDA(*device_configs["cuda"].dtypes)
@dtypesIfXPU(*device_configs["xpu"].dtypes)
def test_skip_odd_keys(self, device, dtype: torch.dtype):
def score_mod(score, b, h, q, kv):
return torch.where(kv % 2 == 0, score, float("-inf"))
self.run_test(score_mod, dtype, device=device)
self.run_test_with_paged_attention(score_mod, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes)
@dtypesIfCUDA(*device_configs["cuda"].dtypes)
@dtypesIfXPU(*device_configs["xpu"].dtypes)
def test_function_composition(self, device, dtype: torch.dtype):
def score_mod_1(score, b, h, m, n):
return score + (m - n)
def score_mod_2(score, b, h, m, n):
return torch.where(m <= n, score, float("-inf"))
def composed_score_mod(score, b, h, m, n):
return score_mod_2(score_mod_1(score, b, h, m, n), b, h, m, n)
self.run_test(composed_score_mod, dtype, device=device)
self.run_test_with_paged_attention(composed_score_mod, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes)
@dtypesIfCUDA(*device_configs["cuda"].dtypes)
@dtypesIfXPU(*device_configs["xpu"].dtypes)
def test_captured_buffers_all_dims(self, device, dtype: torch.dtype):
head_scale = torch.randn(H, device=device)
batch_scale = torch.randn(B, device=device)
tok_scale = torch.randn(S, device=device)
def all_bias(score, batch, head, token_q, token_kv):
score = score + tok_scale[token_q]
score = score + batch_scale[batch]
score = score + head_scale[head]
return score
self.run_test(all_bias, dtype, device=device)
self.run_test_with_paged_attention(all_bias, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_seq_masking(self, device, dtype):
seq_idx = torch.zeros(S, device=device, dtype=torch.bool)
seq_idx[S // 2 :] = 1
def seq_mask_mod(score, b, h, q, kv):
return torch.where(seq_idx[q] == seq_idx[kv], score, float("-inf"))
self.run_test(seq_mask_mod, dtype, device=device)
self.run_test_with_paged_attention(seq_mask_mod, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_load_from_bias_seq_only(self, device, dtype):
bias = torch.randn(S, S, device=device, dtype=dtype)
def bias_mod(score, b, h, q, kv):
return score + bias[q, kv]
self.run_test(bias_mod, dtype, device=device)
self.run_test_with_paged_attention(bias_mod, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_load_from_bias_seq_batch(self, device, dtype):
bias = torch.randn(B, S, S, device=device, dtype=dtype)
def bias_mod(score, b, h, q, kv):
return score + bias[b, q, kv]
self.run_test(bias_mod, dtype, device=device)
self.run_test_with_paged_attention(bias_mod, dtype, device=device)
@supported_platform
@skip_on_cpu
def test_load_from_view_buffer(self, device):
dtype = torch.float16
W = 8
class SimpleAttention(torch.nn.Module):
def __init__(self):
super().__init__()
self.rel_pos_h = torch.randn(2 * H - 1, D, device=device, dtype=dtype)
def forward(self, q, k, v):
q = q.view(B * H, H * W, -1)
score_mod = self.generate_score_mod(q)
q = q.view(B, H, H * W, -1)
return flex_attention(q, k, v, score_mod=score_mod)
def generate_score_mod(self, q):
rel_h = self.add_decomposed_rel_pos(q)
rel_h = rel_h.view(
B, H, rel_h.size(1), rel_h.size(2), rel_h.size(3)
).squeeze(-1)
def score_mod(score, batch, head, q_idx, k_idx):
h_idx = k_idx // W
return score + rel_h[batch, head, q_idx, h_idx]
return score_mod
@torch.no_grad()
def add_decomposed_rel_pos(self, q):
q_coords = torch.arange(H, device=self.rel_pos_h.device)[:, None]
k_coords = torch.arange(H, device=self.rel_pos_h.device)[None, :]
relative_coords = (q_coords - k_coords) + (H - 1)
Rh = self.rel_pos_h[relative_coords.long()]
r_q = q.reshape(B * H, H, W, D)
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
return rel_h.reshape(B * H, H * W, H, 1)
m = SimpleAttention().to(device).eval()
m = torch.compile(m, mode="max-autotune", fullgraph=True)
q = torch.randn(B, H, H * W, D, device=device, dtype=dtype, requires_grad=True)
k = torch.randn(B, H, H * W, D, device=device, dtype=dtype, requires_grad=True)
v = torch.randn(B, H, H * W, D, device=device, dtype=dtype, requires_grad=True)
out = m(q, k, v)
out.sum().backward()
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_load_from_bias_head_seq_batch(self, device, dtype):
bias = torch.randn(B, H, S, S, device=device, dtype=dtype)
def bias_mod(score, b, h, q, kv):
return score + bias[b, h, q, kv]
self.run_test(bias_mod, dtype, device=device)
self.run_test_with_paged_attention(bias_mod, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_load_rel_bias(self, device, dtype):
rel_bias = torch.randn(2 * S, device=device, dtype=dtype)
def bias_mod(score, b, h, q, kv):
return score + rel_bias[(q - kv) + S]
self.run_test(bias_mod, dtype, device=device)
self.run_test_with_paged_attention(bias_mod, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_dependent_causal_bidirectional(self, device, dtype):
num_bidirectional = torch.randint(0, S, (B,), device=device, dtype=torch.int32)
def bias_mod(score, b, h, q, kv):
causal_attention = q >= kv
cur_num_bidirectional = num_bidirectional[b]
bidirectional_attention_on_video = (q <= cur_num_bidirectional) & (
kv <= cur_num_bidirectional
)
return torch.where(
bidirectional_attention_on_video | causal_attention,
score,
-float("inf"),
)
self.run_test(bias_mod, dtype, device=device)
self.run_test_with_paged_attention(bias_mod, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_natten_2d(self, device, dtype):
H = 32
W = S // H
WINDOW = 3
assert W * H == S
def get_x_y(idx):
# This should be a floor divide, but we don't support that properly
return idx / W, idx % W
def natten_mask(score, b, h, q, kv):
q_x, q_y = get_x_y(q)
kv_x, kv_y = get_x_y(kv)
return torch.where(
((q_x - kv_x).abs() <= WINDOW) | ((q_y - kv_y).abs() <= WINDOW),
score,
float("-inf"),
)
self.run_test(natten_mask, dtype, device=device)
self.run_test_with_paged_attention(natten_mask, dtype, device=device)
@supported_platform
def test_subgraph_respect_decompostion(self, device):
from torch._decomp import core_aten_decompositions
from torch.fx.experimental.proxy_tensor import make_fx
def score_mod_func(score, b, h, q, kv):
return score - q // (1 + kv)
make_tensor = functools.partial(
torch.randn,
(2, 2, 128, 4),
device=device,
dtype=torch.float64,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
        # floor_div is not decomposed when decomposition_table is empty
attention = functools.partial(flex_attention, score_mod=score_mod_func)
gm = make_fx(attention, decomposition_table={})(query, key, value)
self.assertExpectedInline(
gm.sdpa_score0.code.strip(),
"""\
def forward(self, arg0_1, arg1_1, arg2_1, arg3_1, arg4_1):
add = torch.ops.aten.add.Tensor(arg4_1, 1); arg4_1 = None
floor_divide = torch.ops.aten.floor_divide.default(arg3_1, add); arg3_1 = add = None
sub = torch.ops.aten.sub.Tensor(arg0_1, floor_divide); arg0_1 = floor_divide = None
return sub""",
)
# floor_div is decomposed for core_aten_decompositions
gm = make_fx(attention, decomposition_table=core_aten_decompositions())(
query, key, value
)
self.assertExpectedInline(
gm.sdpa_score0.code.strip(),
"""\
def forward(self, arg0_1, arg1_1, arg2_1, arg3_1, arg4_1):
add = torch.ops.aten.add.Tensor(arg4_1, 1); arg4_1 = None
div = torch.ops.aten.div.Tensor_mode(arg3_1, add, rounding_mode = 'floor'); arg3_1 = add = None
sub = torch.ops.aten.sub.Tensor(arg0_1, div); arg0_1 = div = None
return sub""",
)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_silu_on_score(self, device, dtype):
def silu_score(score, b, h, q, kv):
return torch.nn.functional.silu(score)
self.run_test(silu_score, dtype, device=device)
self.run_test_with_paged_attention(silu_score, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_padded_dense_causal(self, device, dtype):
seq_len = torch.arange(B, device=device, dtype=torch.int32) + 1
def create_padded_dense_wrapper(orig_score_mod):
def njt_score_mod(qk, b, h, q, kv):
return torch.where(
qk <= seq_len[b], orig_score_mod(qk, b, h, q, kv), -float("inf")
)
return njt_score_mod
causal_njt = create_padded_dense_wrapper(_causal)
self.run_test(causal_njt, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_captured_scale(self, device, dtype):
scale = torch.ones((), device=device, dtype=torch.int32)
def score_mod_scale(qk, b, h, q, kv):
return qk + scale
self.run_test(score_mod_scale, dtype, device=device)
self.run_test_with_paged_attention(score_mod_scale, dtype, device=device)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_recompile_changed_score_mod(self, device, dtype):
scale = torch.ones((), device=device, dtype=torch.int32)
ADD = True
def score_mod_scale(qk, b, h, q, kv):
if ADD:
return qk + scale
else:
return qk * scale
self.run_test(score_mod_scale, dtype, device=device)
self.run_test_with_paged_attention(score_mod_scale, dtype, device=device)
ADD = False
self.run_test(score_mod_scale, dtype, device=device)
self.run_test_with_paged_attention(score_mod_scale, dtype, device=device)
@supported_platform
@expectedFailure  # Capturing a tensor and performing a reduction over it inside score_mod should not be allowed
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_captured_reduction(self, device, dtype):
scale = torch.randn((B, 8), device=device)
def score_mod_scale(qk, b, h, q, kv):
return qk + scale[b].sum(dim=-1)
self.run_test(score_mod_scale, dtype, device=device)
@supported_platform
@skip_on_cpu
@dtypes(torch.float16)
@dtypesIfCUDA(torch.float16)
def test_dynamic_captured_buffer(self, device, dtype):
def run_with_head_count(compiled_fa, head_count):
head_scale = torch.randn(
head_count, device=device, dtype=dtype, requires_grad=True
)
def score_mod(score, batch, head, token_q, token_kv):
return score * head_scale[head]
q = torch.randn(
B, head_count, S, D, device=device, dtype=dtype, requires_grad=True
)
k = torch.randn_like(q, requires_grad=True)
v = torch.randn_like(q, requires_grad=True)
block_mask = create_block_mask(noop_mask, B, 1, S, S, device=device)
out = compiled_fa(q, k, v, score_mod=score_mod, block_mask=block_mask)
loss = out.sum()
loss.backward()
return out
compiled_fa = torch.compile(flex_attention, fullgraph=True, dynamic=True)
head_counts = [4, 8, 4, 16, 4]
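# Alternate head counts across calls so the captured head_scale buffer (and q/k/v)
# change size, exercising dynamic-shape handling of captured buffers.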
for head_count in head_counts:
run_with_head_count(compiled_fa, head_count)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@common_utils.parametrize(
"score_mod", test_score_mods, name_fn=lambda score_mod: score_mod.__name__
)
@skip_on_cpu
def test_return_max(self, device, dtype, score_mod):
make_tensor = functools.partial(
torch.randn,
(2, 2, 243, 16),
device=device,
dtype=dtype,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
out_only = flex_attention(query, key, value, score_mod)
out_max, aux_max = flex_attention(
query,
key,
value,
score_mod,
return_aux=AuxRequest(max_scores=True),
)
out_both, aux_both = flex_attention(
query,
key,
value,
score_mod,
return_aux=AuxRequest(lse=True, max_scores=True),
)
flex_compile = torch.compile(flex_attention, fullgraph=True)
out_compiled, aux_compiled = flex_compile(
query,
key,
value,
score_mod,
return_aux=AuxRequest(max_scores=True),
)
torch.testing.assert_close(out_only, out_max, atol=1e-6, rtol=1e-6)
torch.testing.assert_close(out_only, out_both, atol=1e-6, rtol=1e-6)
torch.testing.assert_close(
aux_max.max_scores, aux_both.max_scores, atol=1e-6, rtol=1e-6
)
# The two paths compute slightly different scores, so allow a small fudge factor
# Extra tolerance for squared score_mod with float16 due to limited dynamic range
if score_mod.__name__ == "_squared" and dtype == torch.float16:
atol, rtol = 2e-2, 2e-2
else:
atol, rtol = 5e-3, 5e-3
torch.testing.assert_close(out_max, out_compiled, atol=atol, rtol=rtol)
torch.testing.assert_close(
aux_max.max_scores, aux_compiled.max_scores, atol=atol, rtol=rtol
)
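# max_scores is reduced over the KV dimension, so it has shape (B, H, Q_LEN).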
B, H, L = query.shape[:3]
self.assertEqual(aux_max.max_scores.shape, (B, H, L))
max_score_tensors = [
aux_max.max_scores,
aux_both.max_scores,
aux_compiled.max_scores,
]
for max_tensor in max_score_tensors:
self.assertFalse(
max_tensor.requires_grad, "max_scores should not require gradients"
)
self.assertEqual(
max_tensor.dtype, torch.float32, "max_scores should be kept in fp32"
)
# Test gradient computation for both eager and compiled versions
test_cases = [
("eager", out_max, "eager mode"),
("compiled", out_compiled, "compiled mode"),
]
for mode_name, output, description in test_cases:
loss = output.sum()
grads = torch.autograd.grad(loss, (query, key, value))
# Verify gradients are computed for all inputs
input_names = ["query", "key", "value"]
for grad, input_name in zip(grads, input_names):
self.assertIsNotNone(
grad, f"{input_name} should receive gradients in {description}"
)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@common_utils.parametrize(
"score_mod", test_score_mods, name_fn=lambda score_mod: score_mod.__name__
)
@skip_on_cpu
def test_return_aux(self, device, dtype, score_mod):
"""Test the new return_aux API with AuxRequest/Output"""
make_tensor = functools.partial(
torch.randn,
(2, 2, 243, 16),
device=device,
dtype=dtype,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
flex_compile = torch.compile(flex_attention, fullgraph=True)
flex_compile_partial = torch.compile(flex_attention, fullgraph=False)
# Test 1: No auxiliary outputs (default behavior)
out_only = flex_compile(query, key, value, score_mod)
self.assertIsInstance(out_only, torch.Tensor)
# Test 2: Request only LSE
out, aux_lse = flex_compile(
query, key, value, score_mod, return_aux=AuxRequest(lse=True)
)
self.assertIsInstance(aux_lse, AuxOutput)
self.assertIsInstance(aux_lse.lse, torch.Tensor)
self.assertIsNone(aux_lse.max_scores)
self.assertEqual(aux_lse.lse.shape, (2, 2, 243))
self.assertEqual(aux_lse.lse.dtype, torch.float32)
# Test 3: Request only max_scores
out, aux_max = flex_compile(
query,
key,
value,
score_mod,
return_aux=AuxRequest(max_scores=True),
)
self.assertIsInstance(aux_max, AuxOutput)
self.assertIsNone(aux_max.lse)
self.assertIsInstance(aux_max.max_scores, torch.Tensor)
self.assertEqual(aux_max.max_scores.shape, (2, 2, 243))
self.assertEqual(aux_max.max_scores.dtype, torch.float32)
# Test 4: Request both auxiliary outputs
out, aux_both = flex_compile(
query,
key,
value,
score_mod,
return_aux=AuxRequest(lse=True, max_scores=True),
)
self.assertIsInstance(aux_both, AuxOutput)
self.assertIsInstance(aux_both.lse, torch.Tensor)
self.assertIsInstance(aux_both.max_scores, torch.Tensor)
self.assertEqual(aux_both.lse.shape, (2, 2, 243))
self.assertEqual(aux_both.max_scores.shape, (2, 2, 243))
# Test 5: Request no auxiliary outputs explicitly
out, aux_none = flex_compile(
query,
key,
value,
score_mod,
return_aux=AuxRequest(), # Default is lse=False, max_scores=False
)
self.assertIsInstance(aux_none, AuxOutput)
self.assertIsNone(aux_none.lse)
self.assertIsNone(aux_none.max_scores)
# Test 6: Verify outputs are consistent with legacy API, can't fullgraph through warnings
out_legacy, lse_legacy = flex_compile_partial(
query, key, value, score_mod, return_lse=True
)
torch.testing.assert_close(out_only, out_legacy, atol=1e-6, rtol=1e-6)
torch.testing.assert_close(aux_lse.lse, lse_legacy, atol=1e-6, rtol=1e-6)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@skip_on_cpu
def test_return_aux_deprecation_warnings(self, device, dtype):
"""Test that deprecation warnings are issued for legacy parameters"""
import warnings
make_tensor = functools.partial(
torch.randn,
(2, 2, 64, 16),
device=device,
dtype=dtype,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
# Clear shown warnings to ensure we can test them
original_shown = _WARNINGS_SHOWN.copy()
_WARNINGS_SHOWN.clear()
try:
# Test deprecation warning for return_lse
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
flex_attention(query, key, value, return_lse=True)
self.assertTrue(
any(
"return_lse is deprecated" in str(warning.message)
for warning in w
)
)
# Clear for next test
_WARNINGS_SHOWN.clear()
# Test error when both old and new API are used
with self.assertRaises(ValueError) as cm:
flex_attention(
query,
key,
value,
return_lse=True,
return_aux=AuxRequest(lse=True),
)
self.assertIn(
"Cannot specify both return_lse and return_aux", str(cm.exception)
)
finally:
# Restore original warnings state
_WARNINGS_SHOWN.clear()
_WARNINGS_SHOWN.update(original_shown)
@supported_platform
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
@skip_on_cpu
def test_dynamic_divisibility_guards(self, device, dtype):
"""Test guards for divisible/non-divisible shape transitions"""
if device == "cpu" and dtype is torch.float16:
dtype = torch.float32
def score_mod(qk, b, h, q, kv):
return torch.where(q >= kv, qk, -float("inf"))
def test_shape(S, backend):
"""Test a single shape configuration"""
block_mask = create_block_mask(noop_mask, 1, 1, S, S, device=device)
sdpa_partial = create_attention(score_mod, block_mask=block_mask)
tensors = [
torch.randn(
2, 4, S, 64, dtype=dtype, device=device, requires_grad=False
)
for _ in range(3)
]
compiled_sdpa = torch.compile(sdpa_partial, backend=backend)
out, code = run_and_get_code(compiled_sdpa, *tensors)
# Check divisibility flag
is_divisible = S % 128 == 0
expected_flag = f"IS_DIVISIBLE : tl.constexpr = {is_divisible}"
self.assertIn(
expected_flag, str(code), f"S={S} should have {expected_flag}"
)
self.assertEqual(out.shape, (2, 4, S, 64))
return out, code
torch._dynamo.reset()
backend = CompileCounterWithBackend("inductor")
# Test divisible and non-divisible shapes
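# 256 and 384 are multiples of the 128 block size, 255 and 383 are not, so both
# IS_DIVISIBLE code paths get compiled and checked.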
test_shapes = [256, 255, 383, 384]
_ = [test_shape(S, backend) for S in test_shapes]
@supported_platform
@skip_on_cpu
def test_mask_mod_handles_symint_addition(self, device):
dtype = torch.float16
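# Under dynamic=True the q/k lengths become SymInts; the mask limit is built from
# their sum, so create_block_mask must handle SymInt arithmetic inside the mask_mod closure.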
def run(q, k, v):
ql = q.size(-2)
kl = k.size(-2)
frame = 32
def _opaque_mask(b, h, q_idx, kv_idx):
ref = ql // frame
mot = kl // frame # codespell:ignore
limit = (ref + mot) * frame # codespell:ignore
return q_idx < limit
block_mask = create_block_mask(
_opaque_mask,
B=q.size(0),
H=q.size(1),
Q_LEN=ql,
KV_LEN=kl,
device=device,
)
return flex_attention(q, k, v, block_mask=block_mask)
compiled_run = torch.compile(run, fullgraph=True, dynamic=True)
q = torch.randn(1, 2, 192, 32, device=device, dtype=dtype)
k = torch.randn(1, 2, 128, 32, device=device, dtype=dtype)
v = torch.randn(1, 2, 128, 32, device=device, dtype=dtype)
eager_out = run(q, k, v)
compiled_out = compiled_run(q, k, v)
torch.testing.assert_close(eager_out, compiled_out, atol=1e-3, rtol=1e-3)
# Exercise different dynamic shapes to ensure SymInt sums remain well-formed.
q2 = torch.randn(1, 2, 160, 32, device=device, dtype=dtype)
k2 = torch.randn(1, 2, 96, 32, device=device, dtype=dtype)
v2 = torch.randn(1, 2, 96, 32, device=device, dtype=dtype)
eager_out2 = run(q2, k2, v2)
compiled_out2 = compiled_run(q2, k2, v2)
torch.testing.assert_close(eager_out2, compiled_out2, atol=1e-3, rtol=1e-3)
@supported_platform
def test_multiple_score_mod_calls(self, device):
query = torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
keys = [
torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
for _ in range(2)
]
values = [
torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
for _ in range(2)
]
def scoremod_1(qk, b, h, q, kv):
return qk + (q - kv)
def scoremod_2(qk, b, h, q, kv):
return torch.where(q >= kv, qk, -float("inf"))
def f(q, k1, k2, v1, v2):
q2 = flex_attention(q, k1, v1, score_mod=scoremod_1)
return flex_attention(q2, k2, v2, score_mod=scoremod_2)
out = f(query, *keys, *values)
out2 = torch.compile(f)(query, *keys, *values)
tolerance = Tolerances(atol=2e-1, rtol=2e-1)
torch.testing.assert_close(out, out2, atol=tolerance.atol, rtol=tolerance.rtol)
@supported_platform
@skip_on_cpu
@skip_on_rocm # TODO: Investigate
def test_multiple_mask_calls(self, device):
make_tensor = functools.partial(
torch.randn,
(1, 4, 512, 64),
dtype=torch.float32,
device=device,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
window_size = 32
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
def causal_mask_slidewindow_mod(b, h, q_idx, kv_idx):
return (q_idx >= kv_idx) & (q_idx <= kv_idx + window_size)
mask1 = create_block_mask(
causal_mask, 1, None, 512, 512, _compile=False, device=device
)
mask2 = create_block_mask(
causal_mask_slidewindow_mod,
1,
None,
512,
512,
_compile=False,
device=device,
)
def f(q, k, v):
out1 = flex_attention(q, k, v, block_mask=mask1)
out2 = flex_attention(q, k, v, block_mask=mask2)
return out1 + out2
f_compiled = torch.compile(f, fullgraph=True)
out = f(query, key, value)
out_compiled = f_compiled(query, key, value)
grads = torch.autograd.grad((out,), (query, key, value), torch.ones_like(out))
grads_compile = torch.autograd.grad(
(out_compiled,), (query, key, value), torch.ones_like(out_compiled)
)
for grad, grad_compiled in zip(grads, grads_compile):
torch.testing.assert_close(grad, grad_compiled, atol=3e-2, rtol=3e-2)
@supported_platform
def test_multiple_score_mod_calls2(self, device):
query = torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
keys = [
torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
for _ in range(3)
]
values = [
torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
for _ in range(3)
]
def scoremod_1(qk, b, h, q, kv):
return qk + (q - kv)
def scoremod_2(qk, b, h, q, kv):
return torch.where(q >= kv, qk, -float("inf"))
attention1 = functools.partial(flex_attention, score_mod=scoremod_1)
def f(q, k1, k2, k3, v1, v2, v3):
q2 = attention1(q, k1, v1)
q3 = flex_attention(q2, k2, v2, score_mod=scoremod_2)
return flex_attention(q3, k3, v3, score_mod=scoremod_1)
out = f(query, *keys, *values)
out2 = torch.compile(f, fullgraph=True)(query, *keys, *values)
self.assertTrue((out - out2).abs().mean() < 1e-2)
@supported_platform
def test_multiple_score_mod_calls_paged_attention(self, device):
query = torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
keys = [
torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
for _ in range(2)
]
values = [
torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
for _ in range(2)
]
def scoremod_1(qk, b, h, q, kv):
return qk + (q - kv)
def scoremod_2(qk, b, h, q, kv):
return torch.where(q >= kv, qk, -float("inf"))
def f(q, k1, k2, v1, v2):
q2 = flex_attention(q, k1, v1, score_mod=scoremod_1)
return flex_attention(q2, k2, v2, score_mod=scoremod_2)
eager_out = f(query, *keys, *values)
block_mask = create_block_mask(noop_mask, 1, 1, 1024, 1024, device=device)
(
k_cache1,
v_cache1,
converted_block_mask1,
converted_score_mod1,
) = self.preprocess_paged_attention(
scoremod_1,
query,
keys[0],
values[0],
block_mask,
torch.float32,
device=device,
)
(
k_cache2,
v_cache2,
converted_block_mask2,
converted_score_mod2,
) = self.preprocess_paged_attention(
scoremod_2,
query,
keys[1],
values[1],
block_mask,
torch.float32,
device=device,
)
def paged_f(q, k1, k2, v1, v2):
q2 = flex_attention(
q,
k1,
v1,
score_mod=converted_score_mod1,
block_mask=converted_block_mask1,
)
return flex_attention(
q2,
k2,
v2,
score_mod=converted_score_mod2,
block_mask=converted_block_mask2,
)
compiled_out = torch.compile(paged_f, fullgraph=True)(
query, k_cache1, k_cache2, v_cache1, v_cache2
)
tolerance = Tolerances(atol=2e-1, rtol=2e-1)
torch.testing.assert_close(
eager_out, compiled_out, atol=tolerance.atol, rtol=tolerance.rtol
)
@supported_platform
def test_multiple_score_mod_calls2_paged_attention(self, device):
query = torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
keys = [
torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
for _ in range(3)
]
values = [
torch.randn((1, 8, 1024, 64), dtype=torch.float32, device=device)
for _ in range(3)
]
def scoremod_1(qk, b, h, q, kv):
return qk + (q - kv)
def scoremod_2(qk, b, h, q, kv):
return torch.where(q >= kv, qk, -float("inf"))
attention1 = functools.partial(flex_attention, score_mod=scoremod_1)
def f(q, k1, k2, k3, v1, v2, v3):
q2 = attention1(q, k1, v1)
q3 = flex_attention(q2, k2, v2, score_mod=scoremod_2)
return flex_attention(q3, k3, v3, score_mod=scoremod_1)
eager_out = f(query, *keys, *values)
block_mask = create_block_mask(noop_mask, 1, 1, 1024, 1024, device=device)
(
k_cache1,
v_cache1,
converted_block_mask1,
converted_score_mod1,
) = self.preprocess_paged_attention(
scoremod_1,
query,
keys[0],
values[0],
block_mask,
torch.float32,
device=device,
)
(
k_cache2,
v_cache2,
converted_block_mask2,
converted_score_mod2,
) = self.preprocess_paged_attention(
scoremod_2,
query,
keys[1],
values[1],
block_mask,
torch.float32,
device=device,
)
(
k_cache3,
v_cache3,
converted_block_mask3,
converted_score_mod3,
) = self.preprocess_paged_attention(
scoremod_1,
query,
keys[2],
values[2],
block_mask,
torch.float32,
device=device,
)
paged_attention1 = functools.partial(
flex_attention,
score_mod=converted_score_mod1,
block_mask=converted_block_mask1,
)
def paged_f(q, k1, k2, k3, v1, v2, v3):
q2 = paged_attention1(q, k1, v1)
q3 = flex_attention(
q2,
k2,
v2,
score_mod=converted_score_mod2,
block_mask=converted_block_mask2,
)
return flex_attention(
q3,
k3,
v3,
score_mod=converted_score_mod3,
block_mask=converted_block_mask3,
)
compiled_out = torch.compile(paged_f, fullgraph=True)(
query, k_cache1, k_cache2, k_cache3, v_cache1, v_cache2, v_cache3
)
tolerance = Tolerances(atol=2e-1, rtol=2e-1)
torch.testing.assert_close(
eager_out, compiled_out, atol=tolerance.atol, rtol=tolerance.rtol
)
@supported_platform
@skip_on_cpu
def test_inputs_are_realized(self, device):
def f(q, k, v):
x = torch.randn(1024, device=device)
x = x * 2
def func(qk, b, h, q, kv):
return qk + x[q]
return flex_attention(q.sin(), k, v, score_mod=func).cos()
q, k, v = (
torch.randn(1, 8, 1024, 64, device=device, requires_grad=True)
for _ in range(3)
)
ref = f(q, k, v)
out = torch.compile(f)(q, k, v)
self.assertTrue((ref - out).abs().mean() < 1e-2)
gradOut = torch.randn_like(q)
ref_grads = torch.autograd.grad(ref, (q, k, v), gradOut)
out_grads = torch.autograd.grad(out, (q, k, v), gradOut)
for ref, out in zip(ref_grads, out_grads):
self.assertTrue((ref - out).abs().mean() < 1e-2)
@supported_platform
@skip_on_cpu
def test_make_block_mask(self, device):
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
block_mask_a = torch.compile(create_block_mask, fullgraph=True)(
causal_mask, 1, 1, 512, 512, device=device
)
block_mask_b = create_block_mask(causal_mask, 1, 1, 512, 512, device=device)
self.assertEqual(block_mask_a.kv_num_blocks, block_mask_b.kv_num_blocks)
self.assertEqual(block_mask_a.kv_indices, block_mask_b.kv_indices)
self.assertEqual(block_mask_a.q_num_blocks, block_mask_b.q_num_blocks)
@supported_platform
def test_mask_mod_combiners(self, device):
def causal_mask(b, h, q, kv):
return q >= kv
def neg_causal_mask(b, h, q, kv):
return q < kv
def sliding_window(b, h, q, kv):
return (q - kv) <= 512
local_s = 2048
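# Expectations below: causal AND sliding-window keeps a banded set of KV blocks,
# causal AND its negation keeps none, and causal OR its negation matches the
# no-op mask's sparsity.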
block_mask = create_block_mask(
and_masks(causal_mask, sliding_window),
1,
1,
local_s,
local_s,
device=device,
)
self.assertExpectedInline(block_mask.kv_num_blocks.sum().item(), """28""")
attention = functools.partial(flex_attention, block_mask=block_mask)
self.run_test_with_call(
attention, Q_S=local_s, KV_S=local_s, dtype=torch.float16, device=device
)
block_mask = create_block_mask(
and_masks(causal_mask, neg_causal_mask),
1,
1,
local_s,
local_s,
device=device,
)
self.assertEqual(block_mask.kv_num_blocks.sum(), 0)
block_mask1 = create_block_mask(
or_masks(causal_mask, neg_causal_mask),
1,
1,
local_s,
local_s,
device=device,
)
block_mask2 = create_block_mask(
noop_mask, 1, 1, local_s, local_s, device=device
)
self.assertEqual(block_mask1.sparsity(), block_mask2.sparsity())
@supported_platform
@skip_on_cpu
def test_epilogue_fused(self, device):
# Enable inductor metrics logging so that num_bytes_accessed is recorded
torch._logging.set_logs(inductor_metrics=True)
@torch.compile
def f(q, k, v):
out = flex_attention(q, k, v)
return out.cos()
q, k, v = (torch.randn(1, 8, 1024, 64, device=device) for _ in range(3))
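# If the cos() epilogue is fused into the attention template, no separate pointwise
# kernel is emitted and bytes accessed stay close to the raw q/k/v/output traffic.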
metrics.reset()
_, code = run_and_get_code(f, q, k, v)
fc = FileCheck()
fc.check("triton_tem_fused") # template call
fc.check_not("poi_fused_cos") # No cos pointwise operation
fc.run(code[0])
accessed_bytes = 1 * 8 * 1024 * 64 * torch.float32.itemsize
num_accesses = 4  # q, k, v reads and one output write.
# TODO: Get rid of this fudge factor
# We need this fudge factor for now as we write the extraneous logsumexp
num_accesses += 1
self.assertLess(metrics.num_bytes_accessed, accessed_bytes * num_accesses)
torch._logging.set_logs()
@supported_platform
@dtypes(*device_configs["cpu"].dtypes)
@dtypesIfCUDA(*device_configs["cuda"].dtypes)
@dtypesIfXPU(*device_configs["xpu"].dtypes)
def test_njt_causal(self, device, dtype):
offsets = torch.tensor(
[0, 1024, 1024 + 512, S], device=device, dtype=torch.int32
)
seq_idx = torch.zeros(S, device=device, dtype=torch.int32)
for idx in range(len(offsets) - 1):
seq_idx[offsets[idx] : offsets[idx + 1]] = idx
def create_njt_wrapper(orig_score_mod, offsets, seq_idx):
def njt_score_mod(qk, b, h, q, kv):
q_nested = q - offsets[seq_idx[q]]
kv_nested = kv - offsets[seq_idx[kv]]
return orig_score_mod(qk, b, h, q_nested, kv_nested)
return njt_score_mod
causal_njt = create_njt_wrapper(_causal, offsets, seq_idx)
self.run_test(causal_njt, dtype, device=device)
self.run_test_with_paged_attention(causal_njt, dtype, device=device)
@supported_platform
def test_mixed_dtypes_fails(self, device):
query = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device=device)
key = torch.randn((1, 1, 1024, 64), dtype=torch.float16, device=device)
value = torch.randn((1, 1, 1024, 64), dtype=torch.float16, device=device)
with self.assertRaisesRegex(
ValueError, "Expected query, key, and value to have the same dtype"
):
flex_attention(query, key, value, _identity)
@supported_platform
@patch.object(torch._inductor.config, "max_autotune", True)
def test_max_autotune(self, device):
def score_mod(score, b, h, m, n):
return score * 2
self.run_test(score_mod, dtype=torch.float16, device=device)
self.run_test_with_paged_attention(
score_mod, dtype=torch.float16, device=device
)
self.run_test_with_paged_attention(
score_mod=score_mod,
dtype=torch.bfloat16,
KV_S=64,
device=device,
)
@supported_platform
@skip("TODO: Figure out why this is erroring")
@patch.object(torch._inductor.config, "max_autotune", True)
def test_max_autotune_with_captured(self, device):
head_scale = torch.randn(H, device=device)
batch_scale = torch.randn(B, device=device)
tok_scale = torch.randn(S, device=device)
def bias_mod(score, batch, head, token_q, token_kv):
score = score + tok_scale[token_q]
score = score + batch_scale[batch]
score = score + head_scale[head]
return score
self.run_test(bias_mod, dtype=torch.float32, device=device)
@supported_platform
@common_utils.parametrize("score_mod", test_score_mods)
@dtypes(*device_configs["cpu"].dtypes)
@dtypesIfCUDA(*device_configs["cuda"].dtypes)
@dtypesIfXPU(*device_configs["xpu"].dtypes)
@common_utils.parametrize("head_dims", [(D, D // 2), (D // 2, D)])
def test_non_equal_head_dims(self, device, dtype, score_mod, head_dims):
qk_d, v_d = head_dims
self.run_test(score_mod, dtype, device, B, H, S, qk_d, B, H, S, V_D=v_d)
self.run_test_with_paged_attention(
score_mod, dtype, device, B, H, S, qk_d, B, H, S, V_D=v_d
)
@supported_platform
@skip_on_cpu
def test_autograd_function_in_score_mod(self, device):
class ApplyMask(torch.autograd.Function):
generate_vmap_rule = True
@staticmethod
def forward(a, mask):
return torch.where(mask, a, -float("inf"))
@staticmethod
def setup_context(ctx, inputs, output):
_, mask = inputs
ctx.mark_non_differentiable(mask)
@staticmethod
def backward(ctx, i):
return i, None
def score_mod(score, b, h, q, kv):
return ApplyMask.apply(score, q <= kv)
func = torch.compile(flex_attention, fullgraph=True)
q, k, v = (
torch.randn(1, 8, 1024, 64, device=device, requires_grad=True)
for _ in range(3)
)
# Just checking that it runs
func(q, k, v)
# expectedFailure
# This doesn't work due to vmap + autograd.Function + torch.compile not composing
# self.run_test(score_mod)
@supported_platform
def test_causal_block(self, device):
def mask_mod(b, h, q, kv):
return q >= kv
block_mask = create_block_mask(mask_mod, 1, 1, S, S, device=device)
attention = functools.partial(flex_attention, block_mask=block_mask)
self.run_test_with_call(attention, dtype=torch.float16, device=device)
@supported_platform
def test_causal_block_paged_attention(self, device):
def mask_mod(b, h, q, kv):
return q >= kv
block_mask = create_block_mask(mask_mod, B, 1, S, S, device=device)
self.run_test_with_paged_attention(
score_mod=_identity,
dtype=torch.float16,
device=device,
block_mask=block_mask,
)
@supported_platform
def test_new_empty_mask_mod(self, device):
S = 128
q, k, v = (torch.randn(4, 1, S, 64, device=device) for _ in range(3))
attn_mask = torch.ones(4, 1, S, S, dtype=torch.bool, device=device).tril()
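# new_zeros on the head index creates a fresh tensor inside score_mod / mask_mod;
# this checks that such tensor-factory calls trace and lower correctly.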
def score_mod(score, b, h, q_idx, kv_idx):
h_ = h.new_zeros(h.shape)
return score + attn_mask[b, h_, q_idx, kv_idx]
def causal(b, h, q_idx, kv_idx):
h_ = h.new_zeros(h.shape)
return attn_mask[b, h_, q_idx, kv_idx]
block_mask = create_block_mask(
causal, B=4, H=None, Q_LEN=S, KV_LEN=S, device=device
)
torch.compile(flex_attention, fullgraph=True)(
q, k, v, score_mod, block_mask=block_mask
)
@supported_platform
@common_utils.parametrize("head_dim", [17, 24, 94, 121])
@dtypes(*device_configs["cpu"].dtypes_fast)
@dtypesIfCUDA(*device_configs["cuda"].dtypes_fast)
@dtypesIfXPU(*device_configs["xpu"].dtypes_fast)
def test_non_pow_2_headdim(self, device, dtype, head_dim):
self.run_test(_rel_bias, dtype, device, B, H, S, head_dim, B, H, S, head_dim)
@supported_platform
def test_GQA_causal_mask(self, device):
def mask_mod(b, h, q, kv):
return q >= kv
block_mask = create_block_mask(mask_mod, B, 1, S // 8, S // 8, device=device)
attention = functools.partial(
flex_attention, block_mask=block_mask, enable_gqa=True
)
self.run_test_with_call(
attention,
torch.float16,
device,
B,
H * 4, # Hq = 4*Hkv.
S // 8,
D,
B,
H,
S // 8,
D,
)
self.run_test_with_paged_attention(
_identity,
dtype=torch.float16,
device=device,
Q_H=H * 4,
Q_S=S // 8,
KV_H=H,
KV_S=S // 8,
block_mask=block_mask,
)
@supported_platform
def test_custom_block_mask_generator(self, device):
def mask_mod(b, h, q, kv):
return q >= kv
auto_mask = create_block_mask(mask_mod, 1, 1, S, S, device=device)
BLOCK_SIZE = 128
def causal_constructor(S):
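# Causal pattern: query block i attends to KV blocks 0..i, so row i keeps i + 1
# blocks; kv_indices simply enumerates the block indices per row.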
num_blocks = torch.arange(S // BLOCK_SIZE, device=device) + 1
indices = torch.arange(S // BLOCK_SIZE, device=device).expand(
S // BLOCK_SIZE, S // BLOCK_SIZE
)
num_blocks = num_blocks[None, None, :]
indices = indices[None, None, :]
return BlockMask.from_kv_blocks(
num_blocks, indices, BLOCK_SIZE=BLOCK_SIZE, mask_mod=mask_mod
)
manual_mask = causal_constructor(S)
self.assertEqual(auto_mask.to_dense(), manual_mask.to_dense())
@supported_platform
@skip_on_cpu
@dtypes(*device_configs["cpu"].dtypes)
@dtypesIfCUDA(*device_configs["cuda"].dtypes)
@dtypesIfXPU(*device_configs["xpu"].dtypes)
@common_utils.parametrize("score_mod", [_identity, _causal])
def test_logsumexp_correctness(self, device, dtype, score_mod):
make_tensor = functools.partial(
torch.randn,
(B, H, S, D),
dtype=dtype,
device=device,
requires_grad=True,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
@torch.compile
def sdpa_hop(q, k, v, score_mod):
return flex_attention(q, k, v, score_mod, return_lse=True)
@torch.compile(backend="aot_eager")
def eager_sdpa_hop(q, k, v, score_mod):
return flex_attention(q, k, v, score_mod, return_lse=True)
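# The reference path runs in float64 through aot_eager and keeps a float64 LSE;
# the inductor-compiled kernel always returns the LSE in float32.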
ref_out, ref_lse = eager_sdpa_hop(
q.to(torch.float64),
k.to(torch.float64),
v.to(torch.float64),
score_mod,
)
compiled_out, compiled_lse = sdpa_hop(q, k, v, score_mod)
self.assertTrue(ref_lse.dtype == torch.float64)
self.assertTrue(compiled_lse.dtype == torch.float32)
tolerance = Tolerances(atol=2e-2, rtol=2e-2)
torch.testing.assert_close(
ref_out.to(dtype=torch.float32),
compiled_out.to(dtype=torch.float32),
atol=tolerance.atol,
rtol=tolerance.rtol,
)
torch.testing.assert_close(
ref_lse.to(dtype=torch.float32),
compiled_lse.to(dtype=torch.float32),
atol=tolerance.atol,
rtol=tolerance.rtol,
)
@supported_platform
@skip_on_cpu
def test_logsumexp_only_return(self, device):
make_tensor = functools.partial(
torch.randn,
(B, H, S, D),
dtype=torch.float32,
device=device,
requires_grad=True,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
@torch.compile
def func(q, k, v, score_mod):
_, lse = flex_attention(q, k, v, score_mod, return_lse=True)
lse_2 = lse * 2
return lse_2
_, code = run_and_get_code(func, q, k, v, _identity)
# Ensure that we're still generating the flexattention kernel
FileCheck().check_count(".run(primals_1, primals_2, primals_3", 1, True).run(
code[0]
)
@supported_platform
@skip_on_cpu
@common_utils.parametrize(
"score_mod", [_identity, _causal, _times_two, _squared, _trig, _trig2]
)
def test_aot_eager_gradcheck(self, device, score_mod):
make_tensor = functools.partial(
torch.randn,
(2, 2, 11, 4),
device=device,
dtype=torch.float64,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
func = torch.compile(flex_attention, backend="aot_eager", fullgraph=True)
self.assertTrue(
torch.autograd.gradcheck(
func, (query, key, value, score_mod), raise_exception=True
)
)
@supported_platform
@skip_on_cpu
def test_eager_backward_strides(self, device):
class Repro(torch.nn.Module):
def __init__(self):
super().__init__()
self.qkv_proj = torch.nn.Linear(256, 256 * 3)
self.n_head = 256 // 64
self.d_attn = 256
def forward(self, x):
n_batch, n_ctx, _ = x.shape
q, k, v = self.qkv_proj(x).split(
[self.d_attn, self.d_attn, self.d_attn], dim=2
)
q = q.reshape(n_batch, n_ctx, self.n_head, -1)
k = k.reshape(n_batch, n_ctx, self.n_head, -1)
v = v.reshape(n_batch, n_ctx, self.n_head, -1)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
x = torch.nn.attention.flex_attention.flex_attention(q, k, v)
return x
model = Repro().to(device)
x = torch.randn((1, 512, 256), device=device, requires_grad=True)
out = torch.compile(model, backend="aot_eager", fullgraph=True)(x)
out.backward(torch.ones_like(out))
@supported_platform
@skip_on_cpu
def test_differentiable_logsumexp_gradcheck(self, device):
make_tensor = functools.partial(
torch.randn,
(2, 2, 11, 4),
device=device,
dtype=torch.float64,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
def flex_attention_lse_only(q, k, v):
return flex_attention(q, k, v, return_lse=True)[1]
func = torch.compile(flex_attention_lse_only, backend="aot_eager")
self.assertTrue(
torch.autograd.gradcheck(func, (query, key, value), raise_exception=True)
)
@supported_platform
@skip_on_cpu
def test_differentiable_logsumexp_compiled(self, device):
make_tensor = functools.partial(
torch.randn,
(2, 2, 128, 64),
device=device,
dtype=torch.float32,
requires_grad=True,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
lse_mask = torch.randn(2, 2, 128, device=device)
out, lse = flex_attention(q, k, v, return_lse=True)
(out.mean() + (lse * lse_mask).sum()).backward()
q_grad, k_grad, v_grad = q.grad, k.grad, v.grad
q.grad = None
k.grad = None
v.grad = None
out2, lse2 = torch.compile(flex_attention)(q, k, v, return_lse=True)
(out2.mean() + (lse2 * lse_mask).sum()).backward()
q_grad2, k_grad2, v_grad2 = q.grad, k.grad, v.grad
tolerance = Tolerances(atol=1e-1, rtol=1e-1)
torch.testing.assert_close(out, out2, atol=tolerance.atol, rtol=tolerance.rtol)
torch.testing.assert_close(lse, lse2, atol=tolerance.atol, rtol=tolerance.rtol)
torch.testing.assert_close(
q_grad, q_grad2, atol=tolerance.atol, rtol=tolerance.rtol
)
torch.testing.assert_close(
k_grad, k_grad2, atol=tolerance.atol, rtol=tolerance.rtol
)
torch.testing.assert_close(
v_grad, v_grad2, atol=tolerance.atol, rtol=tolerance.rtol
)
# Use an unusual mask to check that reusing a block_mask across sequence lengths works correctly.
@supported_platform
@skip_on_cpu
def _test_block_mask_reuse_with_weird_mask(self, device):
def mask(b, h, q, kv):
return (kv < 256) | (kv >= 2048)
make_tensor = functools.partial(
torch.randn,
(4, 4, 4096, 64),
device=device,
dtype=torch.float32,
requires_grad=True,
)
block_mask = create_block_mask(mask, None, None, 4096, 4096, device=device)
# Compile 1st version with q/k/v(seqlen=4096) and block_mask(seqlen=4096)
torch.compile(flex_attention, dynamic=True, fullgraph=True)(
make_tensor(), make_tensor(), make_tensor(), block_mask=block_mask
)
make_tensor2 = functools.partial(
torch.randn,
(4, 4, 2048, 64),
device=device,
dtype=torch.float32,
requires_grad=True,
)
q, k, v = make_tensor2(), make_tensor2(), make_tensor2()
# Compile 2nd version with q/k/v(seqlen=2048) and block_mask(seqlen=4096),
# The graph includes the BlockMask._adjust part.
out = torch.compile(flex_attention, dynamic=True, fullgraph=True)(
q, k, v, block_mask=block_mask
)
out.sum().backward()
q_grad, k_grad, v_grad = q.grad, k.grad, v.grad
q.grad = None
k.grad = None
v.grad = None
block_mask2 = create_block_mask(mask, None, None, 2048, 2048, device=device)
# Reuse the 1st version with q/k/v(seqlen=2048) and block_mask(seqlen=2048)
out2 = torch.compile(flex_attention, dynamic=True, fullgraph=True)(
q, k, v, block_mask=block_mask2
)
out2.sum().backward()
q_grad2, k_grad2, v_grad2 = q.grad, k.grad, v.grad
tolerance = Tolerances(atol=1e-3, rtol=1e-3)
torch.testing.assert_close(out, out2, atol=tolerance.atol, rtol=tolerance.rtol)
torch.testing.assert_close(
q_grad, q_grad2, atol=tolerance.atol, rtol=tolerance.rtol
)
torch.testing.assert_close(
k_grad, k_grad2, atol=tolerance.atol, rtol=tolerance.rtol
)
torch.testing.assert_close(
v_grad, v_grad2, atol=tolerance.atol, rtol=tolerance.rtol
)
@supported_platform
@skip_on_cpu
def test_float32_matmul_precision(self, device):
make_tensor = functools.partial(
torch.zeros,
(2, 2, 128, 32),
device=device,
dtype=torch.float32,
requires_grad=False,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
query.fill_(0.2)
key.fill_(0.3)
value.fill_(0.4)
query.requires_grad = True
key.requires_grad = True
value.requires_grad = True
def score_mod(score, b, h, q, kv):
return score * 2
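# Pin matmul precision to 'highest' so tf32 downcasting cannot hide differences
# between the eager and compiled gradients.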
with temp_float32_matmul_precision("highest"):
out_eager = flex_attention(query, key, value, score_mod)
flex_compiled = torch.compile(flex_attention, fullgraph=True)
out_compiled = flex_compiled(query, key, value, score_mod)
grads_eager = torch.autograd.grad(out_eager.sum(), (query, key, value))
grads_compile = torch.autograd.grad(out_compiled.sum(), (query, key, value))
torch.testing.assert_close(grads_eager, grads_compile)
@supported_platform
@skip_on_cpu
@common_utils.parametrize("score_mod_name", ["_head_offset"])
@common_utils.parametrize("mode", ["eager", "aot_eager"])
def test_captured_score_mod_aot_eager_gradcheck(
self, device, score_mod_name: str, mode: str
):
make_tensor = functools.partial(
torch.randn,
(2, 2, 11, 4),
device=device,
dtype=torch.float64,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
func = torch.compile(flex_attention, backend=mode, fullgraph=True)
score_mod = captured_buffers_map[score_mod_name](torch.float64, device)
self.assertTrue(
torch.autograd.gradcheck(
func, (query, key, value, score_mod), raise_exception=True
)
)
@supported_platform
@skip_on_cpu
@common_utils.parametrize("mode", ["eager", "aot_eager"])
def test_document_masking_edge_case(self, device, mode):
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
document_masks = torch.full((2, 128), 0, dtype=torch.int32, device=device)
document_masks[:, 64:] = 1
def mask_mod(b, h, q, kv):
same_doc = document_masks[b, q] == document_masks[b, kv]
return same_doc
make_tensor = functools.partial(
torch.randn,
(2, 1, 128, 4),
device=device,
dtype=torch.float64,
requires_grad=requires_grad,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
func = torch.compile(flex_attention, backend=mode, fullgraph=True)
block_mask = create_block_mask(mask_mod, 2, 1, 128, 128, device=device)
out = func(query, key, value, block_mask=block_mask)
if requires_grad:
out.sum().backward()
@supported_platform
@skip_on_cpu
def test_strided_backwards(self, device):
shape = (1, 2, 4096, 64)
Q = torch.randn(shape, requires_grad=True, device=device)
K = torch.randn(shape, requires_grad=True, device=device)
V = torch.randn(shape, requires_grad=True, device=device)
func = torch.compile(flex_attention, dynamic=True, fullgraph=True)
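# Slicing the last 128 tokens off K/V produces non-contiguous views; both the
# forward and backward kernels must handle the resulting strides.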
K_sliced = K[:, :, :-128]
V_sliced = V[:, :, :-128]
out_eager = flex_attention(Q, K_sliced, V_sliced)
out_compiled, code = run_and_get_code(func, Q, K_sliced, V_sliced)
# Make sure flex attention kernels have flex_attention in name
FileCheck().check_regex("triton_tem_fused_flex_attention.*").run(code[0])
FileCheck().check_regex("triton_tem_fused_flex_attention_backward.*").run(
code[1]
)
grad = torch.rand_like(out_eager)
eager_grads = torch.autograd.grad(out_eager, (Q, K, V), grad)
compiled_grads = torch.autograd.grad(out_compiled, (Q, K, V), grad)
for eager, compiled in zip(eager_grads, compiled_grads):
torch.testing.assert_close(eager, compiled, atol=9e-3, rtol=0)
@supported_platform
@skip_on_cpu
@common_utils.parametrize("mode", ["eager", "inductor", "paged_attention"])
@common_utils.parametrize(
"permute_order",
[
(0, 1, 2, 3), # Default order
(1, 0, 2, 3), # Reverse order
(0, 2, 1, 3), # Mixed order
(2, 0, 1, 3), # Another mixed order
(0, 1, 3, 2),  # Non-contiguous last dim
],
)
@common_utils.parametrize("shape", [(2, 1, 128, 16), (4, 2, 64, 16)])
def test_flex_attention_stride_ordering(self, device, mode, permute_order, shape):
from torch._inductor.ir import get_stride_order
if torch.version.hip and mode == "paged_attention":
raise self.skipTest(
"TODO: figure out why mode_paged_attention_permute_order3_shape0 on MI200 caused mem fault"
)
dtype = torch.float32
# Setup
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
make_tensor = functools.partial(
torch.randn,
shape,
device=device,
dtype=dtype,
requires_grad=False if mode == "paged_attention" else requires_grad,
)
# Create and permute tensors
query, key, value = make_tensor(), make_tensor(), make_tensor()
query = query.permute(permute_order)
key = key.permute(permute_order)
value = value.permute(permute_order)
if mode == "inductor":
func = torch.compile(flex_attention, backend=mode, fullgraph=True)
out = func(query, key, value)
elif mode == "paged_attention":
out, _ = self.run_paged_attention(
_identity, query, key, value, dtype, device=device
)
else:
func = flex_attention
out = func(query, key, value)
out_stride_order = get_stride_order(out.stride())
query_stride_order = get_stride_order(query.stride())
self.assertEqual(
out_stride_order,
query_stride_order,
f"Stride order mismatch: out {out_stride_order}, query {query_stride_order}",
)
@supported_platform
@skip_on_cpu
@common_utils.parametrize("mode", ["eager", "inductor"])
@common_utils.parametrize(
"permute_order",
[(0, 1, 2, 3), (1, 0, 2, 3), (0, 2, 1, 3), (2, 0, 1, 3), (0, 1, 3, 2)],
)
@common_utils.parametrize("shape", [(2, 5, 128, 16), (4, 2, 64, 16)])
def test_flex_attention_backward_stride_ordering(
self, device, mode, permute_order, shape
):
from torch._inductor.ir import get_stride_order
dtype = torch.float32
make_tensor = functools.partial(
torch.randn, shape, device=device, dtype=dtype, requires_grad=False
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
query = query.permute(permute_order)
key = key.permute(permute_order)
value = value.permute(permute_order)
query.requires_grad_()
key.requires_grad_()
value.requires_grad_()
func = (
torch.compile(flex_attention, backend=mode, fullgraph=True)
if mode == "inductor"
else flex_attention
)
out = func(query, key, value)
grad_output = torch.randn_like(out)
out.backward(grad_output)
for leaf, grad, name in [
(query, query.grad, "query"),
(key, key.grad, "key"),
(value, value.grad, "value"),
]:
input_stride_order = get_stride_order(grad.stride())
orig_stride_order = get_stride_order(leaf.stride())
self.assertEqual(
input_stride_order,
orig_stride_order,
f"Mode: {mode}, Stride order mismatch for {name}: grad {input_stride_order}, input {orig_stride_order}.",
)
@supported_platform
def test_non_contiguous_last_dim(self, device):
"""Test flex_attention with tensors having non contiguous last dimension."""
B, H, D = 4, 8, 64
dtype = torch.float16 if device in DEVICE_SUPPORTS_BACKWARDS else torch.float32
for S in [16, 64]:
def column_major_tensor():
tensor = torch.randn(
(B, H, S, D),
dtype=dtype,
device=device,
)
# Column major in last 2 dims
return tensor.transpose(-1, -2).contiguous().transpose(-1, -2)
q = column_major_tensor()
k = column_major_tensor()
v = column_major_tensor()
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
if requires_grad:
q.requires_grad_(True)
k.requires_grad_(True)
v.requires_grad_(True)
self.assertNotEqual(q.stride()[-1], 1)
self.assertNotEqual(k.stride()[-1], 1)
self.assertNotEqual(v.stride()[-1], 1)
q_ref, k_ref, v_ref = query_key_value_clones(q, k, v)
q_gold, k_gold, v_gold = query_key_value_clones(q, k, v, torch.float64)
golden_out = flex_attention(q_gold, k_gold, v_gold)
ref_out = flex_attention(q_ref, k_ref, v_ref)
flex_compiled = torch.compile(flex_attention, fullgraph=True, dynamic=True)
compiled_out = flex_compiled(q, k, v)
self._check_out(golden_out, ref_out, compiled_out)
if requires_grad:
backward_grad = torch.randn_like(ref_out)
golden_out.backward(backward_grad.to(torch.float64))
ref_out.backward(backward_grad)
compiled_out.backward(backward_grad)
self._check_out_and_grad(
golden_out,
ref_out,
compiled_out,
q_gold,
q_ref,
q,
k_gold,
k_ref,
k,
v_gold,
v_ref,
v,
)
@supported_platform
@common_utils.parametrize("compile", [True, False])
def test_fully_masked_out_rows_0_check(self, device, compile: bool):
# Ensure fully masked out rows won't cause NaNs.
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
query = torch.randn(
(B, H, S, D),
dtype=torch.float32,
device=device,
requires_grad=requires_grad,
)
key = torch.randn(
(B, H, S, D),
dtype=torch.float32,
device=device,
requires_grad=requires_grad,
)
value = torch.randn(
(B, H, S, D),
dtype=torch.float32,
device=device,
requires_grad=requires_grad,
)
M = S // 2
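# Rows with q_idx >= M attend to nothing; their outputs must be exactly zero and
# their LSE -inf, with no NaNs in the forward or backward pass.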
def mask_mod(b, h, q, kv):
return q < M
block_mask = create_block_mask(mask_mod, B, 1, S, S, device=device)
flex = (
torch.compile(flex_attention, dynamic=False) if compile else flex_attention
)
if requires_grad:
out, lse = flex(query, key, value, block_mask=block_mask, return_lse=True)
self.assertEqual(out[:, :, M:, :].sum(), 0)
self.assertTrue((lse[:, :, M:] == -float("inf")).all())
loss = out.sum() + lse.sum()
loss.backward()
self.assertEqual(query.grad[:, :, M:, :].sum(), 0)
else:
out = flex(query, key, value, block_mask=block_mask, return_lse=False)
self.assertEqual(out[:, :, M:, :].sum(), 0)
@supported_platform
def test_fully_masked_out_rows(self, device):
M = S // 2
def mask_mod(b, h, q, kv):
return q < M
block_mask = create_block_mask(mask_mod, B, 1, S, S, device=device)
def noop_mod(score, b, h, q_idx, kv_idx):
return score
self.run_test(
noop_mod, torch.float32, device, B, H, S, D, B, H, S, D, block_mask
)
@supported_platform
@skip_on_cpu
def test_kernel_options_argument_is_respected(self, device):
make_tensor = functools.partial(
torch.randn,
(2, 2, 128, 64),
device=device,
dtype=torch.float32,
requires_grad=True,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
# Ensure the user's kernel options are respected.
_, code = run_and_get_code(
torch.compile(flex_attention, fullgraph=True),
q,
k,
v,
kernel_options={"BLOCK_M": 16},
)
FileCheck().check("BLOCK_M : tl.constexpr = 16").run(code[0])
@supported_platform
@skip_on_cpu
def test_backend_auto_matches_triton_large(self, device):
"""BACKEND='AUTO' should follow Triton heuristics on large shapes."""
make_tensor = functools.partial(
torch.randn,
(2, 2, 256, 64),
device=device,
dtype=torch.float16,
requires_grad=False,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
def compile_and_run(kernel_options):
return run_and_get_code(
torch.compile(flex_attention, fullgraph=True),
q,
k,
v,
kernel_options=kernel_options,
)
default_out, default_code = compile_and_run({"BACKEND": "AUTO"})
triton_out, triton_code = compile_and_run({"BACKEND": "TRITON"})
torch.testing.assert_close(default_out, triton_out, atol=0.0, rtol=0.0)
default_src = "\n".join(default_code)
FileCheck().check("flex_attention").check_not("flex_decoding").run(default_src)
triton_src = "\n".join(triton_code)
FileCheck().check("flex_attention").check_not("flex_decoding").run(triton_src)
@supported_platform
@skip_on_cpu
def test_backend_triton_decode_matches_auto(self, device):
"""BACKEND='TRITON_DECODE' should match heuristics on decode-friendly shapes."""
make_tensor = functools.partial(
torch.randn,
(1, 2, 64, 64),
device=device,
dtype=torch.float16,
requires_grad=False,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
def compile_and_run(kernel_options):
return run_and_get_code(
torch.compile(flex_attention, fullgraph=True),
q,
k,
v,
kernel_options=kernel_options,
)
from torch._inductor.kernel.flex import flex_attention as flex_kernel_mod
with mock.patch.object(
flex_kernel_mod,
"create_flex_decoding_kernel",
wraps=flex_kernel_mod.create_flex_decoding_kernel,
) as decode_kernel:
default_out, _ = compile_and_run({"BACKEND": "AUTO"})
self.assertTrue(
decode_kernel.called,
"Expected heuristics to dispatch to flex decoding kernel.",
)
with mock.patch.object(
flex_kernel_mod,
"create_flex_decoding_kernel",
wraps=flex_kernel_mod.create_flex_decoding_kernel,
) as decode_kernel:
decode_out, _ = compile_and_run({"BACKEND": "TRITON_DECODE"})
self.assertTrue(
decode_kernel.called,
"Expected explicit BACKEND='TRITON_DECODE' to use flex decoding kernel.",
)
self.assertEqual(decode_out.shape, (1, 2, 64, 64))
torch.testing.assert_close(default_out, decode_out, atol=3e-3, rtol=3e-3)
@supported_platform
@skip_on_cpu
def test_backend_triton_decode_errors_when_not_supported(self, device):
"""Requesting decode on unsupported shapes should raise a helpful error."""
make_tensor = functools.partial(
torch.randn,
(1, 2, 256, 64),
device=device,
dtype=torch.float16,
requires_grad=False,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
flex_compiled = torch.compile(flex_attention, fullgraph=True)
with self.assertRaisesRegex(
RuntimeError,
r"BACKEND='TRITON_DECODE' was specified but flex_decoding cannot be used",
):
flex_compiled(q, k, v, kernel_options={"BACKEND": "TRITON_DECODE"})
@supported_platform
@skip_on_cpu
def test_backend_triton_decode_errors_with_non_power_of_two_gqa(self, device):
"""BACKEND='TRITON_DECODE' should fail when GQA ratio is not a power of two."""
q = torch.randn(
1, 3, 64, 64, device=device, dtype=torch.float16, requires_grad=False
)
k = torch.randn(
1, 1, 64, 64, device=device, dtype=torch.float16, requires_grad=False
)
v = torch.randn(
1, 1, 64, 64, device=device, dtype=torch.float16, requires_grad=False
)
flex_compiled = torch.compile(flex_attention, fullgraph=True)
with self.assertRaisesRegex(
RuntimeError,
r"BACKEND='TRITON_DECODE' was specified but flex_decoding cannot be used",
):
flex_compiled(
q,
k,
v,
enable_gqa=True,
kernel_options={"BACKEND": "TRITON_DECODE"},
)
@supported_platform
@skip_on_cpu
def test_backend_rejects_legacy_force_use_flag(self, device):
"""Combining BACKEND with FORCE_USE_FLEX_ATTENTION should raise an error."""
make_tensor = functools.partial(
torch.randn,
(2, 2, 128, 64),
device=device,
dtype=torch.float16,
requires_grad=False,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
flex_compiled = torch.compile(flex_attention, fullgraph=True)
with self.assertRaisesRegex(
RuntimeError,
r"BACKEND cannot be combined with legacy FORCE_USE_FLEX_ATTENTION",
):
flex_compiled(
q,
k,
v,
kernel_options={
"BACKEND": "TRITON",
"FORCE_USE_FLEX_ATTENTION": True,
},
)
@supported_platform
def test_backend_defaults_and_rejects_invalid(self, device):
device = torch.device(device)
query = torch.randn(1, 1, 4, 8, device=device, dtype=torch.float32)
key = torch.randn(1, 1, 4, 8, device=device, dtype=torch.float32)
value = torch.randn(1, 1, 4, 8, device=device, dtype=torch.float32)
kernel_options = _apply_kernel_options(
query, key, value, return_lse=True, kernel_options={}
)
self.assertEqual(kernel_options["BACKEND"], "AUTO")
with self.assertRaisesRegex(ValueError, r"Invalid BACKEND value 'INVALID'"):
_apply_kernel_options(
query,
key,
value,
return_lse=True,
kernel_options={"BACKEND": "INVALID"},
)
@supported_platform
def test_block_mask_non_divisible(self, device):
seq = torch.arange(1023, device=device) // 128
def mod(b, h, q, kv):
return seq[q] == seq[kv]
block_mask = create_block_mask(mod, None, None, 1023, 1023, device=device)
torch.compile(create_block_mask)(mod, None, None, 1023, 1023, device=device)
self.run_test_with_call(
lambda q, k, v: flex_attention(q, k, v, block_mask=block_mask),
torch.float16,
device,
Q_S=1023,
KV_S=1023,
)
@supported_platform
def test_causal_block_non_divisible(self, device):
def mask_mod(b, h, q, kv):
return q >= kv
block_mask = create_block_mask(mask_mod, B, 1, S - 1, S - 1, device=device)
attention = functools.partial(flex_attention, block_mask=block_mask)
self.run_test_with_call(attention, torch.float16, device, Q_S=S - 1, KV_S=S - 1)
@supported_platform
@skip_on_cpu
def test_modular_indexing(self, device):
B, H, N, D = 100, 12, 128, 64
dtype = torch.bfloat16
device = torch.device(device)
class Attention(torch.nn.Module):
def __init__(self):
super().__init__()
self.bias = torch.randn(B, N, N, H, device=device, dtype=dtype)
def forward(
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor
) -> torch.Tensor:
score_mod = generate_score_mod(self.bias)
o = flex_attention(q, k, v, score_mod=score_mod)
return o
def generate_score_mod(bias):
bias = (2 * bias).view(B, H, N, N).contiguous()
def score_mod(score, batch, head, q_idx, k_idx):
attn_bias = bias[batch, head, q_idx, k_idx]
return score + attn_bias
return score_mod
m = Attention().to(device).eval().to(dtype)
m = torch.compile(m, mode="default", fullgraph=False)
q = torch.randn(B, H, N, D, device=device, dtype=dtype)
k = torch.randn(B, H, N, D, device=device, dtype=dtype)
v = torch.randn(B, H, N, D, device=device, dtype=dtype)
m(q, k, v)
@supported_platform
@skip_on_cpu
def test_force_write_lse(self, device):
dtype = torch.float32
make_tensor = functools.partial(
torch.randn,
(2, 2, 128, 16),
device=device,
dtype=dtype,
requires_grad=False,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
out_eager, lse_eager = flex_attention(query, key, value, return_lse=True)
flex_compile = torch.compile(flex_attention)
out_compiled, lse_compiled = flex_compile(query, key, value, return_lse=True)
out_paged, lse_paged = self.run_paged_attention(
score_mod=_identity, q=query, k=key, v=value, dtype=dtype, device=device
)
torch.testing.assert_close(lse_eager, lse_compiled, atol=3e-3, rtol=0)
requires_grad = device in DEVICE_SUPPORTS_BACKWARDS
if requires_grad:
torch.testing.assert_close(lse_eager, lse_paged, atol=3e-3, rtol=0)
@supported_platform
@skip_on_cpu
@common_utils.parametrize("backend", ["flex_attention", "flex_decode", "eager"])
def test_lse_masked_output(self, device, backend):
if backend == "flex_decode":
kernel_options = {"FORCE_USE_FLEX_ATTENTION": False}
flex_call = torch.compile(flex_attention, fullgraph=True)
N_CTX = 96
elif backend == "flex_attention":
kernel_options = {"FORCE_USE_FLEX_ATTENTION": True}
flex_call = torch.compile(flex_attention, fullgraph=True)
N_CTX = 196
else:
kernel_options = {}
flex_call = flex_attention
N_CTX = 196
SLIDING_WINDOW = 64
make_tensor = functools.partial(
torch.randn,
(2, 2, N_CTX, 64),
device=device,
dtype=torch.float32,
requires_grad=True,
)
def sliding_window_causal(b, h, q_idx, kv_idx):
causal_mask = q_idx >= kv_idx
window_mask = q_idx - kv_idx <= SLIDING_WINDOW
return causal_mask & window_mask
def global_causal(b, h, q_idx, kv_idx):
causal_mask = q_idx >= kv_idx
window_mask = q_idx - kv_idx > SLIDING_WINDOW
return causal_mask & window_mask
sliding_window_causal = torch.nn.attention.flex_attention.create_block_mask(
sliding_window_causal,
B=None,
H=None,
Q_LEN=N_CTX,
KV_LEN=N_CTX,
device=device,
)
global_causal = torch.nn.attention.flex_attention.create_block_mask(
global_causal, B=None, H=None, Q_LEN=N_CTX, KV_LEN=N_CTX, device=device
)
local_attn = functools.partial(
flex_call,
block_mask=sliding_window_causal,
return_lse=True,
kernel_options=kernel_options,
)
global_attn = functools.partial(
flex_call,
block_mask=global_causal,
return_lse=True,
kernel_options=kernel_options,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
gradOut = make_tensor(requires_grad=False)
x_local, lse_local = local_attn(q, k, v)
x_global, lse_global = global_attn(q, k, v)
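# Merge the two partial attentions with their max-normalized exp(lse) weights; since
# the sliding-window and far-context masks partition the causal mask, the merged
# result must match full causal SDPA.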
max_lse = torch.maximum(lse_local, lse_global)
lse_global = lse_global - max_lse
lse_local = lse_local - max_lse
lse_global = torch.exp(lse_global)
lse_local = torch.exp(lse_local)
x = ((x_local * lse_local[..., None]) + (x_global * lse_global[..., None])) / (
lse_global[..., None] + lse_local[..., None]
)
x.backward(gradOut)
flex_q_grad, flex_k_grad, flex_v_grad = q.grad, k.grad, v.grad
q.grad = None
k.grad = None
v.grad = None
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True)
out.backward(gradOut)
torch.testing.assert_close(x, out, atol=3e-3, rtol=2e-3)
torch.testing.assert_close(flex_q_grad, q.grad, atol=3e-3, rtol=2e-3)
torch.testing.assert_close(flex_k_grad, k.grad, atol=3e-3, rtol=2e-3)
torch.testing.assert_close(flex_v_grad, v.grad, atol=3e-3, rtol=2e-3)
@supported_platform
@skip_on_cpu
def test_mixed_device_error_message(self, device):
# Create tensors on different devices
cpu_tensor = torch.randn(2, 2, 128, 16, device="cpu")
gpu_tensor = torch.randn(2, 2, 128, 16, device=device)
# Use different devices for query, key, and value
query, key, value = cpu_tensor, gpu_tensor, cpu_tensor
expected_error_message = (
"Expected query, key, and value to have the same device type, "
f"but got query.device: {query.device}, key.device: {key.device}, "
f"and value.device: {value.device} instead."
)
with self.assertRaisesRegex(ValueError, expected_error_message):
flex_attention(query, key, value)
@supported_platform
@skip_on_cpu
def test_captured_wrong_device_error_message(self, device):
means = torch.randn(64, 3, device=device)
length_scales = torch.logspace(0.001, 0.1, 8, device="cpu")
def euclidean_dist_pos_embed(score, b, h, q_idx, k_idx):
q_pos = means[q_idx]
k_pos = means[k_idx]
dist = (q_pos - k_pos).pow(2).sum(-1).sqrt()
scale = length_scales[h]
inv_dist = torch.exp(-dist / scale)
return inv_dist * score
expected_error_message = "Buffers cannot be created"
q, k, v = (torch.randn(1, 8, 64, 64, device=device) for _ in range(3))
with self.assertRaisesRegex(RuntimeError, expected_error_message):
torch.compile(flex_attention)(q, k, v, score_mod=euclidean_dist_pos_embed)
@supported_platform
@skip_on_cpu
def test_cant_lower_error_message(self, device):
# We can't lower a 256-element reduction inside the pointwise score_mod
means = torch.randn(64, 256, device=device)
length_scales = torch.logspace(0.001, 0.1, 8, device=device)
def euclidean_dist_pos_embed(score, b, h, q_idx, k_idx):
q_pos = means[q_idx]
k_pos = means[k_idx]
dist = (q_pos - k_pos).pow(2).sum(-1).sqrt()
scale = length_scales[h]
inv_dist = torch.exp(-dist / scale)
return inv_dist * score
expected_error_message = "Buffers cannot be created"
q, k, v = (torch.randn(1, 8, 64, 64, device=device) for _ in range(3))
with self.assertRaisesRegex(RuntimeError, expected_error_message):
torch.compile(flex_attention)(q, k, v, score_mod=euclidean_dist_pos_embed)
@supported_platform
@skip_on_cpu
def test_reduction_unrolled(self, device):
# A small (3-element) reduction inside the score_mod can be unrolled and lowered
means = torch.randn(S, 3, device=device)
length_scales = torch.logspace(0.001, 0.1, H, device=device)
def euclidean_dist_pos_embed(score, b, h, q_idx, k_idx):
q_pos = means[q_idx]
k_pos = means[k_idx]
dist = (q_pos - k_pos).pow(2).sum(-1).sqrt()
scale = length_scales[h]
inv_dist = torch.exp(-dist / scale)
return inv_dist * score
self.run_test(euclidean_dist_pos_embed, torch.bfloat16, device=device)
@supported_platform
@skip_on_cpu
def test_invalid_block_size(self, device):
# A BLOCK_SIZE that is not divisible by the kernel's BLOCK_M/BLOCK_N should be rejected
q, k, v = (torch.randn(1, 8, 128, 64, device=device) for _ in range(3))
expected_error_message = (
"ValueError: Q and KV block size must be divisible by BLOCK_M and BLOCK_N."
)
block_mask = create_block_mask(
noop_mask, 1, 8, 128, 128, BLOCK_SIZE=96, device=device
)
with self.assertRaisesRegex(RuntimeError, expected_error_message):
torch.compile(flex_attention)(q, k, v, block_mask=block_mask)
@supported_platform
@skip_on_cpu
def test_small_q_kv_len(self, device):
make_tensor = functools.partial(
torch.ones,
(1, 1, 1, 16),
device=device,
dtype=torch.float32,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
kernel_options = {"FORCE_USE_FLEX_ATTENTION": True}
out_eager, lse_eager = flex_attention(
query, key, value, return_lse=True, kernel_options=kernel_options
)
flex_compile = torch.compile(flex_attention, fullgraph=True)
out_compiled, lse_compiled = flex_compile(
query, key, value, return_lse=True, kernel_options=kernel_options
)
assert torch.equal(out_eager, out_compiled)
assert torch.equal(lse_eager, lse_compiled)
grads_eager = torch.autograd.grad(out_eager.sum(), (query, key, value))
grads_compile = torch.autograd.grad(out_compiled.sum(), (query, key, value))
torch.testing.assert_close(grads_eager, grads_compile)
@supported_platform
@skip_on_cpu
def test_dynamic_shapes_bug_dynamic_batch(self, device):
def _flex_attention_mask(b, h, q_idx, kv_idx, input_lengths):
padding_condition = (q_idx < input_lengths[b]) & (kv_idx < input_lengths[b])
return padding_condition
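        # Illustrative (assumed lengths, not the test's random values): with
        # input_lengths = [2, 5], batch 0 only attends among its first two
        # query/key positions; indices at or beyond the length are masked out.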
counter = CompileCounterWithBackend("inductor")
class Model(torch.nn.Module):
def __init__(self, dim=1024):
super().__init__()
self.subsampler = torch.nn.Conv1d(256, 256, 5)
self.projector = torch.nn.Linear(256, dim)
self.num_heads = 4
def forward(self, x, input_lengths):
x = self.subsampler(x.transpose(-1, -2)).transpose(-1, -2)
x = self.projector(x).transpose(0, 1)
head_dim = x.size(-1) // self.num_heads
x = x.view(-1, x.size(1), self.num_heads, head_dim)
x = x.permute(1, 2, 0, 3)
max_time = x.size(-2)
mask = torch.compile(create_block_mask, dynamic=True, fullgraph=False)(
functools.partial(
_flex_attention_mask,
input_lengths=input_lengths,
),
B=input_lengths.size(0),
H=None,
Q_LEN=max_time,
KV_LEN=max_time,
device=device,
)
x = torch.compile(
flex_attention, dynamic=True, fullgraph=True, backend=counter
)(
query=x,
key=x,
value=x,
block_mask=mask,
)
return x
model = Model(128).to(device)
B, F, T = 16, 256, 12
for _ in range(5):
x = torch.randn(B, T, F, device=device)
l = torch.randint(0, T, (B,), device=device)
model(x, l)
assert counter.frame_count == 1, (
f"Expected 1 graph, but got {counter.frame_count} graphs"
)
@supported_platform
@skip_on_cpu
def test_dynamic_shapes_with_custom_kernel_options(self, device):
make_tensor = functools.partial(
torch.ones,
(8, 8, 1024, 64),
device=device,
dtype=torch.bfloat16,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
kernel_options = {"BLOCK_M": 64, "BLOCK_N": 64}
out_eager = flex_attention(query, key, value, kernel_options=kernel_options)
flex_compile = torch.compile(flex_attention, fullgraph=True, dynamic=True)
out_compiled = flex_compile(query, key, value, kernel_options=kernel_options)
torch.testing.assert_close(out_eager, out_compiled, atol=3e-3, rtol=2e-3)
@supported_platform
def test_dynamic_shapes_with_max_autotune(self, device):
make_tensor = functools.partial(
torch.ones,
(8, 8, 1024, 64),
device=device,
dtype=torch.float if device == "cpu" else torch.bfloat16,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
block_mask = create_block_mask(
_causal_mask, None, None, 1024, 1024, device=device
)
out_eager = flex_attention(query, key, value, block_mask=block_mask)
flex_compile = torch.compile(
flex_attention, fullgraph=True, dynamic=True, mode="max-autotune"
)
out_compiled = flex_compile(query, key, value, block_mask=block_mask)
torch.testing.assert_close(out_eager, out_compiled, atol=3e-3, rtol=2e-3)
@supported_platform
@skip_on_cpu
def test_zero_length_sequence_error(self, device):
make_tensor = functools.partial(
torch.ones,
(8, 8, 0, 64), # Zero in sequence dimension
device=device,
dtype=torch.bfloat16,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
        # Compiling should raise an error for a zero-length query sequence
flex_compile = torch.compile(flex_attention, fullgraph=True)
with self.assertRaisesRegex(
torch._inductor.exc.InductorError, "Query length must be greater than 0"
):
flex_compile(query, key, value)
@supported_platform
def test_causal_block_non_divisible_with_captured_buffer(
self,
device,
):
Q_S = S - 3
KV_S = S - 3
offset_q = torch.randn(Q_S, device=device, dtype=torch.bfloat16)
offset_kv = torch.randn(KV_S, device=device, dtype=torch.bfloat16)
def score_mod(score, b, h, q, kv):
return score + offset_q[q] + offset_kv[kv]
def mask_mod(b, h, q, kv):
return q >= kv
block_mask = create_block_mask(mask_mod, B, 1, Q_S, KV_S, device=device)
attention = functools.partial(flex_attention, block_mask=block_mask)
self.run_test_with_call(
attention, Q_S=Q_S, KV_S=KV_S, dtype=torch.bfloat16, device=device
)
@supported_platform
def test_non_divisible_with_captured_buffer(self, device):
Q_S = S + 3
KV_S = S + 3
multiplier = torch.randn(Q_S, device=device, dtype=torch.bfloat16)
def apply_multiplicative_bias(score, b, h, q_idx, kv_idx):
return score * multiplier[q_idx]
attention = functools.partial(
flex_attention, score_mod=apply_multiplicative_bias
)
self.run_test_with_call(
attention, Q_S=Q_S, KV_S=KV_S, dtype=torch.bfloat16, device=device
)
@supported_platform
def test_num_warps_8_error(self, device):
attention = functools.partial(flex_attention, score_mod=_identity)
self.run_test_with_call(
attention,
dtype=torch.float16,
device=device,
Q_S=128,
KV_S=128,
Q_D=128,
V_D=128,
)
@supported_platform
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_qkv_and_block_mask_on_the_same_device(self, device):
make_tensor = functools.partial(
torch.ones,
(2, 2, 256, 32),
device="cuda:0",
dtype=torch.float32,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
def mask_mod(b, h, q, kv):
return q >= kv
block_mask = create_block_mask(mask_mod, 1, 1, 256, 256, device="cuda:1")
with self.assertRaisesRegex(
RuntimeError, "Expect q/k/v and block_mask to be on the same device"
):
torch.compile(flex_attention)(query, key, value, block_mask=block_mask)
@supported_platform
@skip_on_cpu
@unittest.skipIf(config.triton.native_matmul, "different dynamo counters")
def test_free_symbol_dynamic(self, device):
def batch_flip_causal(b, h, q_idx, kv_idx):
return (q_idx >= kv_idx) & (b % 2 == 0)
class SimpleAttention(torch.nn.Module):
def __init__(self, dim=512, n_head=8):
super().__init__()
self.qkv = torch.nn.Linear(dim, 3 * dim)
self.n_head = n_head
self.head_dim = dim // n_head
def forward(self, x, block_mask=None):
B, T, C = x.size()
qkv = self.qkv(x).view(B, T, 3, self.n_head, self.head_dim)
qkv = qkv.permute(2, 0, 3, 1, 4)
q, k, v = qkv
y = flex_attention(
q,
k,
v,
block_mask=block_mask,
)
return y.transpose(1, 2).contiguous().view(B, T, C)
model = SimpleAttention().to(device)
model.compile(mode="default", dynamic=True)
sequence_len = 256
# Test different batch shapes with dense masks
torch._dynamo.reset()
for batch_shape in [4, 16, 32]:
# Create dense mask
rand_mask = torch.randint(
0, 2, (batch_shape, sequence_len), device=device
).bool()
block_mask = torch.compile(create_block_mask, dynamic=True)(
B=batch_shape,
BLOCK_SIZE=128,
mask_mod=lambda b, h, q_idx, kv_idx: ~rand_mask[b, q_idx],
H=None,
Q_LEN=sequence_len,
KV_LEN=sequence_len,
device=device,
)
# Run forward pass
x = torch.randn(batch_shape, sequence_len, 512, device=device)
model(x, block_mask=block_mask)
self.assertEqual(torch._dynamo.utils.counters["aot_autograd"]["ok"], 2)
@supported_platform
@skip_on_cpu
def test_symbol_closure_in_score_mod(self, device):
class SimpleAttention(torch.nn.Module):
def __init__(self, dim=512, n_head=8):
super().__init__()
self.qkv = torch.nn.Linear(dim, 3 * dim)
self.n_head = n_head
self.head_dim = dim // n_head
def forward(self, x, block_mask=None):
B, T, C = x.size()
qkv = self.qkv(x).view(B, T, 3, self.n_head, self.head_dim)
qkv = qkv.permute(2, 0, 3, 1, 4)
q, k, v = qkv
return flex_attention(
q,
k,
v,
score_mod=lambda s, b, h, q, k: s + B,
block_mask=block_mask,
)
model = SimpleAttention().to(device)
from torch._dynamo.testing import EagerAndRecordGraphs
backend = EagerAndRecordGraphs()
model.compile(mode="default", dynamic=True, backend=backend)
sequence_len = 256
torch._dynamo.reset()
for batch_shape in [4, 16, 32]:
x = torch.randn(batch_shape, sequence_len, 512, device=device)
model(x)
self.assertEqual(len(backend.graphs), 1)
self.assertExpectedInline(
backend.graphs[0].score_mod_0.code.strip(),
"""\
def forward(self, child : torch.Tensor, child_1 : torch.Tensor, child_2 : torch.Tensor, child_3 : torch.Tensor, child_4 : torch.Tensor, getitem : torch.SymInt):
add = child + getitem; child = getitem = None
return add""",
)
@supported_platform
@skip_on_cpu
def test_fw_bw_graph_correctness(self, device):
cnt = CompileCounterWithBackend("aot_eager")
make_tensor = functools.partial(
torch.randn,
(2, 2, 128, 4),
device=device,
dtype=torch.float64,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
block_mask = create_block_mask(causal_mask, 1, 1, 128, 128, device=device)
func = torch.compile(flex_attention, backend=cnt, fullgraph=True)
out = func(query, key, value, _squared, block_mask=block_mask)
out.sum().backward()
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(len(cnt.graphs), 1)
graph = cnt.graphs[0]
norm_graph = normalize_gm(graph.print_readable(print_output=False))
self.assertExpectedInline(
norm_graph,
"""\
|
TestFlexAttention
|
python
|
huggingface__transformers
|
src/transformers/models/whisper/modeling_whisper.py
|
{
"start": 56803,
"end": 62321
}
|
class ____(WhisperPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"proj_out.weight": "model.decoder.embed_tokens.weight"}
main_input_name = "input_ids"
def __init__(self, config):
super().__init__(config)
config.is_encoder_decoder = False
self.model = WhisperDecoderWrapper(config)
self.proj_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.proj_out
def set_output_embeddings(self, new_embeddings):
self.proj_out = new_embeddings
def get_input_embeddings(self) -> nn.Module:
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[tuple[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
r"""
encoder_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import WhisperForCausalLM, WhisperForConditionalGeneration, WhisperProcessor
>>> import torch
>>> from datasets import load_dataset
>>> processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
>>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")
>>> assistant_model = WhisperForCausalLM.from_pretrained("distil-whisper/distil-large-v2")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> sample = ds[0]["audio"]
>>> input_features = processor(
... sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt"
... ).input_features
>>> predicted_ids = model.generate(input_features, assistant_model=assistant_model)
>>> # decode token ids to text
>>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
>>> transcription
' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.'
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# If the user passed a tuple or `BaseModelOutput` for encoder_outputs, we extract only the hidden states
if isinstance(encoder_outputs, (BaseModelOutput, tuple, list)):
encoder_outputs = encoder_outputs[0]
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
logits = self.proj_out(outputs[0])
loss = None
if labels is not None:
labels = labels.to(logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Whisper Encoder Model with a sequence classification head on top (a linear layer over the pooled output) for tasks
like SUPERB Keyword Spotting.
"""
)
|
WhisperForCausalLM
|
python
|
google__jax
|
tests/lax_numpy_test.py
|
{
"start": 243038,
"end": 251505
}
|
class ____(jtu.JaxTestCase):
def testWrappedSignaturesMatch(self):
"""Test that jax.numpy function signatures match numpy."""
# NumPy functions explicitly not implemented in JAX:
skip = {'array2string',
'asanyarray',
'asarray_chkfinite',
'ascontiguousarray',
'asfortranarray',
'asmatrix',
'base_repr',
'binary_repr',
'bmat',
'broadcast',
'busday_count',
'busday_offset',
'busdaycalendar',
'common_type',
'copyto',
'datetime_as_string',
'datetime_data',
'errstate',
'flatiter',
'format_float_positional',
'format_float_scientific',
'fromregex',
'genfromtxt',
'get_include',
'getbufsize',
'geterr',
'geterrcall',
'in1d',
'info',
'is_busday',
'isfortran',
'isnat',
'loadtxt',
'matrix',
'may_share_memory',
'memmap',
'min_scalar_type',
'mintypecode',
'ndenumerate',
'ndindex',
'nditer',
'nested_iters',
'poly1d',
'putmask',
'real_if_close',
'recarray',
'record',
'require',
'row_stack',
'savetxt',
'savez_compressed',
'setbufsize',
'seterr',
'seterrcall',
'shares_memory',
'show_config',
'show_runtime',
'test',
'trapz',
'typename'}
# symbols removed in NumPy 2.0
skip |= {'add_docstring',
'add_newdoc',
'add_newdoc_ufunc',
'alltrue',
'asfarray',
'byte_bounds',
'compare_chararrays',
'cumproduct',
'deprecate',
'deprecate_with_doc',
'disp',
'fastCopyAndTranspose',
'find_common_type',
'get_array_wrap',
'geterrobj',
'issctype',
'issubclass_',
'issubsctype',
'lookfor',
'mat',
'maximum_sctype',
'msort',
'obj2sctype',
'product',
'recfromcsv',
'recfromtxt',
'round_',
'safe_eval',
'sctype2char',
'set_numeric_ops',
'set_string_function',
'seterrobj',
'sometrue',
'source',
'who'}
self.assertEmpty(skip.intersection(dir(jnp)))
names = (name for name in dir(np) if not (name.startswith('_') or name in skip))
names = (name for name in names if callable(getattr(np, name)))
names = {name for name in names if not isinstance(getattr(np, name), type)}
self.assertEmpty(names.difference(dir(jnp)))
self.assertNotEmpty(names)
# TODO(jakevdp): fix some of the following signatures. Some are due to wrong argument names.
unsupported_params = {
'argpartition': ['kind', 'order'],
'asarray': ['like'],
'broadcast_to': ['subok'],
'clip': ['kwargs', 'out'],
'copy': ['subok'],
'corrcoef': ['ddof', 'bias'],
'cumulative_prod': ['out'],
'cumulative_sum': ['out'],
'empty_like': ['subok', 'order'],
'einsum': ['kwargs'],
'einsum_path': ['einsum_call'],
'eye': ['order', 'like'],
'hstack': ['casting'],
'identity': ['like'],
'isin': ['kind'],
'full': ['order', 'like'],
'full_like': ['subok', 'order'],
'fromfunction': ['like'],
'load': ['mmap_mode', 'allow_pickle', 'fix_imports', 'encoding', 'max_header_size'],
'nanpercentile': ['weights'],
'nanquantile': ['weights'],
'nanstd': ['correction'],
'nanvar': ['correction'],
'ones': ['order', 'like'],
'ones_like': ['subok', 'order'],
'partition': ['kind', 'order'],
'percentile': ['weights'],
'quantile': ['weights'],
'row_stack': ['casting'],
'stack': ['casting'],
'tri': ['like'],
'vstack': ['casting'],
'zeros_like': ['subok', 'order']
}
extra_params = {
'compress': ['size', 'fill_value'],
'einsum': ['subscripts', 'precision'],
'einsum_path': ['subscripts'],
'fill_diagonal': ['inplace'],
'load': ['args', 'kwargs'],
'take_along_axis': ['mode', 'fill_value'],
'unique': ['size', 'fill_value'],
}
mismatches = {}
for name in names:
jnp_fun = getattr(jnp, name)
np_fun = getattr(np, name)
if name in ['histogram', 'histogram2d', 'histogramdd']:
# numpy 1.24 re-orders the density and weights arguments.
# TODO(jakevdp): migrate histogram APIs to match newer numpy versions.
continue
if name == "clip":
# JAX's support of the Array API spec for clip, and the way it handles
# backwards compatibility was introduced in
# https://github.com/jax-ml/jax/pull/20550 with a different signature
# from the one in numpy, introduced in
# https://github.com/numpy/numpy/pull/26724
# TODO(dfm): After our deprecation period for the clip arguments ends
# it should be possible to reintroduce the check.
continue
if name == "reshape":
# Similar issue to clip: we'd need logic specific to the NumPy version
# because of the change in argument name from `newshape` to `shape`.
continue
# Note: can't use inspect.getfullargspec for some functions due to numpy issue
# https://github.com/numpy/numpy/issues/12225
try:
np_params = inspect.signature(np_fun).parameters
except ValueError:
continue
jnp_params = inspect.signature(jnp_fun).parameters
extra = set(extra_params.get(name, []))
unsupported = set(unsupported_params.get(name, []))
# Checks to prevent tests from becoming out-of-date. If these fail,
# it means that extra_params or unsupported_params need to be updated.
assert extra.issubset(jnp_params), f"{name}: {extra=} is not a subset of jnp_params={set(jnp_params)}."
assert not unsupported.intersection(jnp_params), f"{name}: {unsupported=} overlaps with jnp_params={set(jnp_params)}."
# Skip functions that only have *args and **kwargs; we can't introspect these further.
var_args = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
if all(p.kind in var_args for p in jnp_params.values()):
continue
if all(p.kind in var_args for p in np_params.values()):
continue
# Remove known extra parameters.
jnp_params = {a: p for a, p in jnp_params.items() if a not in extra}
# Remove known unsupported parameters.
np_params = {a: p for a, p in np_params.items() if a not in unsupported}
# Older versions of numpy may have fewer parameters; to avoid extraneous errors on older numpy
# versions, we allow for jnp to have more parameters.
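      # Illustrative sketch (hypothetical function, not from this test's data):
      # if np.f takes (a, b) and jnp.f takes (a, b, precision), the prefix
      # ['a', 'b'] still equals list(np_params), so the extra trailing
      # jnp-only parameter is not reported as a mismatch.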
if list(jnp_params)[:len(np_params)] != list(np_params):
mismatches[name] = {'np_params': list(np_params), 'jnp_params': list(jnp_params)}
self.assertEqual(mismatches, {})
_available_numpy_dtypes: list[str] = [dtype.__name__ for dtype in jtu.dtypes.all
if dtype != dtypes.bfloat16]
# TODO(jakevdp): implement missing ufuncs.
UNIMPLEMENTED_UFUNCS = {'spacing', 'matvec', 'vecmat'}
def _all_numpy_ufuncs() -> Iterator[str]:
"""Generate the names of all ufuncs in the top-level numpy namespace."""
for name in dir(np):
f = getattr(np, name)
if isinstance(f, np.ufunc) and name not in UNIMPLEMENTED_UFUNCS:
yield name
def _dtypes_for_ufunc(name: str) -> Iterator[tuple[str, ...]]:
"""Generate valid dtypes of inputs to the given numpy ufunc."""
func = getattr(np, name)
for arg_dtypes in itertools.product(_available_numpy_dtypes, repeat=func.nin):
args = (np.ones(1, dtype=dtype) for dtype in arg_dtypes)
try:
with jtu.ignore_warning(
category=RuntimeWarning, message="(divide by zero|invalid value)"):
_ = func(*args)
except TypeError:
pass
else:
yield arg_dtypes
|
NumpySignaturesTest
|
python
|
Textualize__textual
|
src/textual/widgets/_data_table.py
|
{
"start": 8111,
"end": 107851
}
|
class ____(ScrollView, Generic[CellType], can_focus=True):
"""A tabular widget that contains data."""
BINDINGS: ClassVar[list[BindingType]] = [
Binding("enter", "select_cursor", "Select", show=False),
Binding("up", "cursor_up", "Cursor up", show=False),
Binding("down", "cursor_down", "Cursor down", show=False),
Binding("right", "cursor_right", "Cursor right", show=False),
Binding("left", "cursor_left", "Cursor left", show=False),
Binding("pageup", "page_up", "Page up", show=False),
Binding("pagedown", "page_down", "Page down", show=False),
Binding("ctrl+home", "scroll_top", "Top", show=False),
Binding("ctrl+end", "scroll_bottom", "Bottom", show=False),
Binding("home", "scroll_home", "Home", show=False),
Binding("end", "scroll_end", "End", show=False),
]
"""
| Key(s) | Description |
| :- | :- |
| enter | Select cells under the cursor. |
| up | Move the cursor up. |
| down | Move the cursor down. |
| right | Move the cursor right. |
| left | Move the cursor left. |
| pageup | Move one page up. |
| pagedown | Move one page down. |
| ctrl+home | Move to the top. |
| ctrl+end | Move to the bottom. |
| home | Move to the home position (leftmost column). |
| end | Move to the end position (rightmost column). |
"""
COMPONENT_CLASSES: ClassVar[set[str]] = {
"datatable--cursor",
"datatable--hover",
"datatable--fixed",
"datatable--fixed-cursor",
"datatable--header",
"datatable--header-cursor",
"datatable--header-hover",
"datatable--odd-row",
"datatable--even-row",
}
"""
| Class | Description |
| :- | :- |
| `datatable--cursor` | Target the cursor. |
| `datatable--hover` | Target the cells under the hover cursor. |
| `datatable--fixed` | Target fixed columns and fixed rows. |
| `datatable--fixed-cursor` | Target highlighted and fixed columns or header. |
| `datatable--header` | Target the header of the data table. |
| `datatable--header-cursor` | Target cells highlighted by the cursor. |
| `datatable--header-hover` | Target hovered header or row label cells. |
| `datatable--even-row` | Target even rows (row indices start at 0) if zebra_stripes. |
| `datatable--odd-row` | Target odd rows (row indices start at 0) if zebra_stripes. |
"""
DEFAULT_CSS = """
DataTable {
background: $surface;
color: $foreground;
height: auto;
max-height: 100%;
&.datatable--fixed-cursor {
background: $block-cursor-blurred-background;
}
&:focus {
background-tint: $foreground 5%;
& > .datatable--cursor {
background: $block-cursor-background;
color: $block-cursor-foreground;
text-style: $block-cursor-text-style;
}
& > .datatable--header {
background-tint: $foreground 5%;
}
& > .datatable--fixed-cursor {
color: $block-cursor-foreground;
background: $block-cursor-background;
}
}
&:dark {
& > .datatable--even-row {
background: $surface-darken-1 40%;
}
}
& > .datatable--header {
text-style: bold;
background: $panel;
color: $foreground;
}
&:ansi > .datatable--header {
background: ansi_bright_blue;
color: ansi_default;
}
& > .datatable--fixed {
background: $secondary-muted;
color: $foreground;
}
& > .datatable--odd-row {
}
& > .datatable--even-row {
background: $surface-lighten-1 50%;
}
& > .datatable--cursor {
background: $block-cursor-blurred-background;
color: $block-cursor-blurred-foreground;
text-style: $block-cursor-blurred-text-style;
}
& > .datatable--fixed-cursor {
background: $block-cursor-blurred-background;
color: $foreground;
}
& > .datatable--header-cursor {
background: $accent-darken-1;
color: $foreground;
}
& > .datatable--header-hover {
background: $accent 30%;
}
& > .datatable--hover {
background: $block-hover-background;
}
}
"""
show_header = Reactive(True)
show_row_labels = Reactive(True)
fixed_rows = Reactive(0)
fixed_columns = Reactive(0)
zebra_stripes = Reactive(False)
header_height = Reactive(1)
show_cursor = Reactive(True)
cursor_type: Reactive[CursorType] = Reactive[CursorType]("cell")
"""The type of the cursor of the `DataTable`."""
cell_padding = Reactive(1)
"""Horizontal padding between cells, applied on each side of each cell."""
cursor_coordinate: Reactive[Coordinate] = Reactive(
Coordinate(0, 0), repaint=False, always_update=True
)
"""Current cursor [`Coordinate`][textual.coordinate.Coordinate].
This can be set programmatically or changed via the method
[`move_cursor`][textual.widgets.DataTable.move_cursor].
"""
hover_coordinate: Reactive[Coordinate] = Reactive(
Coordinate(0, 0), repaint=False, always_update=True
)
"""The coordinate of the `DataTable` that is being hovered."""
class CellHighlighted(Message):
"""Posted when the cursor moves to highlight a new cell.
This is only relevant when the `cursor_type` is `"cell"`.
It's also posted when the cell cursor is
re-enabled (by setting `show_cursor=True`), and when the cursor type is
changed to `"cell"`. Can be handled using `on_data_table_cell_highlighted` in
a subclass of `DataTable` or in a parent widget in the DOM.
"""
def __init__(
self,
data_table: DataTable,
value: CellType,
coordinate: Coordinate,
cell_key: CellKey,
) -> None:
self.data_table = data_table
"""The data table."""
self.value: CellType = value
"""The value in the highlighted cell."""
self.coordinate: Coordinate = coordinate
"""The coordinate of the highlighted cell."""
self.cell_key: CellKey = cell_key
"""The key for the highlighted cell."""
super().__init__()
def __rich_repr__(self) -> rich.repr.Result:
yield "value", self.value
yield "coordinate", self.coordinate
yield "cell_key", self.cell_key
@property
def control(self) -> DataTable:
"""Alias for the data table."""
return self.data_table
class CellSelected(Message):
"""Posted by the `DataTable` widget when a cell is selected.
This is only relevant when the `cursor_type` is `"cell"`. Can be handled using
`on_data_table_cell_selected` in a subclass of `DataTable` or in a parent
widget in the DOM.
"""
def __init__(
self,
data_table: DataTable,
value: CellType,
coordinate: Coordinate,
cell_key: CellKey,
) -> None:
self.data_table = data_table
"""The data table."""
self.value: CellType = value
"""The value in the cell that was selected."""
self.coordinate: Coordinate = coordinate
"""The coordinate of the cell that was selected."""
self.cell_key: CellKey = cell_key
"""The key for the selected cell."""
super().__init__()
def __rich_repr__(self) -> rich.repr.Result:
yield "value", self.value
yield "coordinate", self.coordinate
yield "cell_key", self.cell_key
@property
def control(self) -> DataTable:
"""Alias for the data table."""
return self.data_table
class RowHighlighted(Message):
"""Posted when a row is highlighted.
This message is only posted when the
`cursor_type` is set to `"row"`. Can be handled using
`on_data_table_row_highlighted` in a subclass of `DataTable` or in a parent
widget in the DOM.
"""
def __init__(
self, data_table: DataTable, cursor_row: int, row_key: RowKey
) -> None:
self.data_table = data_table
"""The data table."""
self.cursor_row: int = cursor_row
"""The y-coordinate of the cursor that highlighted the row."""
self.row_key: RowKey = row_key
"""The key of the row that was highlighted."""
super().__init__()
def __rich_repr__(self) -> rich.repr.Result:
yield "cursor_row", self.cursor_row
yield "row_key", self.row_key
@property
def control(self) -> DataTable:
"""Alias for the data table."""
return self.data_table
class RowSelected(Message):
"""Posted when a row is selected.
This message is only posted when the
`cursor_type` is set to `"row"`. Can be handled using
`on_data_table_row_selected` in a subclass of `DataTable` or in a parent
widget in the DOM.
"""
def __init__(
self, data_table: DataTable, cursor_row: int, row_key: RowKey
) -> None:
self.data_table = data_table
"""The data table."""
self.cursor_row: int = cursor_row
"""The y-coordinate of the cursor that made the selection."""
self.row_key: RowKey = row_key
"""The key of the row that was selected."""
super().__init__()
def __rich_repr__(self) -> rich.repr.Result:
yield "cursor_row", self.cursor_row
yield "row_key", self.row_key
@property
def control(self) -> DataTable:
"""Alias for the data table."""
return self.data_table
class ColumnHighlighted(Message):
"""Posted when a column is highlighted.
This message is only posted when the
`cursor_type` is set to `"column"`. Can be handled using
`on_data_table_column_highlighted` in a subclass of `DataTable` or in a parent
widget in the DOM.
"""
def __init__(
self, data_table: DataTable, cursor_column: int, column_key: ColumnKey
) -> None:
self.data_table = data_table
"""The data table."""
self.cursor_column: int = cursor_column
"""The x-coordinate of the column that was highlighted."""
self.column_key = column_key
"""The key of the column that was highlighted."""
super().__init__()
def __rich_repr__(self) -> rich.repr.Result:
yield "cursor_column", self.cursor_column
yield "column_key", self.column_key
@property
def control(self) -> DataTable:
"""Alias for the data table."""
return self.data_table
class ColumnSelected(Message):
"""Posted when a column is selected.
This message is only posted when the
`cursor_type` is set to `"column"`. Can be handled using
`on_data_table_column_selected` in a subclass of `DataTable` or in a parent
widget in the DOM.
"""
def __init__(
self, data_table: DataTable, cursor_column: int, column_key: ColumnKey
) -> None:
self.data_table = data_table
"""The data table."""
self.cursor_column: int = cursor_column
"""The x-coordinate of the column that was selected."""
self.column_key = column_key
"""The key of the column that was selected."""
super().__init__()
def __rich_repr__(self) -> rich.repr.Result:
yield "cursor_column", self.cursor_column
yield "column_key", self.column_key
@property
def control(self) -> DataTable:
"""Alias for the data table."""
return self.data_table
class HeaderSelected(Message):
"""Posted when a column header/label is clicked."""
def __init__(
self,
data_table: DataTable,
column_key: ColumnKey,
column_index: int,
label: Text,
):
self.data_table = data_table
"""The data table."""
self.column_key = column_key
"""The key for the column."""
self.column_index = column_index
"""The index for the column."""
self.label = label
"""The text of the label."""
super().__init__()
def __rich_repr__(self) -> rich.repr.Result:
yield "column_key", self.column_key
yield "column_index", self.column_index
yield "label", self.label.plain
@property
def control(self) -> DataTable:
"""Alias for the data table."""
return self.data_table
class RowLabelSelected(Message):
"""Posted when a row label is clicked."""
def __init__(
self,
data_table: DataTable,
row_key: RowKey,
row_index: int,
label: Text,
):
self.data_table = data_table
"""The data table."""
self.row_key = row_key
"""The key for the column."""
self.row_index = row_index
"""The index for the column."""
self.label = label
"""The text of the label."""
super().__init__()
def __rich_repr__(self) -> rich.repr.Result:
yield "row_key", self.row_key
yield "row_index", self.row_index
yield "label", self.label.plain
@property
def control(self) -> DataTable:
"""Alias for the data table."""
return self.data_table
def __init__(
self,
*,
show_header: bool = True,
show_row_labels: bool = True,
fixed_rows: int = 0,
fixed_columns: int = 0,
zebra_stripes: bool = False,
header_height: int = 1,
show_cursor: bool = True,
cursor_foreground_priority: Literal["renderable", "css"] = "css",
cursor_background_priority: Literal["renderable", "css"] = "renderable",
cursor_type: CursorType = "cell",
cell_padding: int = 1,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
) -> None:
"""Initializes a widget to display tabular data.
Args:
show_header: Whether the table header should be visible or not.
show_row_labels: Whether the row labels should be shown or not.
fixed_rows: The number of rows, counting from the top, that should be fixed
and still visible when the user scrolls down.
fixed_columns: The number of columns, counting from the left, that should be
fixed and still visible when the user scrolls right.
zebra_stripes: Enables or disables a zebra effect applied to the background
color of the rows of the table, where alternate colors are styled
differently to improve the readability of the table.
header_height: The height, in number of cells, of the data table header.
show_cursor: Whether the cursor should be visible when navigating the data
table or not.
cursor_foreground_priority: If the data associated with a cell is an
arbitrary renderable with a set foreground color, this determines whether
that color is prioritized over the cursor component class or not.
cursor_background_priority: If the data associated with a cell is an
arbitrary renderable with a set background color, this determines whether
that color is prioritized over the cursor component class or not.
cursor_type: The type of cursor to be used when navigating the data table
with the keyboard.
cell_padding: The number of cells added on each side of each column. Setting
this value to zero will likely make your table very hard to read.
name: The name of the widget.
id: The ID of the widget in the DOM.
classes: The CSS classes for the widget.
disabled: Whether the widget is disabled or not.
"""
super().__init__(name=name, id=id, classes=classes, disabled=disabled)
self._data: dict[RowKey, dict[ColumnKey, CellType]] = {}
"""Contains the cells of the table, indexed by row key and column key.
The final positioning of a cell on screen cannot be determined solely by this
structure. Instead, we must check _row_locations and _column_locations to find
where each cell currently resides in space."""
self.columns: dict[ColumnKey, Column] = {}
"""Metadata about the columns of the table, indexed by their key."""
self.rows: dict[RowKey, Row] = {}
"""Metadata about the rows of the table, indexed by their key."""
        # Keep track of key -> index for rows/cols. These allow us to retrieve,
# given a row or column key, the index that row or column is currently
# present at, and mean that rows and columns are location independent - they
# can move around without requiring us to modify the underlying data.
self._row_locations: TwoWayDict[RowKey, int] = TwoWayDict({})
"""Maps row keys to row indices which represent row order."""
self._column_locations: TwoWayDict[ColumnKey, int] = TwoWayDict({})
"""Maps column keys to column indices which represent column order."""
self._row_render_cache: LRUCache[
RowCacheKey, tuple[SegmentLines, SegmentLines]
] = LRUCache(1000)
"""For each row (a row can have a height of multiple lines), we maintain a
cache of the fixed and scrollable lines within that row to minimize how often
we need to re-render it. """
self._cell_render_cache: LRUCache[CellCacheKey, SegmentLines] = LRUCache(10000)
"""Cache for individual cells."""
self._row_renderable_cache: LRUCache[tuple[int, int], RowRenderables] = (
LRUCache(1000)
)
"""Caches row renderables - key is (update_count, row_index)"""
self._line_cache: LRUCache[LineCacheKey, Strip] = LRUCache(1000)
"""Cache for lines within rows."""
self._offset_cache: LRUCache[int, list[tuple[RowKey, int]]] = LRUCache(1)
"""Cached y_offset - key is update_count - see y_offsets property for more
information """
self._ordered_row_cache: LRUCache[tuple[int, int], list[Row]] = LRUCache(1)
"""Caches row ordering - key is (num_rows, update_count)."""
self._pseudo_class_state = PseudoClasses(False, False, False)
"""The pseudo-class state is used as part of cache keys to ensure that, for example,
when we lose focus on the DataTable, rules which apply to :focus are invalidated
and we prevent lingering styles."""
self._require_update_dimensions: bool = False
"""Set to re-calculate dimensions on idle."""
self._new_rows: set[RowKey] = set()
"""Tracking newly added rows to be used in calculation of dimensions on idle."""
self._updated_cells: set[CellKey] = set()
"""Track which cells were updated, so that we can refresh them once on idle."""
self._show_hover_cursor = False
"""Used to hide the mouse hover cursor when the user uses the keyboard."""
self._update_count = 0
"""Number of update (INCLUDING SORT) operations so far. Used for cache invalidation."""
self._header_row_key = RowKey()
"""The header is a special row - not part of the data. Retrieve via this key."""
self._label_column_key = ColumnKey()
"""The column containing row labels is not part of the data. This key identifies it."""
self._labelled_row_exists = False
"""Whether or not the user has supplied any rows with labels."""
self._label_column = Column(self._label_column_key, Text(), auto_width=True)
"""The largest content width out of all row labels in the table."""
self.show_header = show_header
"""Show/hide the header row (the row of column labels)."""
self.show_row_labels = show_row_labels
"""Show/hide the column containing the labels of rows."""
self.header_height = header_height
"""The height of the header row (the row of column labels)."""
self.fixed_rows = fixed_rows
"""The number of rows to fix (prevented from scrolling)."""
self.fixed_columns = fixed_columns
"""The number of columns to fix (prevented from scrolling)."""
self.zebra_stripes = zebra_stripes
"""Apply alternating styles, datatable--even-row and datatable-odd-row, to create a zebra effect, e.g.,
alternating light and dark backgrounds."""
self.show_cursor = show_cursor
"""Show/hide both the keyboard and hover cursor."""
self.cursor_foreground_priority = cursor_foreground_priority
"""Should we prioritize the cursor component class CSS foreground or the renderable foreground
in the event where a cell contains a renderable with a foreground color."""
self.cursor_background_priority = cursor_background_priority
"""Should we prioritize the cursor component class CSS background or the renderable background
in the event where a cell contains a renderable with a background color."""
self.cursor_type = cursor_type
"""The type of cursor of the `DataTable`."""
self.cell_padding = cell_padding
"""Horizontal padding between cells, applied on each side of each cell."""
@property
def hover_row(self) -> int:
"""The index of the row that the mouse cursor is currently hovering above."""
return self.hover_coordinate.row
@property
def hover_column(self) -> int:
"""The index of the column that the mouse cursor is currently hovering above."""
return self.hover_coordinate.column
@property
def cursor_row(self) -> int:
"""The index of the row that the DataTable cursor is currently on."""
return self.cursor_coordinate.row
@property
def cursor_column(self) -> int:
"""The index of the column that the DataTable cursor is currently on."""
return self.cursor_coordinate.column
@property
def row_count(self) -> int:
"""The number of rows currently present in the DataTable."""
return len(self.rows)
@property
def _y_offsets(self) -> list[tuple[RowKey, int]]:
"""Contains a 2-tuple for each line (not row!) of the DataTable. Given a
y-coordinate, we can index into this list to find which row that y-coordinate
lands on, and the y-offset *within* that row. The length of the returned list
is therefore the total height of all rows within the DataTable."""
y_offsets: list[tuple[RowKey, int]] = []
if self._update_count in self._offset_cache:
y_offsets = self._offset_cache[self._update_count]
else:
for row in self.ordered_rows:
y_offsets += [(row.key, y) for y in range(row.height)]
self._offset_cache[self._update_count] = y_offsets
return y_offsets
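    # Illustrative sketch of the structure built above (assumed heights, not
    # real data): two rows of heights 2 and 1 produce
    #   [(row_a.key, 0), (row_a.key, 1), (row_b.key, 0)]
    # so screen line y=1 resolves to the second line of the first row.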
@property
def _total_row_height(self) -> int:
"""The total height of all rows within the DataTable"""
return len(self._y_offsets)
def update_cell(
self,
row_key: RowKey | str,
column_key: ColumnKey | str,
value: CellType,
*,
update_width: bool = False,
) -> None:
"""Update the cell identified by the specified row key and column key.
Args:
row_key: The key identifying the row.
column_key: The key identifying the column.
value: The new value to put inside the cell.
update_width: Whether to resize the column width to accommodate
for the new cell content.
Raises:
CellDoesNotExist: When the supplied `row_key` and `column_key`
cannot be found in the table.
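        Example:
            A minimal sketch with hypothetical keys (not taken from any
            existing table):
            ```py
            table = app.query_one(DataTable)
            # assumes a row keyed "row-1" and a column keyed "price" were added
            table.update_cell("row-1", "price", 42, update_width=True)
            ```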
"""
if isinstance(row_key, str):
row_key = RowKey(row_key)
if isinstance(column_key, str):
column_key = ColumnKey(column_key)
if (
row_key not in self._row_locations
or column_key not in self._column_locations
):
raise CellDoesNotExist(
f"No cell exists for row_key={row_key!r}, column_key={column_key!r}."
)
self._data[row_key][column_key] = value
self._update_count += 1
# Recalculate widths if necessary
if update_width:
self._updated_cells.add(CellKey(row_key, column_key))
self._require_update_dimensions = True
self.refresh()
def update_cell_at(
self, coordinate: Coordinate, value: CellType, *, update_width: bool = False
) -> None:
"""Update the content inside the cell currently occupying the given coordinate.
Args:
coordinate: The coordinate to update the cell at.
value: The new value to place inside the cell.
update_width: Whether to resize the column width to accommodate
for the new cell content.
"""
if not self.is_valid_coordinate(coordinate):
raise CellDoesNotExist(f"Coordinate {coordinate!r} is invalid.")
row_key, column_key = self.coordinate_to_cell_key(coordinate)
self.update_cell(row_key, column_key, value, update_width=update_width)
def get_cell(self, row_key: RowKey | str, column_key: ColumnKey | str) -> CellType:
"""Given a row key and column key, return the value of the corresponding cell.
Args:
row_key: The row key of the cell.
column_key: The column key of the cell.
Returns:
The value of the cell identified by the row and column keys.
"""
try:
cell_value = self._data[row_key][column_key]
except KeyError:
raise CellDoesNotExist(
f"No cell exists for row_key={row_key!r}, column_key={column_key!r}."
)
return cell_value
def get_cell_at(self, coordinate: Coordinate) -> CellType:
"""Get the value from the cell occupying the given coordinate.
Args:
coordinate: The coordinate to retrieve the value from.
Returns:
The value of the cell at the coordinate.
Raises:
CellDoesNotExist: If there is no cell with the given coordinate.
"""
row_key, column_key = self.coordinate_to_cell_key(coordinate)
return self.get_cell(row_key, column_key)
def get_cell_coordinate(
self, row_key: RowKey | str, column_key: ColumnKey | str
) -> Coordinate:
"""Given a row key and column key, return the corresponding cell coordinate.
Args:
row_key: The row key of the cell.
column_key: The column key of the cell.
Returns:
The current coordinate of the cell identified by the row and column keys.
Raises:
CellDoesNotExist: If the specified cell does not exist.
"""
if (
row_key not in self._row_locations
or column_key not in self._column_locations
):
raise CellDoesNotExist(
f"No cell exists for row_key={row_key!r}, column_key={column_key!r}."
)
row_index = self._row_locations.get(row_key)
column_index = self._column_locations.get(column_key)
return Coordinate(row_index, column_index)
def get_row(self, row_key: RowKey | str) -> list[CellType]:
"""Get the values from the row identified by the given row key.
Args:
row_key: The key of the row.
Returns:
A list of the values contained within the row.
Raises:
RowDoesNotExist: When there is no row corresponding to the key.
"""
if row_key not in self._row_locations:
raise RowDoesNotExist(f"Row key {row_key!r} is not valid.")
cell_mapping: dict[ColumnKey, CellType] = self._data.get(row_key, {})
ordered_row: list[CellType] = [
cell_mapping[column.key] for column in self.ordered_columns
]
return ordered_row
def get_row_at(self, row_index: int) -> list[CellType]:
"""Get the values from the cells in a row at a given index. This will
        return the values from a row based on the row's _current position_ in
the table.
Args:
row_index: The index of the row.
Returns:
A list of the values contained in the row.
Raises:
RowDoesNotExist: If there is no row with the given index.
"""
if not self.is_valid_row_index(row_index):
raise RowDoesNotExist(f"Row index {row_index!r} is not valid.")
row_key = self._row_locations.get_key(row_index)
return self.get_row(row_key)
def get_row_index(self, row_key: RowKey | str) -> int:
"""Return the current index for the row identified by row_key.
Args:
row_key: The row key to find the current index of.
Returns:
The current index of the specified row key.
Raises:
RowDoesNotExist: If the row key does not exist.
"""
if row_key not in self._row_locations:
raise RowDoesNotExist(f"No row exists for row_key={row_key!r}")
return self._row_locations.get(row_key)
def get_column(self, column_key: ColumnKey | str) -> Iterable[CellType]:
"""Get the values from the column identified by the given column key.
Args:
column_key: The key of the column.
Returns:
A generator which yields the cells in the column.
Raises:
ColumnDoesNotExist: If there is no column corresponding to the key.
"""
if column_key not in self._column_locations:
raise ColumnDoesNotExist(f"Column key {column_key!r} is not valid.")
data = self._data
for row_metadata in self.ordered_rows:
row_key = row_metadata.key
yield data[row_key][column_key]
def get_column_at(self, column_index: int) -> Iterable[CellType]:
"""Get the values from the column at a given index.
Args:
column_index: The index of the column.
Returns:
A generator which yields the cells in the column.
Raises:
ColumnDoesNotExist: If there is no column with the given index.
"""
if not self.is_valid_column_index(column_index):
raise ColumnDoesNotExist(f"Column index {column_index!r} is not valid.")
column_key = self._column_locations.get_key(column_index)
yield from self.get_column(column_key)
def get_column_index(self, column_key: ColumnKey | str) -> int:
"""Return the current index for the column identified by column_key.
Args:
column_key: The column key to find the current index of.
Returns:
The current index of the specified column key.
Raises:
ColumnDoesNotExist: If the column key does not exist.
"""
if column_key not in self._column_locations:
raise ColumnDoesNotExist(f"No column exists for column_key={column_key!r}")
return self._column_locations.get(column_key)
def _clear_caches(self) -> None:
self._row_render_cache.clear()
self._cell_render_cache.clear()
self._row_renderable_cache.clear()
self._line_cache.clear()
self._styles_cache.clear()
self._offset_cache.clear()
self._ordered_row_cache.clear()
self._get_styles_to_render_cell.cache_clear()
def get_row_height(self, row_key: RowKey) -> int:
"""Given a row key, return the height of that row in terminal cells.
Args:
row_key: The key of the row.
Returns:
The height of the row, measured in terminal character cells.
"""
if row_key is self._header_row_key:
return self.header_height
return self.rows[row_key].height
def notify_style_update(self) -> None:
super().notify_style_update()
self._row_render_cache.clear()
self._cell_render_cache.clear()
self._row_renderable_cache.clear()
self._line_cache.clear()
self._styles_cache.clear()
self._get_styles_to_render_cell.cache_clear()
self.refresh()
def _on_resize(self, _: events.Resize) -> None:
self._update_count += 1
def watch_show_cursor(self, show_cursor: bool) -> None:
self._clear_caches()
if show_cursor and self.cursor_type != "none":
# When we re-enable the cursor, apply highlighting and
# post the appropriate [Row|Column|Cell]Highlighted event.
self._scroll_cursor_into_view(animate=False)
if self.cursor_type == "cell":
self._highlight_coordinate(self.cursor_coordinate)
elif self.cursor_type == "row":
self._highlight_row(self.cursor_row)
elif self.cursor_type == "column":
self._highlight_column(self.cursor_column)
def watch_show_header(self, show: bool) -> None:
width, height = self.virtual_size
height_change = self.header_height if show else -self.header_height
self.virtual_size = Size(width, height + height_change)
self._scroll_cursor_into_view()
self._clear_caches()
def watch_show_row_labels(self, show: bool) -> None:
width, height = self.virtual_size
column_width = self._label_column.get_render_width(self)
width_change = column_width if show else -column_width
self.virtual_size = Size(width + width_change, height)
self._scroll_cursor_into_view()
self._clear_caches()
def watch_fixed_rows(self) -> None:
self._clear_caches()
def watch_fixed_columns(self) -> None:
self._clear_caches()
def watch_zebra_stripes(self) -> None:
self._clear_caches()
def watch_header_height(self) -> None:
self._clear_caches()
def validate_cell_padding(self, cell_padding: int) -> int:
return max(cell_padding, 0)
def watch_cell_padding(self, old_padding: int, new_padding: int) -> None:
# A single side of a single cell will have its width changed by (new - old),
# so the total width change is double that per column, times the number of
# columns for the whole data table.
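        # Worked example (assumed values): going from padding 1 to 2 on a table
        # with 5 columns widens the virtual width by 2 * (2 - 1) * 5 = 10 cells.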
width_change = 2 * (new_padding - old_padding) * len(self.columns)
width, height = self.virtual_size
self.virtual_size = Size(width + width_change, height)
self._scroll_cursor_into_view()
self._clear_caches()
def watch_hover_coordinate(self, old: Coordinate, value: Coordinate) -> None:
self.refresh_coordinate(old)
self.refresh_coordinate(value)
def watch_cursor_coordinate(
self, old_coordinate: Coordinate, new_coordinate: Coordinate
) -> None:
if old_coordinate != new_coordinate:
# Refresh the old and the new cell, and post the appropriate
# message to tell users of the newly highlighted row/cell/column.
if self.cursor_type == "cell":
self.refresh_coordinate(old_coordinate)
self._highlight_coordinate(new_coordinate)
elif self.cursor_type == "row":
self.refresh_row(old_coordinate.row)
self._highlight_row(new_coordinate.row)
elif self.cursor_type == "column":
self.refresh_column(old_coordinate.column)
self._highlight_column(new_coordinate.column)
if self._require_update_dimensions:
self.call_after_refresh(self._scroll_cursor_into_view)
else:
self._scroll_cursor_into_view()
def move_cursor(
self,
*,
row: int | None = None,
column: int | None = None,
animate: bool = False,
scroll: bool = True,
) -> None:
"""Move the cursor to the given position.
Example:
```py
datatable = app.query_one(DataTable)
datatable.move_cursor(row=4, column=6)
# datatable.cursor_coordinate == Coordinate(4, 6)
datatable.move_cursor(row=3)
# datatable.cursor_coordinate == Coordinate(3, 6)
```
Args:
row: The new row to move the cursor to.
column: The new column to move the cursor to.
animate: Whether to animate the change of coordinates.
scroll: Scroll the cursor into view after moving.
"""
cursor_row, cursor_column = self.cursor_coordinate
if row is not None:
cursor_row = row
if column is not None:
cursor_column = column
destination = Coordinate(cursor_row, cursor_column)
# Scroll the cursor after refresh to ensure the virtual height
# (calculated in on_idle) has settled. If we tried to scroll before
# the virtual size has been set, then it might fail if we added a bunch
# of rows then tried to immediately move the cursor.
# We do this before setting `cursor_coordinate` because its watcher will also
# schedule a call to `_scroll_cursor_into_view` without optionally animating.
if scroll:
if self._require_update_dimensions:
self.call_after_refresh(self._scroll_cursor_into_view, animate=animate)
else:
self._scroll_cursor_into_view(animate=animate)
self.cursor_coordinate = destination
def _highlight_coordinate(self, coordinate: Coordinate) -> None:
"""Apply highlighting to the cell at the coordinate, and post event."""
self.refresh_coordinate(coordinate)
try:
cell_value = self.get_cell_at(coordinate)
except CellDoesNotExist:
# The cell may not exist e.g. when the table is cleared.
# In that case, there's nothing for us to do here.
return
else:
cell_key = self.coordinate_to_cell_key(coordinate)
self.post_message(
DataTable.CellHighlighted(
self, cell_value, coordinate=coordinate, cell_key=cell_key
)
)
def coordinate_to_cell_key(self, coordinate: Coordinate) -> CellKey:
"""Return the key for the cell currently occupying this coordinate.
Args:
            coordinate: The coordinate to examine for its current cell key.
Returns:
The key of the cell currently occupying this coordinate.
Raises:
CellDoesNotExist: If the coordinate is not valid.
"""
if not self.is_valid_coordinate(coordinate):
raise CellDoesNotExist(f"No cell exists at {coordinate!r}.")
row_index, column_index = coordinate
row_key = self._row_locations.get_key(row_index)
column_key = self._column_locations.get_key(column_index)
return CellKey(row_key, column_key)
def _highlight_row(self, row_index: int) -> None:
"""Apply highlighting to the row at the given index, and post event."""
self.refresh_row(row_index)
is_valid_row = row_index < len(self._data)
if is_valid_row:
row_key = self._row_locations.get_key(row_index)
self.post_message(DataTable.RowHighlighted(self, row_index, row_key))
def _highlight_column(self, column_index: int) -> None:
"""Apply highlighting to the column at the given index, and post event."""
self.refresh_column(column_index)
if column_index < len(self.columns):
column_key = self._column_locations.get_key(column_index)
self.post_message(
DataTable.ColumnHighlighted(self, column_index, column_key)
)
def validate_cursor_coordinate(self, value: Coordinate) -> Coordinate:
return self._clamp_cursor_coordinate(value)
def _clamp_cursor_coordinate(self, coordinate: Coordinate) -> Coordinate:
"""Clamp a coordinate such that it falls within the boundaries of the table."""
row, column = coordinate
row = clamp(row, 0, self.row_count - 1)
column = clamp(column, 0, len(self.columns) - 1)
return Coordinate(row, column)
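    # Illustrative sketch (assumed table size, not real state): with 3 rows and
    # 2 columns, _clamp_cursor_coordinate(Coordinate(5, -1)) returns
    # Coordinate(2, 0), keeping the cursor inside the table bounds.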
def watch_cursor_type(self, old: str, new: str) -> None:
self._set_hover_cursor(False)
if self.show_cursor:
self._highlight_cursor()
# Refresh cells that were previously impacted by the cursor
# but may no longer be.
if old == "cell":
self.refresh_coordinate(self.cursor_coordinate)
elif old == "row":
row_index, _ = self.cursor_coordinate
self.refresh_row(row_index)
elif old == "column":
_, column_index = self.cursor_coordinate
self.refresh_column(column_index)
self._scroll_cursor_into_view()
def _highlight_cursor(self) -> None:
"""Applies the appropriate highlighting and raises the appropriate
[Row|Column|Cell]Highlighted event for the given cursor coordinate
and cursor type."""
row_index, column_index = self.cursor_coordinate
cursor_type = self.cursor_type
# Apply the highlighting to the newly relevant cells
if cursor_type == "cell":
self._highlight_coordinate(self.cursor_coordinate)
elif cursor_type == "row":
self._highlight_row(row_index)
elif cursor_type == "column":
self._highlight_column(column_index)
@property
def _row_label_column_width(self) -> int:
"""The render width of the column containing row labels"""
return (
self._label_column.get_render_width(self)
if self._should_render_row_labels
else 0
)
def _update_column_widths(self, updated_cells: set[CellKey]) -> None:
"""Update the widths of the columns based on the newly updated cell widths."""
for row_key, column_key in updated_cells:
column = self.columns.get(column_key)
row = self.rows.get(row_key)
if column is None or row is None:
continue
console = self.app.console
label_width = measure(console, column.label, 1)
content_width = column.content_width
cell_value = self._data[row_key][column_key]
render_height = row.height
new_content_width = measure(
console,
default_cell_formatter(
cell_value,
wrap=row.height != 1,
height=render_height,
),
1,
)
if new_content_width < content_width:
cells_in_column = self.get_column(column_key)
cell_widths = [
measure(
console,
default_cell_formatter(
cell,
wrap=row.height != 1,
height=render_height,
),
1,
)
for cell in cells_in_column
]
column.content_width = max([*cell_widths, label_width])
else:
column.content_width = max(new_content_width, label_width)
self._require_update_dimensions = True
def _update_dimensions(self, new_rows: Iterable[RowKey]) -> None:
"""Called to recalculate the virtual (scrollable) size.
This recomputes column widths and then checks if any of the new rows need
to have their height computed.
Args:
new_rows: The new rows that will affect the `DataTable` dimensions.
"""
console = self.app.console
auto_height_rows: list[tuple[int, Row, list[RenderableType]]] = []
for row_key in new_rows:
row_index = self._row_locations.get(row_key)
# The row could have been removed before on_idle was called, so we
            # need to be quite defensive here and not assume that the row exists.
if row_index is None:
continue
row = self.rows.get(row_key)
assert row is not None
if row.label is not None:
self._labelled_row_exists = True
row_label, cells_in_row = self._get_row_renderables(row_index)
label_content_width = measure(console, row_label, 1) if row_label else 0
self._label_column.content_width = max(
self._label_column.content_width, label_content_width
)
for column, renderable in zip(self.ordered_columns, cells_in_row):
content_width = measure(console, renderable, 1)
column.content_width = max(column.content_width, content_width)
if row.auto_height:
auto_height_rows.append((row_index, row, cells_in_row))
# If there are rows that need to have their height computed, render them correctly
# so that we can cache this rendering for later.
if auto_height_rows:
self._offset_cache.clear()
render_cell = self._render_cell # This method renders & caches.
should_highlight = self._should_highlight
cursor_type = self.cursor_type
cursor_location = self.cursor_coordinate
hover_location = self.hover_coordinate
base_style = self.rich_style
fixed_style = self.get_component_styles(
"datatable--fixed"
).rich_style + Style.from_meta({"fixed": True})
ordered_columns = self.ordered_columns
fixed_columns = self.fixed_columns
for row_index, row, cells_in_row in auto_height_rows:
height = 0
row_style = self._get_row_style(row_index, base_style)
# As we go through the cells, save their rendering, height, and
# column width. After we compute the height of the row, go over the cells
# that were rendered with the wrong height and append the missing padding.
rendered_cells: list[tuple[SegmentLines, int, int]] = []
for column_index, column in enumerate(ordered_columns):
style = fixed_style if column_index < fixed_columns else row_style
cell_location = Coordinate(row_index, column_index)
rendered_cell = render_cell(
row_index,
column_index,
style,
column.get_render_width(self),
cursor=should_highlight(
cursor_location, cell_location, cursor_type
),
hover=should_highlight(
hover_location, cell_location, cursor_type
),
)
cell_height = len(rendered_cell)
rendered_cells.append(
(rendered_cell, cell_height, column.get_render_width(self))
)
height = max(height, cell_height)
row.height = height
# Do surgery on the cache for cells that were rendered with the incorrect
# height during the first pass.
for cell_renderable, cell_height, column_width in rendered_cells:
if cell_height < height:
first_line_space_style = cell_renderable[0][0].style
cell_renderable.extend(
[
[Segment(" " * column_width, first_line_space_style)]
for _ in range(height - cell_height)
]
)
self._line_cache.clear()
self._styles_cache.clear()
data_cells_width = sum(
column.get_render_width(self) for column in self.columns.values()
)
total_width = data_cells_width + self._row_label_column_width
header_height = self.header_height if self.show_header else 0
self.virtual_size = Size(
total_width,
self._total_row_height + header_height,
)
def _get_cell_region(self, coordinate: Coordinate) -> Region:
"""Get the region of the cell at the given spatial coordinate."""
if not self.is_valid_coordinate(coordinate):
return Region(0, 0, 0, 0)
row_index, column_index = coordinate
row_key = self._row_locations.get_key(row_index)
row = self.rows[row_key]
# The x-coordinate of a cell is the sum of the widths of the data cells to its
# left, plus the render width of the row label column.
x = (
sum(
column.get_render_width(self)
for column in self.ordered_columns[:column_index]
)
+ self._row_label_column_width
)
column_key = self._column_locations.get_key(column_index)
width = self.columns[column_key].get_render_width(self)
height = row.height
y = sum(ordered_row.height for ordered_row in self.ordered_rows[:row_index])
if self.show_header:
y += self.header_height
cell_region = Region(x, y, width, height)
return cell_region
def _get_row_region(self, row_index: int) -> Region:
"""Get the region of the row at the given index."""
if not self.is_valid_row_index(row_index):
return Region(0, 0, 0, 0)
rows = self.rows
row_key = self._row_locations.get_key(row_index)
row = rows[row_key]
row_width = (
sum(column.get_render_width(self) for column in self.columns.values())
+ self._row_label_column_width
)
y = sum(ordered_row.height for ordered_row in self.ordered_rows[:row_index])
if self.show_header:
y += self.header_height
row_region = Region(0, y, row_width, row.height)
return row_region
def _get_column_region(self, column_index: int) -> Region:
"""Get the region of the column at the given index."""
if not self.is_valid_column_index(column_index):
return Region(0, 0, 0, 0)
columns = self.columns
x = (
sum(
column.get_render_width(self)
for column in self.ordered_columns[:column_index]
)
+ self._row_label_column_width
)
column_key = self._column_locations.get_key(column_index)
width = columns[column_key].get_render_width(self)
header_height = self.header_height if self.show_header else 0
height = self._total_row_height + header_height
full_column_region = Region(x, 0, width, height)
return full_column_region
def clear(self, columns: bool = False) -> Self:
"""Clear the table.
Args:
columns: Also clear the columns.
Returns:
The `DataTable` instance.
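Example:
An illustrative sketch (not from the library docs); assumes `table` is a mounted `DataTable`:
```python
# Remove every row but keep the column definitions.
table.clear()
# Remove the columns as well.
table.clear(columns=True)
```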
"""
self._clear_caches()
self._y_offsets.clear()
self._data.clear()
self.rows.clear()
self._row_locations = TwoWayDict({})
if columns:
self.columns.clear()
self._column_locations = TwoWayDict({})
self._require_update_dimensions = True
self.cursor_coordinate = Coordinate(0, 0)
self.hover_coordinate = Coordinate(0, 0)
self._label_column = Column(self._label_column_key, Text(), auto_width=True)
self._labelled_row_exists = False
self.refresh()
self.scroll_x = 0
self.scroll_y = 0
self.scroll_target_x = 0
self.scroll_target_y = 0
return self
def add_column(
self,
label: TextType,
*,
width: int | None = None,
key: str | None = None,
default: CellType | None = None,
) -> ColumnKey:
"""Add a column to the table.
Args:
label: A str or Text object containing the label (shown top of column).
width: Width of the column in cells or None to fit content.
key: A key which uniquely identifies this column.
If None, it will be generated for you.
default: The value to insert into pre-existing rows.
Returns:
Uniquely identifies this column. Can be used to retrieve this column
regardless of its current location in the DataTable (it could have moved
after being added due to sorting/insertion/deletion of other columns).
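Example:
An illustrative sketch; assumes `table` is a mounted `DataTable`:
```python
# Fixed-width column with an explicit key; `default` back-fills any
# rows that already exist.
score_key = table.add_column("Score", width=8, key="score", default=0)
```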
"""
column_key = ColumnKey(key)
if column_key in self._column_locations:
raise DuplicateKey(f"The column key {key!r} already exists.")
column_index = len(self.columns)
label = Text.from_markup(label) if isinstance(label, str) else label
content_width = measure(self.app.console, label, 1)
if width is None:
column = Column(
column_key,
label,
content_width,
content_width=content_width,
auto_width=True,
)
else:
column = Column(
column_key,
label,
width,
content_width=content_width,
)
self.columns[column_key] = column
self._column_locations[column_key] = column_index
# Update pre-existing rows to account for the new column.
for row_key in self.rows.keys():
self._data[row_key][column_key] = default
self._updated_cells.add(CellKey(row_key, column_key))
self._require_update_dimensions = True
self._update_count += 1
self.check_idle()
return column_key
def add_row(
self,
*cells: CellType,
height: int | None = 1,
key: str | None = None,
label: TextType | None = None,
) -> RowKey:
"""Add a row at the bottom of the DataTable.
Args:
*cells: Positional arguments should contain cell data.
height: The height of a row (in lines). Use `None` to auto-detect the optimal
height.
key: A key which uniquely identifies this row. If None, it will be generated
for you and returned.
label: The label for the row. Will be displayed to the left if supplied.
Returns:
Unique identifier for this row. Can be used to retrieve this row regardless
of its current location in the DataTable (it could have moved after
being added due to sorting or insertion/deletion of other rows).
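Example:
An illustrative sketch; assumes `table` already has two columns:
```python
# One positional value per column; `height=None` auto-sizes the row.
row_key = table.add_row("Alice", 30, height=None, key="alice")
```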
"""
row_key = RowKey(key)
if row_key in self._row_locations:
raise DuplicateKey(f"The row key {row_key!r} already exists.")
# TODO: If there are no columns: do we generate them here?
# If we don't do this, users will be required to call add_column(s)
# before they call add_row.
if len(cells) > len(self.ordered_columns):
raise ValueError("More values provided than there are columns.")
row_index = self.row_count
# Map the key of this row to its current index
self._row_locations[row_key] = row_index
self._data[row_key] = {
column.key: cell
for column, cell in zip_longest(self.ordered_columns, cells)
}
label = Text.from_markup(label, end="") if isinstance(label, str) else label
# Rows with auto-height get a height of 0 because 1) we need an integer height
# to do some intermediate computations and 2) a height of 0 doesn't affect the
# data table until we have figured out how tall this row actually is.
self.rows[row_key] = Row(
row_key,
height or 0,
label,
height is None,
)
self._new_rows.add(row_key)
self._require_update_dimensions = True
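# Re-assigning the reactive coordinate re-runs its validation, which clamps
# the cursor to the updated row/column bounds.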
self.cursor_coordinate = self.cursor_coordinate
# If a position has opened for the cursor to appear, where it previously
# could not (e.g. when there's no data in the table), then a highlighted
# event is posted, since there's now a highlighted cell when there wasn't
# before.
cell_now_available = self.row_count == 1 and len(self.columns) > 0
visible_cursor = self.show_cursor and self.cursor_type != "none"
if cell_now_available and visible_cursor:
self._highlight_cursor()
self._update_count += 1
self.check_idle()
return row_key
def add_columns(
self, *columns: Union[TextType, tuple[TextType, str]]
) -> list[ColumnKey]:
"""Add multiple columns to the DataTable.
Args:
*columns: Column specifications. Each can be either:
- A string or Text object (label only, auto-generated key)
- A tuple of (label, key) for manual key control
Returns:
A list of the keys for the columns that were added. See
the `add_column` method docstring for more information on how
these keys are used.
Examples:
```python
# Add columns with auto-generated keys
keys = table.add_columns("Name", "Age", "City")
# Add columns with manual keys
keys = table.add_columns(
("Name", "name_col"),
("Age", "age_col"),
"City" # Mixed with auto-generated key
)
```
"""
column_keys = []
for column in columns:
if isinstance(column, tuple):
label, key = column
column_key = self.add_column(label, width=None, key=key)
else:
column_key = self.add_column(column, width=None)
column_keys.append(column_key)
return column_keys
def add_rows(self, rows: Iterable[Iterable[CellType]]) -> list[RowKey]:
"""Add a number of rows at the bottom of the DataTable.
Args:
rows: Iterable of rows. A row is an iterable of cells.
Returns:
A list of the keys for the rows that were added. See
the `add_row` method docstring for more information on how
these keys are used.
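Example:
An illustrative sketch; assumes `table` already has two columns:
```python
# Each inner iterable becomes one row; keys are auto-generated.
row_keys = table.add_rows([("Alice", 30), ("Bob", 27)])
```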
"""
row_keys = []
for row in rows:
row_key = self.add_row(*row)
row_keys.append(row_key)
return row_keys
def remove_row(self, row_key: RowKey | str) -> None:
"""Remove a row (identified by a key) from the DataTable.
Args:
row_key: The key identifying the row to remove.
Raises:
RowDoesNotExist: If the row key does not exist.
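Example:
An illustrative sketch; `row_key` is assumed to be a key returned by `add_row`:
```python
# A RowKey instance or the equivalent string key both work.
table.remove_row(row_key)
```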
"""
if row_key not in self._row_locations:
raise RowDoesNotExist(f"Row key {row_key!r} is not valid.")
self._require_update_dimensions = True
self.check_idle()
index_to_delete = self._row_locations.get(row_key)
new_row_locations = TwoWayDict({})
for row_location_key in self._row_locations:
row_index = self._row_locations.get(row_location_key)
if row_index > index_to_delete:
new_row_locations[row_location_key] = row_index - 1
elif row_index < index_to_delete:
new_row_locations[row_location_key] = row_index
self._row_locations = new_row_locations
# Prevent the removed cells from triggering dimension updates
for column_key in self._data.get(row_key):
self._updated_cells.discard(CellKey(row_key, column_key))
del self.rows[row_key]
del self._data[row_key]
self.cursor_coordinate = self.cursor_coordinate
self.hover_coordinate = self.hover_coordinate
self._update_count += 1
self.refresh(layout=True)
def remove_column(self, column_key: ColumnKey | str) -> None:
"""Remove a column (identified by a key) from the DataTable.
Args:
column_key: The key identifying the column to remove.
Raises:
ColumnDoesNotExist: If the column key does not exist.
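Example:
An illustrative sketch; `column_key` is assumed to be a key returned by `add_column`:
```python
# A ColumnKey instance or the equivalent string key both work.
table.remove_column(column_key)
```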
"""
if column_key not in self._column_locations:
raise ColumnDoesNotExist(f"Column key {column_key!r} is not valid.")
self._require_update_dimensions = True
self.check_idle()
index_to_delete = self._column_locations.get(column_key)
new_column_locations = TwoWayDict({})
for column_location_key in self._column_locations:
column_index = self._column_locations.get(column_location_key)
if column_index > index_to_delete:
new_column_locations[column_location_key] = column_index - 1
elif column_index < index_to_delete:
new_column_locations[column_location_key] = column_index
self._column_locations = new_column_locations
del self.columns[column_key]
for row_key in self._data:
self._updated_cells.discard(CellKey(row_key, column_key))
del self._data[row_key][column_key]
self.cursor_coordinate = self.cursor_coordinate
self.hover_coordinate = self.hover_coordinate
self._update_count += 1
self.refresh(layout=True)
async def _on_idle(self, _: events.Idle) -> None:
"""Runs when the message pump is empty.
We use this for some expensive calculations like re-computing dimensions of the
whole DataTable and re-computing column widths after some cells
have been updated. This is more efficient in the case of high
frequency updates, ensuring we only do expensive computations once."""
if self._updated_cells:
# Cell contents have already been updated at this point.
# Now we only need to worry about measuring column widths.
updated_cells = self._updated_cells.copy()
self._updated_cells.clear()
self._update_column_widths(updated_cells)
if self._require_update_dimensions:
# Add the new rows *before* updating the column widths, since
# cells in a new row may influence the final width of a column.
# Only then can we compute optimal height of rows with "auto" height.
self._require_update_dimensions = False
new_rows = self._new_rows.copy()
self._new_rows.clear()
self._update_dimensions(new_rows)
def refresh_coordinate(self, coordinate: Coordinate) -> Self:
"""Refresh the cell at a coordinate.
Args:
coordinate: The coordinate to refresh.
Returns:
The `DataTable` instance.
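Example:
An illustrative sketch; assumes `table` is a mounted `DataTable`:
```python
# Repaint just the cell at row 2, column 0.
table.refresh_coordinate(Coordinate(2, 0))
```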
"""
if not self.is_valid_coordinate(coordinate):
return self
region = self._get_cell_region(coordinate)
self._refresh_region(region)
return self
def refresh_row(self, row_index: int) -> Self:
"""Refresh the row at the given index.
Args:
row_index: The index of the row to refresh.
Returns:
The `DataTable` instance.
"""
if not self.is_valid_row_index(row_index):
return self
region = self._get_row_region(row_index)
self._refresh_region(region)
return self
def refresh_column(self, column_index: int) -> Self:
"""Refresh the column at the given index.
Args:
column_index: The index of the column to refresh.
Returns:
The `DataTable` instance.
"""
if not self.is_valid_column_index(column_index):
return self
region = self._get_column_region(column_index)
self._refresh_region(region)
return self
def _refresh_region(self, region: Region) -> Self:
"""Refresh a region of the DataTable, if it's visible within the window.
This method will translate the region to account for scrolling.
Returns:
The `DataTable` instance.
"""
if not self.window_region.overlaps(region):
return self
region = region.translate(-self.scroll_offset)
self.refresh(region)
return self
def is_valid_row_index(self, row_index: int) -> bool:
"""Return a boolean indicating whether the row_index is within table bounds.
Args:
row_index: The row index to check.
Returns:
True if the row index is within the bounds of the table.
"""
return 0 <= row_index < len(self.rows)
def is_valid_column_index(self, column_index: int) -> bool:
"""Return a boolean indicating whether the column_index is within table bounds.
Args:
column_index: The column index to check.
Returns:
True if the column index is within the bounds of the table.
"""
return 0 <= column_index < len(self.columns)
def is_valid_coordinate(self, coordinate: Coordinate) -> bool:
"""Return a boolean indicating whether the given coordinate is valid.
Args:
coordinate: The coordinate to validate.
Returns:
True if the coordinate is within the bounds of the table.
"""
row_index, column_index = coordinate
return self.is_valid_row_index(row_index) and self.is_valid_column_index(
column_index
)
@property
def ordered_columns(self) -> list[Column]:
"""The list of Columns in the DataTable, ordered as they appear on screen."""
column_indices = range(len(self.columns))
column_keys = [
self._column_locations.get_key(index) for index in column_indices
]
ordered_columns = [self.columns[key] for key in column_keys]
return ordered_columns
@property
def ordered_rows(self) -> list[Row]:
"""The list of Rows in the DataTable, ordered as they appear on screen."""
num_rows = self.row_count
update_count = self._update_count
cache_key = (num_rows, update_count)
if cache_key in self._ordered_row_cache:
ordered_rows = self._ordered_row_cache[cache_key]
else:
row_indices = range(num_rows)
ordered_rows = []
for row_index in row_indices:
row_key = self._row_locations.get_key(row_index)
row = self.rows[row_key]
ordered_rows.append(row)
self._ordered_row_cache[cache_key] = ordered_rows
return ordered_rows
@property
def _should_render_row_labels(self) -> bool:
"""Whether row labels should be rendered or not."""
return self._labelled_row_exists and self.show_row_labels
def _get_row_renderables(self, row_index: int) -> RowRenderables:
"""Get renderables for the row currently at the given row index. The renderables
returned here have already been passed through the default_cell_formatter.
Args:
row_index: Index of the row.
Returns:
A RowRenderables containing the optional label and the rendered cells.
"""
update_count = self._update_count
cache_key = (update_count, row_index)
if cache_key in self._row_renderable_cache:
row_renderables = self._row_renderable_cache[cache_key]
else:
row_renderables = self._compute_row_renderables(row_index)
self._row_renderable_cache[cache_key] = row_renderables
return row_renderables
def _compute_row_renderables(self, row_index: int) -> RowRenderables:
"""Actual computation for _get_row_renderables"""
ordered_columns = self.ordered_columns
if row_index == -1:
header_row: list[RenderableType] = [
column.label for column in ordered_columns
]
# This is the cell where header and row labels intersect
return RowRenderables(None, header_row)
ordered_row = self.get_row_at(row_index)
row_key = self._row_locations.get_key(row_index)
if row_key is None:
return RowRenderables(None, [])
row_metadata = self.rows.get(row_key)
if row_metadata is None:
return RowRenderables(None, [])
formatted_row_cells: list[RenderableType] = [
(
_EMPTY_TEXT
if datum is None
else default_cell_formatter(
datum,
wrap=row_metadata.height != 1,
height=row_metadata.height,
)
or _EMPTY_TEXT
)
for datum, _ in zip_longest(ordered_row, range(len(self.columns)))
]
label = None
if self._should_render_row_labels:
label = (
default_cell_formatter(
row_metadata.label,
wrap=row_metadata.height != 1,
height=row_metadata.height,
)
if row_metadata.label
else None
)
return RowRenderables(label, formatted_row_cells)
def _render_cell(
self,
row_index: int,
column_index: int,
base_style: Style,
width: int,
cursor: bool = False,
hover: bool = False,
) -> SegmentLines:
"""Render the given cell.
Args:
row_index: Index of the row.
column_index: Index of the column.
base_style: Style to apply.
width: Width of the cell.
cursor: Is this cell affected by cursor highlighting?
hover: Is this cell affected by hover cursor highlighting?
Returns:
A list of segments per line.
"""
is_header_cell = row_index == -1
is_row_label_cell = column_index == -1
is_fixed_style_cell = (
not is_header_cell
and not is_row_label_cell
and (row_index < self.fixed_rows or column_index < self.fixed_columns)
)
if is_header_cell:
row_key = self._header_row_key
else:
row_key = self._row_locations.get_key(row_index)
column_key = self._column_locations.get_key(column_index)
cell_cache_key: CellCacheKey = (
row_key,
column_key,
base_style,
cursor,
hover,
self._show_hover_cursor,
self._update_count,
self._pseudo_class_state,
)
if cell_cache_key not in self._cell_render_cache:
base_style += Style.from_meta({"row": row_index, "column": column_index})
row_label, row_cells = self._get_row_renderables(row_index)
if is_row_label_cell:
cell = row_label if row_label is not None else ""
else:
cell = row_cells[column_index]
component_style, post_style = self._get_styles_to_render_cell(
is_header_cell,
is_row_label_cell,
is_fixed_style_cell,
hover,
cursor,
self.show_cursor,
self._show_hover_cursor,
self.cursor_foreground_priority == "css",
self.cursor_background_priority == "css",
)
if is_header_cell:
row_height = self.header_height
options = self.app.console_options.update_dimensions(width, row_height)
else:
# If an auto-height row hasn't had its height calculated, we don't fix
# the value for `height` so that we can measure the height of the cell.
row = self.rows[row_key]
if row.auto_height and row.height == 0:
row_height = 0
options = self.app.console_options.update_width(width)
else:
row_height = row.height
options = self.app.console_options.update_dimensions(
width, row_height
)
# If the row height is explicitly set to 1, then we don't wrap.
if row_height == 1:
options = options.update(no_wrap=True)
lines = self.app.console.render_lines(
Styled(
Padding(cell, (0, self.cell_padding)),
pre_style=base_style + component_style,
post_style=post_style,
),
options,
)
self._cell_render_cache[cell_cache_key] = lines
return self._cell_render_cache[cell_cache_key]
@functools.lru_cache(maxsize=32)
def _get_styles_to_render_cell(
self,
is_header_cell: bool,
is_row_label_cell: bool,
is_fixed_style_cell: bool,
hover: bool,
cursor: bool,
show_cursor: bool,
show_hover_cursor: bool,
has_css_foreground_priority: bool,
has_css_background_priority: bool,
) -> tuple[Style, Style]:
"""Auxiliary method to compute styles used to render a given cell.
Args:
is_header_cell: Is this a cell from a header?
is_row_label_cell: Is this the label of any given row?
is_fixed_style_cell: Should this cell be styled like a fixed cell?
hover: Does this cell have the hover pseudo class?
cursor: Is this cell covered by the cursor?
show_cursor: Do we want to show the cursor in the data table?
show_hover_cursor: Do we want to show the mouse hover when using the keyboard
to move the cursor?
has_css_foreground_priority: `self.cursor_foreground_priority == "css"`?
has_css_background_priority: `self.cursor_background_priority == "css"`?
"""
get_component = self.get_component_rich_style
component_style = Style()
if hover and show_cursor and show_hover_cursor:
component_style += get_component("datatable--hover")
if is_header_cell or is_row_label_cell:
# Apply subtle variation in style for the header/label (blue background by
# default) rows and columns affected by the cursor, to ensure we can
# still differentiate between the labels and the data.
component_style += get_component("datatable--header-hover")
if cursor and show_cursor:
cursor_style = get_component("datatable--cursor")
component_style += cursor_style
if is_header_cell or is_row_label_cell:
component_style += get_component("datatable--header-cursor")
elif is_fixed_style_cell:
component_style += get_component("datatable--fixed-cursor")
post_foreground = (
Style.from_color(color=component_style.color)
if has_css_foreground_priority
else Style.null()
)
post_background = (
Style.from_color(bgcolor=component_style.bgcolor)
if has_css_background_priority
else Style.null()
)
return component_style, post_foreground + post_background
def _render_line_in_row(
self,
row_key: RowKey,
line_no: int,
base_style: Style,
cursor_location: Coordinate,
hover_location: Coordinate,
) -> tuple[SegmentLines, SegmentLines]:
"""Render a single line from a row in the DataTable.
Args:
row_key: The identifying key for this row.
line_no: Line number (y-coordinate) within row. 0 is the first strip of
cells in the row, line_no=1 is the next line in the row, and so on...
base_style: Base style of row.
cursor_location: The location of the cursor in the DataTable.
hover_location: The location of the hover cursor in the DataTable.
Returns:
Lines for fixed cells, and Lines for scrollable cells.
"""
cursor_type = self.cursor_type
show_cursor = self.show_cursor
cache_key = (
row_key,
line_no,
base_style,
cursor_location,
hover_location,
cursor_type,
show_cursor,
self._show_hover_cursor,
self._update_count,
self._pseudo_class_state,
)
if cache_key in self._row_render_cache:
return self._row_render_cache[cache_key]
should_highlight = self._should_highlight
render_cell = self._render_cell
header_style = self.get_component_styles("datatable--header").rich_style
if row_key in self._row_locations:
row_index = self._row_locations.get(row_key)
else:
row_index = -1
# If the row has a label, add it to fixed_row here with correct style.
fixed_row = []
if self._labelled_row_exists and self.show_row_labels:
# The width of the row label is updated again on idle
cell_location = Coordinate(row_index, -1)
label_cell_lines = render_cell(
row_index,
-1,
header_style,
width=self._row_label_column_width,
cursor=should_highlight(cursor_location, cell_location, cursor_type),
hover=should_highlight(hover_location, cell_location, cursor_type),
)[line_no]
fixed_row.append(label_cell_lines)
if self.fixed_columns:
if row_key is self._header_row_key:
fixed_style = header_style # We use the header style either way.
else:
fixed_style = self.get_component_styles("datatable--fixed").rich_style
fixed_style += Style.from_meta({"fixed": True})
for column_index, column in enumerate(
self.ordered_columns[: self.fixed_columns]
):
cell_location = Coordinate(row_index, column_index)
fixed_cell_lines = render_cell(
row_index,
column_index,
fixed_style,
column.get_render_width(self),
cursor=should_highlight(
cursor_location, cell_location, cursor_type
),
hover=should_highlight(hover_location, cell_location, cursor_type),
)[line_no]
fixed_row.append(fixed_cell_lines)
row_style = self._get_row_style(row_index, base_style)
scrollable_row = []
for column_index, column in enumerate(self.ordered_columns):
cell_location = Coordinate(row_index, column_index)
cell_lines = render_cell(
row_index,
column_index,
row_style,
column.get_render_width(self),
cursor=should_highlight(cursor_location, cell_location, cursor_type),
hover=should_highlight(hover_location, cell_location, cursor_type),
)[line_no]
scrollable_row.append(cell_lines)
# Extending the styling out horizontally to fill the container
widget_width = self.size.width
table_width = (
sum(
column.get_render_width(self)
for column in self.ordered_columns[self.fixed_columns :]
)
+ self._row_label_column_width
)
remaining_space = max(0, widget_width - table_width)
background_color = self.background_colors[1]
if row_style.bgcolor is not None:
# TODO: This should really be in a component class
faded_color = Color.from_rich_color(row_style.bgcolor).blend(
background_color, factor=0.25
)
faded_style = Style.from_color(
color=row_style.color, bgcolor=faded_color.rich_color
)
else:
faded_style = Style.from_color(row_style.color, row_style.bgcolor)
scrollable_row.append([Segment(" " * remaining_space, faded_style)])
row_pair = (fixed_row, scrollable_row)
self._row_render_cache[cache_key] = row_pair
return row_pair
def _get_offsets(self, y: int) -> tuple[RowKey, int]:
"""Get row key and line offset for a given line.
Args:
y: Y coordinate relative to DataTable top.
Returns:
Row key and line (y) offset within cell.
"""
header_height = self.header_height
y_offsets = self._y_offsets
if self.show_header:
if y < header_height:
return self._header_row_key, y
y -= header_height
if y >= len(y_offsets):
raise LookupError("Y coord {y!r} is greater than total height")
return y_offsets[y]
def _render_line(self, y: int, x1: int, x2: int, base_style: Style) -> Strip:
"""Render a (possibly cropped) line into a Strip (a list of segments
representing a horizontal line).
Args:
y: Y coordinate of line
x1: X start crop.
x2: X end crop (exclusive).
base_style: Style to apply to line.
Returns:
The Strip which represents this cropped line.
"""
width = self.size.width
try:
row_key, y_offset_in_row = self._get_offsets(y)
except LookupError:
return Strip.blank(width, base_style)
cache_key = (
y,
x1,
x2,
width,
self.cursor_coordinate,
self.hover_coordinate,
base_style,
self.cursor_type,
self._show_hover_cursor,
self._update_count,
self._pseudo_class_state,
)
if cache_key in self._line_cache:
return self._line_cache[cache_key]
fixed, scrollable = self._render_line_in_row(
row_key,
y_offset_in_row,
base_style,
cursor_location=self.cursor_coordinate,
hover_location=self.hover_coordinate,
)
fixed_width = sum(
column.get_render_width(self)
for column in self.ordered_columns[: self.fixed_columns]
)
fixed_line: list[Segment] = list(chain.from_iterable(fixed)) if fixed else []
scrollable_line: list[Segment] = list(chain.from_iterable(scrollable))
segments = fixed_line + line_crop(scrollable_line, x1 + fixed_width, x2, width)
strip = Strip(segments).adjust_cell_length(width, base_style).simplify()
self._line_cache[cache_key] = strip
return strip
def render_lines(self, crop: Region) -> list[Strip]:
self._pseudo_class_state = self.get_pseudo_class_state()
return super().render_lines(crop)
def render_line(self, y: int) -> Strip:
width, height = self.size
scroll_x, scroll_y = self.scroll_offset
fixed_row_keys: list[RowKey] = [
self._row_locations.get_key(row_index)
for row_index in range(self.fixed_rows)
]
fixed_rows_height = sum(
self.get_row_height(row_key) for row_key in fixed_row_keys
)
if self.show_header:
fixed_rows_height += self.get_row_height(self._header_row_key)
if y >= fixed_rows_height:
y += scroll_y
return self._render_line(y, scroll_x, scroll_x + width, self.rich_style)
def _should_highlight(
self,
cursor: Coordinate,
target_cell: Coordinate,
type_of_cursor: CursorType,
) -> bool:
"""Determine if the given cell should be highlighted because of the cursor.
This auxiliary method takes the cursor position and type into account when
determining whether the cell should be highlighted.
Args:
cursor: The current position of the cursor.
target_cell: The cell we're checking for the need to highlight.
type_of_cursor: The type of cursor that is currently active.
Returns:
Whether or not the given cell should be highlighted.
"""
if type_of_cursor == "cell":
return cursor == target_cell
elif type_of_cursor == "row":
cursor_row, _ = cursor
cell_row, _ = target_cell
return cursor_row == cell_row
elif type_of_cursor == "column":
_, cursor_column = cursor
_, cell_column = target_cell
return cursor_column == cell_column
else:
return False
def _get_row_style(self, row_index: int, base_style: Style) -> Style:
"""Gets the Style that should be applied to the row at the given index.
Args:
row_index: The index of the row to style.
base_style: The base style to use by default.
Returns:
The appropriate style.
"""
if row_index == -1:
row_style = self.get_component_styles("datatable--header").rich_style
elif row_index < self.fixed_rows:
row_style = self.get_component_styles("datatable--fixed").rich_style
else:
if self.zebra_stripes:
component_row_style = (
"datatable--odd-row" if row_index % 2 else "datatable--even-row"
)
row_style = self.get_component_styles(component_row_style).rich_style
else:
row_style = base_style
return row_style
def _on_mouse_move(self, event: events.MouseMove):
"""If the hover cursor is visible, display it by extracting the row
and column metadata from the segments present in the cells."""
self._set_hover_cursor(True)
meta = event.style.meta
if not meta:
self._set_hover_cursor(False)
return
if self.show_cursor and self.cursor_type != "none":
try:
self.hover_coordinate = Coordinate(meta["row"], meta["column"])
except KeyError:
pass
def _on_leave(self, _: events.Leave) -> None:
self._set_hover_cursor(False)
def _get_fixed_offset(self) -> Spacing:
"""Calculate the "fixed offset", that is the space to the top and left
that is occupied by fixed rows and columns respectively. Fixed rows and columns
are rows and columns that do not participate in scrolling."""
top = self.header_height if self.show_header else 0
top += sum(row.height for row in self.ordered_rows[: self.fixed_rows])
left = (
sum(
column.get_render_width(self)
for column in self.ordered_columns[: self.fixed_columns]
)
+ self._row_label_column_width
)
return Spacing(top, 0, 0, left)
def sort(
self,
*columns: ColumnKey | str,
key: Callable[[Any], Any] | None = None,
reverse: bool = False,
) -> Self:
"""Sort the rows in the `DataTable` by one or more column keys or a
key function (or other callable). If both columns and a key function
are specified, only data from those columns will be sent to the key function.
Args:
columns: One or more columns to sort by the values in.
key: A function (or other callable) that returns a key to
use for sorting purposes.
reverse: If True, the sort order will be reversed.
Returns:
The `DataTable` instance.
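Example:
An illustrative sketch; assumes the table has a "score" column and a "name" column of strings:
```python
# Sort by the "score" column, highest first.
table.sort("score", reverse=True)
# Sort using a key function applied to the values of the "name" column.
table.sort("name", key=str.lower)
```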
"""
def key_wrapper(row: tuple[RowKey, dict[ColumnKey | str, CellType]]) -> Any:
_, row_data = row
if columns:
result = itemgetter(*columns)(row_data)
else:
result = tuple(row_data.values())
if key is not None:
return key(result)
return result
ordered_rows = sorted(
self._data.items(),
key=key_wrapper,
reverse=reverse,
)
self._row_locations = TwoWayDict(
{row_key: new_index for new_index, (row_key, _) in enumerate(ordered_rows)}
)
self._update_count += 1
self.refresh()
return self
def _scroll_cursor_into_view(self, animate: bool = False) -> None:
"""When the cursor is at a boundary of the DataTable and moves out
of view, this method handles scrolling to ensure it remains visible."""
fixed_offset = self._get_fixed_offset()
top, _, _, left = fixed_offset
if self.cursor_type == "row":
x, y, width, height = self._get_row_region(self.cursor_row)
region = Region(int(self.scroll_x) + left, y, width - left, height)
elif self.cursor_type == "column":
x, y, width, height = self._get_column_region(self.cursor_column)
region = Region(x, int(self.scroll_y) + top, width, height - top)
else:
region = self._get_cell_region(self.cursor_coordinate)
self.scroll_to_region(region, animate=animate, spacing=fixed_offset, force=True)
def _set_hover_cursor(self, active: bool) -> None:
"""Set whether the hover cursor (the faint cursor you see when you
hover the mouse cursor over a cell) is visible or not. Typically,
when you interact with the keyboard, you want to switch the hover
cursor off.
Args:
active: Display the hover cursor.
"""
self._show_hover_cursor = active
cursor_type = self.cursor_type
if cursor_type == "column":
self.refresh_column(self.hover_column)
elif cursor_type == "row":
self.refresh_row(self.hover_row)
elif cursor_type == "cell":
self.refresh_coordinate(self.hover_coordinate)
async def _on_click(self, event: events.Click) -> None:
self._set_hover_cursor(True)
meta = event.style.meta
if "row" not in meta or "column" not in meta:
return
row_index = meta["row"]
column_index = meta["column"]
is_header_click = self.show_header and row_index == -1
is_row_label_click = self.show_row_labels and column_index == -1
if is_header_click:
# Header clicks work even if the cursor is off, and don't move the cursor.
column = self.ordered_columns[column_index]
message = DataTable.HeaderSelected(
self, column.key, column_index, label=column.label
)
self.post_message(message)
elif is_row_label_click:
row = self.ordered_rows[row_index]
message = DataTable.RowLabelSelected(
self, row.key, row_index, label=row.label
)
self.post_message(message)
elif self.show_cursor and self.cursor_type != "none":
# Only post selection events if there is a visible row/col/cell cursor.
self.cursor_coordinate = Coordinate(row_index, column_index)
self._post_selected_message()
self._scroll_cursor_into_view(animate=True)
event.stop()
def action_page_down(self) -> None:
"""Move the cursor one page down."""
self._set_hover_cursor(False)
if self.show_cursor and self.cursor_type in ("cell", "row"):
height = self.scrollable_content_region.height - (
self.header_height if self.show_header else 0
)
# Determine how many rows constitute a "page"
offset = 0
rows_to_scroll = 0
row_index, _ = self.cursor_coordinate
for ordered_row in self.ordered_rows[row_index:]:
offset += ordered_row.height
rows_to_scroll += 1
if offset > height:
break
target_row = row_index + rows_to_scroll - 1
self.scroll_relative(y=height, animate=False, force=True)
self.move_cursor(row=target_row, scroll=False)
else:
super().action_page_down()
def action_page_up(self) -> None:
"""Move the cursor one page up."""
self._set_hover_cursor(False)
if self.show_cursor and self.cursor_type in ("cell", "row"):
height = self.scrollable_content_region.height - (
self.header_height if self.show_header else 0
)
# Determine how many rows constitute a "page"
offset = 0
rows_to_scroll = 0
row_index, _ = self.cursor_coordinate
for ordered_row in self.ordered_rows[: row_index + 1]:
offset += ordered_row.height
rows_to_scroll += 1
if offset > height:
break
target_row = row_index - rows_to_scroll + 1
self.scroll_relative(y=-height, animate=False)
self.move_cursor(row=target_row, scroll=False)
else:
super().action_page_up()
def action_page_left(self) -> None:
"""Move the cursor one page left."""
self._set_hover_cursor(False)
super().scroll_page_left()
def action_page_right(self) -> None:
"""Move the cursor one page right."""
self._set_hover_cursor(False)
super().scroll_page_right()
def action_scroll_top(self) -> None:
"""Move the cursor and scroll to the top."""
self._set_hover_cursor(False)
cursor_type = self.cursor_type
if self.show_cursor and (cursor_type == "cell" or cursor_type == "row"):
_, column_index = self.cursor_coordinate
self.cursor_coordinate = Coordinate(0, column_index)
else:
super().action_scroll_home()
def action_scroll_bottom(self) -> None:
"""Move the cursor and scroll to the bottom."""
self._set_hover_cursor(False)
cursor_type = self.cursor_type
if self.show_cursor and (cursor_type == "cell" or cursor_type == "row"):
_, column_index = self.cursor_coordinate
self.cursor_coordinate = Coordinate(self.row_count - 1, column_index)
else:
super().action_scroll_end()
def action_scroll_home(self) -> None:
"""Move the cursor and scroll to the leftmost column."""
self._set_hover_cursor(False)
cursor_type = self.cursor_type
if self.show_cursor and (cursor_type == "cell" or cursor_type == "column"):
self.move_cursor(column=0)
else:
self.scroll_x = 0
def action_scroll_end(self) -> None:
"""Move the cursor and scroll to the rightmost column."""
self._set_hover_cursor(False)
cursor_type = self.cursor_type
if self.show_cursor and (cursor_type == "cell" or cursor_type == "column"):
self.move_cursor(column=len(self.columns) - 1)
else:
self.scroll_x = self.max_scroll_x
def action_cursor_up(self) -> None:
self._set_hover_cursor(False)
cursor_type = self.cursor_type
if self.show_cursor and (cursor_type == "cell" or cursor_type == "row"):
self.cursor_coordinate = self.cursor_coordinate.up()
else:
# If the cursor doesn't move up (e.g. column cursor can't go up),
# then ensure that we instead scroll the DataTable.
super().action_scroll_up()
def action_cursor_down(self) -> None:
self._set_hover_cursor(False)
cursor_type = self.cursor_type
if self.show_cursor and (cursor_type == "cell" or cursor_type == "row"):
self.cursor_coordinate = self.cursor_coordinate.down()
else:
super().action_scroll_down()
def action_cursor_right(self) -> None:
self._set_hover_cursor(False)
cursor_type = self.cursor_type
if self.show_cursor and (cursor_type == "cell" or cursor_type == "column"):
self.cursor_coordinate = self.cursor_coordinate.right()
self._scroll_cursor_into_view(animate=True)
else:
super().action_scroll_right()
def action_cursor_left(self) -> None:
self._set_hover_cursor(False)
cursor_type = self.cursor_type
if self.show_cursor and (cursor_type == "cell" or cursor_type == "column"):
self.cursor_coordinate = self.cursor_coordinate.left()
self._scroll_cursor_into_view(animate=True)
else:
super().action_scroll_left()
def action_select_cursor(self) -> None:
self._set_hover_cursor(False)
if self.show_cursor and self.cursor_type != "none":
self._post_selected_message()
def _post_selected_message(self):
"""Post the appropriate message for a selection based on the `cursor_type`."""
cursor_coordinate = self.cursor_coordinate
cursor_type = self.cursor_type
if len(self._data) == 0:
return
cell_key = self.coordinate_to_cell_key(cursor_coordinate)
if cursor_type == "cell":
self.post_message(
DataTable.CellSelected(
self,
self.get_cell_at(cursor_coordinate),
coordinate=cursor_coordinate,
cell_key=cell_key,
)
)
elif cursor_type == "row":
row_index, _ = cursor_coordinate
row_key, _ = cell_key
self.post_message(DataTable.RowSelected(self, row_index, row_key))
elif cursor_type == "column":
_, column_index = cursor_coordinate
_, column_key = cell_key
self.post_message(DataTable.ColumnSelected(self, column_index, column_key))
|
DataTable
|
python
|
explosion__spaCy
|
spacy/lang/hi/__init__.py
|
{
"start": 117,
"end": 215
}
|
class ____(BaseDefaults):
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
|
HindiDefaults
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/executors/ecs/boto_schema.py
|
{
"start": 2955,
"end": 3353
}
|
class ____(Schema):
"""Botocore Serialization Object for ECS ``RunTask`` Operation output."""
tasks = fields.List(fields.Nested(BotoTaskSchema), required=True)
failures = fields.List(fields.Nested(BotoFailureSchema), required=True)
class Meta:
"""Options object for a Schema. See Schema.Meta for more details and valid values."""
unknown = EXCLUDE
|
BotoRunTaskSchema
|
python
|
pyca__cryptography
|
tests/x509/test_x509_ext.py
|
{
"start": 206280,
"end": 207973
}
|
class ____:
def test_load(self, backend):
cert = _load_cert(
os.path.join("x509", "cryptography.io.precert.pem"),
x509.load_pem_x509_certificate,
)
poison = cert.extensions.get_extension_for_oid(
ExtensionOID.PRECERT_POISON
).value
assert isinstance(poison, x509.PrecertPoison)
poison = cert.extensions.get_extension_for_class(
x509.PrecertPoison
).value
assert isinstance(poison, x509.PrecertPoison)
def test_generate(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
private_key = rsa_key_2048
cert = (
_make_certbuilder(private_key)
.add_extension(x509.PrecertPoison(), critical=True)
.sign(private_key, hashes.SHA256(), backend)
)
poison = cert.extensions.get_extension_for_oid(
ExtensionOID.PRECERT_POISON
).value
assert isinstance(poison, x509.PrecertPoison)
def test_eq(self):
pcp1 = x509.PrecertPoison()
pcp2 = x509.PrecertPoison()
assert pcp1 == pcp2
def test_hash(self):
pcp1 = x509.PrecertPoison()
pcp2 = x509.PrecertPoison()
assert hash(pcp1) == hash(pcp2)
def test_ne(self):
pcp1 = x509.PrecertPoison()
pcp2 = x509.PrecertPoison()
assert pcp1 == pcp2
assert (pcp1 != pcp2) is False
assert pcp1 != object()
def test_repr(self):
pcp = x509.PrecertPoison()
assert repr(pcp) == "<PrecertPoison()>"
def test_public_bytes(self):
ext = x509.PrecertPoison()
assert ext.public_bytes() == b"\x05\x00"
|
TestPrecertPoisonExtension
|
python
|
apache__airflow
|
airflow-core/docs/empty_plugin/empty_plugin.py
|
{
"start": 1605,
"end": 1834
}
|
class ____(AirflowPlugin):
"""Defining the plugin class"""
name = "Empty Plugin"
flask_blueprints = [bp]
appbuilder_views = [{"name": "Empty Plugin", "category": "Extra Views", "view": EmptyPluginView()}]
|
EmptyPlugin
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/execution_time/supervisor.py
|
{
"start": 14498,
"end": 34945
}
|
class ____:
"""
Base class for managing subprocesses in Airflow's TaskSDK.
This class handles common functionalities required for subprocess management, such as
socket handling, process monitoring, and request handling.
"""
id: UUID
pid: int
"""The process ID of the child process"""
stdin: socket
"""The handle connected to stdin of the child process"""
decoder: ClassVar[TypeAdapter]
"""The decoder to use for incoming messages from the child process."""
_process: psutil.Process = attrs.field(repr=False)
"""File descriptor for request handling."""
_exit_code: int | None = attrs.field(default=None, init=False)
_process_exit_monotonic: float | None = attrs.field(default=None, init=False)
_open_sockets: weakref.WeakKeyDictionary[socket, str] = attrs.field(
factory=weakref.WeakKeyDictionary, init=False
)
selector: selectors.BaseSelector = attrs.field(factory=selectors.DefaultSelector, repr=False)
_frame_encoder: msgspec.msgpack.Encoder = attrs.field(factory=comms._new_encoder, repr=False)
process_log: FilteringBoundLogger = attrs.field(repr=False)
subprocess_logs_to_stdout: bool = False
"""Duplicate log messages to stdout, or only send them to ``self.process_log``."""
start_time: float = attrs.field(factory=time.monotonic)
"""The start time of the child process."""
@classmethod
def start(
cls,
*,
target: Callable[[], None] = _subprocess_main,
logger: FilteringBoundLogger | None = None,
**constructor_kwargs,
) -> Self:
"""Fork and start a new subprocess with the specified target function."""
# Create socketpairs/"pipes" to connect to the stdin and out from the subprocess
child_stdout, read_stdout = socketpair()
child_stderr, read_stderr = socketpair()
# Place for child to send requests/read responses, and the server side to read/respond
child_requests, read_requests = socketpair()
# Open the socketpair before forking off the child, so that it is open when we fork.
child_logs, read_logs = socketpair()
pid = os.fork()
if pid == 0:
# Close and delete of the parent end of the sockets.
cls._close_unused_sockets(read_requests, read_stdout, read_stderr, read_logs)
# Python GC should delete these for us, but lets make double sure that we don't keep anything
# around in the forked processes, especially things that might involve open files or sockets!
del constructor_kwargs
del logger
try:
# Run the child entrypoint
_fork_main(child_requests, child_stdout, child_stderr, child_logs.fileno(), target)
except BaseException as e:
import traceback
with suppress(BaseException):
# We can't use log here, as if we except out of _fork_main something _weird_ went on.
print("Exception in _fork_main, exiting with code 124", file=sys.stderr)
traceback.print_exception(type(e), e, e.__traceback__, file=sys.stderr)
# It's really super super important we never exit this block. We are in the forked child, and if we
# do then _THINGS GET WEIRD_.. (Normally `_fork_main` itself will `_exit()` so we never get here)
os._exit(124)
# Close the remaining parent-end of the sockets we've passed to the child via fork. We still have the
# other end of the pair open
cls._close_unused_sockets(child_stdout, child_stderr, child_logs)
logger = logger or cast("FilteringBoundLogger", structlog.get_logger(logger_name="task").bind())
proc = cls(
pid=pid,
stdin=read_requests,
process=psutil.Process(pid),
process_log=logger,
start_time=time.monotonic(),
**constructor_kwargs,
)
proc._register_pipe_readers(
stdout=read_stdout,
stderr=read_stderr,
requests=read_requests,
logs=read_logs,
)
return proc
def _register_pipe_readers(self, stdout: socket, stderr: socket, requests: socket, logs: socket):
"""Register handlers for subprocess communication channels."""
# self.selector is a way of registering a handler/callback to be called when the given IO channel has
# activity to read on (https://www.man7.org/linux/man-pages/man2/select.2.html etc, but better
# alternatives are used automatically) -- this is a way of having "event-based" code, but without
# needing full async, to read and process output from each socket as it is received.
# Track the open sockets, and for debugging what type each one is
self._open_sockets.update(
(
(stdout, "stdout"),
(stderr, "stderr"),
(logs, "logs"),
(requests, "requests"),
)
)
target_loggers: tuple[FilteringBoundLogger, ...] = (self.process_log,)
if self.subprocess_logs_to_stdout:
target_loggers += (log,)
self.selector.register(
stdout, selectors.EVENT_READ, self._create_log_forwarder(target_loggers, "task.stdout")
)
self.selector.register(
stderr,
selectors.EVENT_READ,
self._create_log_forwarder(target_loggers, "task.stderr", log_level=logging.ERROR),
)
self.selector.register(
logs,
selectors.EVENT_READ,
make_buffered_socket_reader(
process_log_messages_from_subprocess(target_loggers), on_close=self._on_socket_closed
),
)
self.selector.register(
requests,
selectors.EVENT_READ,
length_prefixed_frame_reader(self.handle_requests(log), on_close=self._on_socket_closed),
)
def _create_log_forwarder(self, loggers, name, log_level=logging.INFO) -> Callable[[socket], bool]:
"""Create a socket handler that forwards logs to a logger."""
loggers = tuple(
reconfigure_logger(
log,
structlog.processors.CallsiteParameterAdder,
)
for log in loggers
)
return make_buffered_socket_reader(
forward_to_log(loggers, logger=name, level=log_level), on_close=self._on_socket_closed
)
def _on_socket_closed(self, sock: socket):
# We want to keep servicing this process until we've read up to EOF from all the sockets.
with suppress(KeyError):
self.selector.unregister(sock)
del self._open_sockets[sock]
def send_msg(
self, msg: BaseModel | None, request_id: int, error: ErrorResponse | None = None, **dump_opts
):
"""
Send the msg as a length-prefixed response frame.
``request_id`` is the ID that the client sent in its request, and has no meaning to the server.
"""
if msg:
frame = _ResponseFrame(id=request_id, body=msg.model_dump(**dump_opts))
else:
err_resp = error.model_dump() if error else None
frame = _ResponseFrame(id=request_id, error=err_resp)
self.stdin.sendall(frame.as_bytes())
def handle_requests(self, log: FilteringBoundLogger) -> Generator[None, _RequestFrame, None]:
"""Handle incoming requests from the task process, respond with the appropriate data."""
while True:
request = yield
try:
msg = self.decoder.validate_python(request.body)
except Exception:
log.exception("Unable to decode message", body=request.body)
continue
try:
self._handle_request(msg, log, request.id)
except ServerResponseError as e:
error_details = e.response.json() if e.response else None
log.error(
"API server error",
status_code=e.response.status_code,
detail=error_details,
message=str(e),
)
# Send error response back to task so that the error appears in the task logs
self.send_msg(
msg=None,
error=ErrorResponse(
error=ErrorType.API_SERVER_ERROR,
detail={
"status_code": e.response.status_code,
"message": str(e),
"detail": error_details,
},
),
request_id=request.id,
)
def _handle_request(self, msg, log: FilteringBoundLogger, req_id: int) -> None:
raise NotImplementedError()
@staticmethod
def _close_unused_sockets(*sockets):
"""Close unused ends of sockets after fork."""
for sock in sockets:
sock.close()
def _cleanup_open_sockets(self):
"""Force-close any sockets that never reported EOF."""
# In extremely busy environments the selector can fail to deliver a
# final read event before the subprocess exits. Without closing these
# sockets the supervisor would wait forever thinking they are still
# active. This cleanup ensures we always release resources and exit.
stuck_sockets = []
for sock, socket_type in self._open_sockets.items():
fileno = "unknown"
with suppress(Exception):
fileno = sock.fileno()
sock.close()
stuck_sockets.append(f"{socket_type}(fd={fileno})")
if stuck_sockets:
log.warning("Force-closed stuck sockets", pid=self.pid, sockets=stuck_sockets)
self.selector.close()
self.stdin.close()
def kill(
self,
signal_to_send: signal.Signals = signal.SIGINT,
escalation_delay: float = 5.0,
force: bool = False,
):
"""
Attempt to terminate the subprocess with a given signal.
If the process does not exit within `escalation_delay` seconds, escalate to SIGTERM and eventually SIGKILL if necessary.
:param signal_to_send: The signal to send initially (default is SIGINT).
:param escalation_delay: Time in seconds to wait before escalating to a stronger signal.
:param force: If True, ensure escalation through all signals without skipping.
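Example (an illustrative sketch; ``proc`` is assumed to be a started, supervised subprocess)::

    # Start at SIGTERM and escalate to SIGKILL if the process survives the delay.
    proc.kill(signal.SIGTERM, escalation_delay=10.0, force=True)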
"""
if self._exit_code is not None:
return
# Escalation sequence: SIGINT -> SIGTERM -> SIGKILL
escalation_path: list[signal.Signals] = [signal.SIGINT, signal.SIGTERM, signal.SIGKILL]
if force and signal_to_send in escalation_path:
# Start from `signal_to_send` and escalate to the end of the escalation path
escalation_path = escalation_path[escalation_path.index(signal_to_send) :]
else:
escalation_path = [signal_to_send]
for sig in escalation_path:
try:
self._process.send_signal(sig)
start = time.monotonic()
end = start + escalation_delay
now = start
while now < end:
# Service subprocess events during the escalation delay. This will return as soon as it's
# read from any of the sockets, so we need to re-run it if the process is still alive
if (
exit_code := self._service_subprocess(
max_wait_time=end - now, raise_on_timeout=False, expect_signal=sig
)
) is not None:
log.info("Process exited", pid=self.pid, exit_code=exit_code, signal_sent=sig.name)
return
now = time.monotonic()
msg = "Process did not terminate in time"
if sig != escalation_path[-1]:
msg += "; escalating"
log.warning(msg, pid=self.pid, signal=sig.name)
except psutil.NoSuchProcess:
log.debug("Process already terminated", pid=self.pid)
self._exit_code = -1
return
log.error("Failed to terminate process after full escalation", pid=self.pid)
def wait(self) -> int:
raise NotImplementedError()
def __rich_repr__(self):
yield "id", self.id
yield "pid", self.pid
# only include this if it's not the default (third argument)
yield "exit_code", self._exit_code, None
__rich_repr__.angular = True # type: ignore[attr-defined]
def __repr__(self) -> str:
rep = f"<{type(self).__name__} id={self.id} pid={self.pid}"
if self._exit_code is not None:
rep += f" exit_code={self._exit_code}"
return rep + " >"
def _service_subprocess(
self, max_wait_time: float, raise_on_timeout: bool = False, expect_signal: None | int = None
):
"""
Service subprocess events by processing socket activity and checking for process exit.
This method:
- Waits for activity on the registered file objects (via `self.selector.select`).
- Processes any events triggered on these file objects.
- Checks if the subprocess has exited during the wait.
:param max_wait_time: Maximum time to block while waiting for events, in seconds.
:param raise_on_timeout: If True, raise an exception if the subprocess does not exit within the timeout.
:param expect_signal: Signal not to log if the task exits with this code.
:returns: The process exit code, or None if it's still alive
"""
# Ensure minimum timeout to prevent CPU spike with tight loop when timeout is 0 or negative
timeout = max(0.01, max_wait_time)
events = self.selector.select(timeout=timeout)
for key, _ in events:
# Retrieve the handler responsible for processing this file object (e.g., stdout, stderr)
socket_handler, on_close = key.data
# Example of handler behavior:
# If the subprocess writes "Hello, World!" to stdout:
# - `socket_handler` reads and processes the message.
# - If EOF is reached, the handler returns False to signal no more reads are expected.
# - BrokenPipeError should be caught and treated as if the handler returned false, similar
# to EOF case
try:
need_more = socket_handler(key.fileobj)
except (BrokenPipeError, ConnectionResetError):
need_more = False
# If the handler signals that the file object is no longer needed (EOF, closed, etc.)
# unregister it from the selector to stop monitoring; `wait()` blocks until all selectors
# are removed.
if not need_more:
sock: socket = key.fileobj # type: ignore[assignment]
on_close(sock)
sock.close()
# Check if the subprocess has exited
return self._check_subprocess_exit(raise_on_timeout=raise_on_timeout, expect_signal=expect_signal)
def _check_subprocess_exit(
self, raise_on_timeout: bool = False, expect_signal: None | int = None
) -> int | None:
"""Check if the subprocess has exited."""
if self._exit_code is not None:
return self._exit_code
try:
self._exit_code = self._process.wait(timeout=0)
except psutil.TimeoutExpired:
if raise_on_timeout:
raise
else:
self._process_exit_monotonic = time.monotonic()
if expect_signal is not None and self._exit_code == -expect_signal:
# Bypass logging, the caller expected us to exit with this
return self._exit_code
# Put a message in the viewable task logs
if self._exit_code == -signal.SIGSEGV:
self.process_log.critical(SIGSEGV_MESSAGE)
# psutil turns signal exit codes into an enum for us. Handy. (Otherwise it's a plain integer.)
elif name := getattr(self._exit_code, "name", None):
message = "Process terminated by signal."
level = logging.ERROR
if self._exit_code == -signal.SIGKILL:
message += " Likely out of memory error (OOM)."
level = logging.CRITICAL
message += " For more information, see https://airflow.apache.org/docs/apache-airflow/stable/troubleshooting.html#process-terminated-by-signal."
self.process_log.log(level, message, signal=int(self._exit_code), signal_name=name)
elif self._exit_code:
# Run of the mill exit code (1, 42, etc).
# Most task errors should be caught in the task runner and _that_ exits with 0.
self.process_log.warning("Process exited abnormally", exit_code=self._exit_code)
return self._exit_code
_REMOTE_LOGGING_CONN_CACHE: dict[str, Connection | None] = {}
def _fetch_remote_logging_conn(conn_id: str, client: Client) -> Connection | None:
"""
Fetch and cache connection for remote logging.
Args:
conn_id: Connection ID to fetch
client: API client for making requests
Returns:
Connection object or None if not found.
"""
# Since we need to use the API Client directly, we can't use Connection.get as that would try to use
# SUPERVISOR_COMMS
    # TODO: Store in the SecretsCache if it's enabled - see #48858
if conn_id in _REMOTE_LOGGING_CONN_CACHE:
return _REMOTE_LOGGING_CONN_CACHE[conn_id]
backends = ensure_secrets_backend_loaded()
for secrets_backend in backends:
try:
conn = secrets_backend.get_connection(conn_id=conn_id)
if conn:
_REMOTE_LOGGING_CONN_CACHE[conn_id] = conn
return conn
except Exception:
log.exception(
"Unable to retrieve connection from secrets backend (%s). "
"Checking subsequent secrets backend.",
type(secrets_backend).__name__,
)
conn = client.connections.get(conn_id)
if isinstance(conn, ConnectionResponse):
conn_result = ConnectionResult.from_conn_response(conn)
from airflow.sdk.definitions.connection import Connection
result: Connection | None = Connection(**conn_result.model_dump(exclude={"type"}, by_alias=True))
else:
result = None
_REMOTE_LOGGING_CONN_CACHE[conn_id] = result
return result
@contextlib.contextmanager
def _remote_logging_conn(client: Client):
"""
Pre-fetch the needed remote logging connection with caching.
    If a remote logger is in use and the logging/remote_logging option is set, we fetch the connection
    it needs now, directly from the API client, and store it in an env var so that when the logging
    hook later looks up the connection it can find it in the environment.
    This is needed because BaseHook.get_connection looks for SUPERVISOR_COMMS, which does not exist yet
    while we are still in the supervisor process.
    The connection object is cached per conn_id; only the connection is cached, not the per-task API
    client instance, so client instances are not retained in global caches.
"""
from airflow.sdk.log import load_remote_conn_id, load_remote_log_handler
if load_remote_log_handler() is None or not (conn_id := load_remote_conn_id()):
# Nothing to do
yield
return
# Fetch connection details on-demand without caching the entire API client instance
conn = _fetch_remote_logging_conn(conn_id, client)
if conn:
key = f"AIRFLOW_CONN_{conn_id.upper()}"
old = os.getenv(key)
os.environ[key] = conn.get_uri()
try:
yield
finally:
if old is None:
del os.environ[key]
else:
os.environ[key] = old
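# Illustrative sketch (not part of the original source): how the supervisor might wrap remote log
# handler setup with the context manager above, so the hook's connection lookup succeeds via the
# AIRFLOW_CONN_<CONN_ID> environment variable. The `client` and `configure_remote_handler` names are
# hypothetical placeholders.
#
#     with _remote_logging_conn(client):
#         handler = configure_remote_handler()   # hook resolves its connection from os.environ here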
@attrs.define(kw_only=True)
|
WatchedSubprocess
|
python
|
docker__docker-py
|
docker/types/containers.py
|
{
"start": 23806,
"end": 27412
}
|
class ____(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
stdin_open=False, tty=False, ports=None, environment=None,
volumes=None, network_disabled=False, entrypoint=None,
working_dir=None, domainname=None, host_config=None, mac_address=None,
labels=None, stop_signal=None, networking_config=None,
healthcheck=None, stop_timeout=None, runtime=None
):
if stop_timeout is not None and version_lt(version, '1.25'):
raise errors.InvalidVersion(
'stop_timeout was only introduced in API version 1.25'
)
if healthcheck is not None:
if version_lt(version, '1.24'):
raise errors.InvalidVersion(
'Health options were only introduced in API version 1.24'
)
if version_lt(version, '1.29') and 'StartPeriod' in healthcheck:
raise errors.InvalidVersion(
'healthcheck start period was introduced in API '
'version 1.29'
)
if isinstance(command, str):
command = split_command(command)
if isinstance(entrypoint, str):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if isinstance(labels, list):
labels = {lbl: '' for lbl in labels}
if isinstance(ports, list):
exposed_ports = {}
for port_definition in ports:
port = port_definition
proto = 'tcp'
if isinstance(port_definition, tuple):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
exposed_ports[f'{port}/{proto}'] = {}
ports = exposed_ports
if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(volumes, list):
volumes_dict = {}
for vol in volumes:
volumes_dict[vol] = {}
volumes = volumes_dict
if healthcheck and isinstance(healthcheck, dict):
healthcheck = Healthcheck(**healthcheck)
attach_stdin = False
attach_stdout = False
attach_stderr = False
stdin_once = False
if not detach:
attach_stdout = True
attach_stderr = True
if stdin_open:
attach_stdin = True
stdin_once = True
self.update({
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
'User': str(user) if user is not None else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
'AttachStdin': attach_stdin,
'AttachStdout': attach_stdout,
'AttachStderr': attach_stderr,
'Env': environment,
'Cmd': command,
'Image': image,
'Volumes': volumes,
'NetworkDisabled': network_disabled,
'Entrypoint': entrypoint,
'WorkingDir': working_dir,
'HostConfig': host_config,
'NetworkingConfig': networking_config,
'MacAddress': mac_address,
'Labels': labels,
'StopSignal': stop_signal,
'Healthcheck': healthcheck,
'StopTimeout': stop_timeout,
'Runtime': runtime
})
|
ContainerConfig
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/hwloc/package.py
|
{
"start": 217,
"end": 260
}
|
class ____(Package):
version("2.0.3")
|
Hwloc
|
python
|
pytest-dev__pytest
|
src/_pytest/warning_types.py
|
{
"start": 3345,
"end": 4398
}
|
class ____(PytestWarning):
"""When the lsof plugin finds leaked fds."""
__module__ = "pytest"
def warn_explicit_for(method: FunctionType, message: PytestWarning) -> None:
"""
    Issue the warning ``message`` for the definition of the given ``method``.
    This helps to log warnings for functions defined prior to finding an issue with them
    (like hook wrappers being marked in a legacy mechanism).
"""
lineno = method.__code__.co_firstlineno
filename = inspect.getfile(method)
module = method.__module__
mod_globals = method.__globals__
try:
warnings.warn_explicit(
message,
type(message),
filename=filename,
module=module,
registry=mod_globals.setdefault("__warningregistry__", {}),
lineno=lineno,
)
except Warning as w:
# If warnings are errors (e.g. -Werror), location information gets lost, so we add it to the message.
raise type(w)(f"{w}\n at {filename}:{lineno}") from None
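# Illustrative sketch (not part of the original source): how warn_explicit_for might be used to
# attribute a PytestWarning to the line where a previously defined hook function was declared.
# `my_hook` and the warning text are hypothetical.
#
#     def my_hook():
#         ...
#
#     warn_explicit_for(my_hook, PytestWarning("legacy hookwrapper marker detected"))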
|
PytestFDWarning
|
python
|
bokeh__bokeh
|
src/bokeh/core/serialization.py
|
{
"start": 3196,
"end": 3314
}
|
class ____(TypedDict):
type: Literal["object"]
name: str
attributes: NotRequired[dict[str, AnyRep]]
|
ObjectRep
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-kyriba/source_kyriba/source.py
|
{
"start": 8616,
"end": 8696
}
|
class ____(BankBalancesStream):
balance_type = "INTRADAY"
|
BankBalancesIntraday
|
python
|
python__mypy
|
mypy/nodes.py
|
{
"start": 1465,
"end": 1514
}
|
class ____(Enum):
VALUE = "NotParsed"
|
NotParsed
|