| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|

Column summary: `language` (stringclasses, 1 value); `repo` (stringclasses, 346 values); `path` (string, 6 to 201 characters); `class_span` (dict); `source` (string, 21 characters to 2.38M characters); `target` (string, 1 to 96 characters).
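Each record below lists its six cells in column order, one cell per block, separated by lines containing only `|`: the language, the source repository, the file path, the `class_span` offsets, the `source` snippet with the class name masked as `____`, and the `target` class name. The sketch below is one way such a record could be consumed; it assumes `class_span` holds character offsets of the snippet within the original file and that the mask sits at the class definition. The `fill_mask` helper and the record literal are illustrative, not part of the dataset.

```python
# Hypothetical helper for one record of this table; field meanings are
# inferred from the rows below, not taken from a documented schema.
def fill_mask(record: dict) -> str:
    """Substitute the target class name back into the masked source snippet."""
    source, target = record["source"], record["target"]
    assert "____" in source, "each source snippet is expected to contain the mask"
    return source.replace("____", target, 1)

example = {
    "language": "python",
    "repo": "rushter__MLAlgorithms",
    "path": "mla/ensemble/gbm.py",
    "class_span": {"start": 4237, "end": 4484},  # assumed: offsets of the snippet in the file
    "source": "class ____(GradientBoosting):\n    ...",
    "target": "GradientBoostingClassifier",
}
print(fill_mask(example))  # class GradientBoostingClassifier(GradientBoosting): ...
```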
python
|
rushter__MLAlgorithms
|
mla/ensemble/gbm.py
|
{
"start": 4237,
"end": 4484
}
|
class ____(GradientBoosting):
    def fit(self, X, y=None):
        # Convert labels from {0, 1} to {-1, 1}
        y = (y * 2) - 1
        self.loss = LogisticLoss()
        super(GradientBoostingClassifier, self).fit(X, y)
|
GradientBoostingClassifier
|
python
|
pytorch__pytorch
|
torch/_functorch/_aot_autograd/descriptors.py
|
{
"start": 23976,
"end": 24220
}
|
class ____(AOTOutput):
    """The final offset from the functionalized RNG calls, forward only"""
    def expr(self) -> str:
        return "__philox_updated_forward_offset"
@dataclasses.dataclass(frozen=True)
|
PhiloxUpdatedForwardOffsetAOTOutput
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/statsd/test_labels_networkpolicy.py
|
{
"start": 900,
"end": 4044
}
|
class ____:
    """Tests statsd network policy labels."""
    TEMPLATE_FILE = "templates/statsd/statsd-networkpolicy.yaml"
    def test_should_add_global_labels(self):
        """Test adding only .Values.labels."""
        docs = render_chart(
            values={
                "statsd": {"enabled": True},
                "networkPolicies": {"enabled": True},
                "labels": {"test_global_label": "test_global_label_value"},
            },
            show_only=[self.TEMPLATE_FILE],
        )
        assert "test_global_label" in jmespath.search("metadata.labels", docs[0])
        assert jmespath.search("metadata.labels", docs[0])["test_global_label"] == "test_global_label_value"
    def test_should_add_component_specific_labels(self):
        """Test adding only .Values.statsd.labels."""
        docs = render_chart(
            values={
                "statsd": {
                    "enabled": True,
                    "labels": {"test_component_label": "test_component_label_value"},
                },
                "networkPolicies": {"enabled": True},
            },
            show_only=[self.TEMPLATE_FILE],
        )
        assert "test_component_label" in jmespath.search("metadata.labels", docs[0])
        assert (
            jmespath.search("metadata.labels", docs[0])["test_component_label"]
            == "test_component_label_value"
        )
    def test_should_merge_global_and_component_specific_labels(self):
        """Test adding both .Values.labels and .Values.statsd.labels."""
        docs = render_chart(
            values={
                "statsd": {
                    "enabled": True,
                    "labels": {"test_component_label": "test_component_label_value"},
                },
                "networkPolicies": {"enabled": True},
                "labels": {"test_global_label": "test_global_label_value"},
            },
            show_only=[self.TEMPLATE_FILE],
        )
        assert "test_global_label" in jmespath.search("metadata.labels", docs[0])
        assert jmespath.search("metadata.labels", docs[0])["test_global_label"] == "test_global_label_value"
        assert "test_component_label" in jmespath.search("metadata.labels", docs[0])
        assert (
            jmespath.search("metadata.labels", docs[0])["test_component_label"]
            == "test_component_label_value"
        )
    def test_component_specific_labels_should_override_global_labels(self):
        """Test that component-specific labels take precedence over global labels with the same key."""
        docs = render_chart(
            values={
                "statsd": {
                    "enabled": True,
                    "labels": {"common_label": "component_value"},
                },
                "networkPolicies": {"enabled": True},
                "labels": {"common_label": "global_value"},
            },
            show_only=[self.TEMPLATE_FILE],
        )
        assert "common_label" in jmespath.search("metadata.labels", docs[0])
        assert jmespath.search("metadata.labels", docs[0])["common_label"] == "component_value"
|
TestStatsdNetworkPolicy
|
python
|
pandas-dev__pandas
|
pandas/tests/frame/indexing/test_xs.py
|
{
"start": 763,
"end": 3991
}
|
class ____:
    def test_xs(self, float_frame):
        idx = float_frame.index[5]
        xs = float_frame.xs(idx)
        for item, value in xs.items():
            if np.isnan(value):
                assert np.isnan(float_frame[item][idx])
            else:
                assert value == float_frame[item][idx]
    def test_xs_mixed(self):
        # mixed-type xs
        test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
        frame = DataFrame(test_data)
        xs = frame.xs("1")
        assert xs.dtype == np.object_
        assert xs["A"] == 1
        assert xs["B"] == "1"
    def test_xs_dt_error(self, datetime_frame):
        with pytest.raises(
            KeyError, match=re.escape("Timestamp('1999-12-31 00:00:00')")
        ):
            datetime_frame.xs(datetime_frame.index[0] - BDay())
    def test_xs_other(self, float_frame):
        float_frame_orig = float_frame.copy()
        # xs get column
        series = float_frame.xs("A", axis=1)
        expected = float_frame["A"]
        tm.assert_series_equal(series, expected)
        # view is returned if possible
        series = float_frame.xs("A", axis=1)
        series[:] = 5
        # The view shouldn't propagate mutations
        tm.assert_series_equal(float_frame["A"], float_frame_orig["A"])
        assert not (expected == 5).all()
    def test_xs_corner(self):
        # pathological mixed-type reordering case
        df = DataFrame(index=[0], columns=Index([], dtype="str"))
        df["A"] = 1.0
        df["B"] = "foo"
        df["C"] = 2.0
        df["D"] = "bar"
        df["E"] = 3.0
        xs = df.xs(0)
        exp = Series([1.0, "foo", 2.0, "bar", 3.0], index=list("ABCDE"), name=0)
        tm.assert_series_equal(xs, exp)
        # no columns but Index(dtype=object)
        df = DataFrame(index=["a", "b", "c"])
        result = df.xs("a")
        expected = Series([], name="a", dtype=np.float64)
        tm.assert_series_equal(result, expected)
    def test_xs_duplicates(self):
        df = DataFrame(
            np.random.default_rng(2).standard_normal((5, 2)),
            index=["b", "b", "c", "b", "a"],
        )
        cross = df.xs("c")
        exp = df.iloc[2]
        tm.assert_series_equal(cross, exp)
    def test_xs_keep_level(self):
        df = DataFrame(
            {
                "day": {0: "sat", 1: "sun"},
                "flavour": {0: "strawberry", 1: "strawberry"},
                "sales": {0: 10, 1: 12},
                "year": {0: 2008, 1: 2008},
            }
        ).set_index(["year", "flavour", "day"])
        result = df.xs("sat", level="day", drop_level=False)
        expected = df[:1]
        tm.assert_frame_equal(result, expected)
        result = df.xs((2008, "sat"), level=["year", "day"], drop_level=False)
        tm.assert_frame_equal(result, expected)
    def test_xs_view(self):
        # in 0.14 this will return a view if possible a copy otherwise, but
        # this is numpy dependent
        dm = DataFrame(np.arange(20.0).reshape(4, 5), index=range(4), columns=range(5))
        df_orig = dm.copy()
        with tm.raises_chained_assignment_error():
            dm.xs(2)[:] = 20
        tm.assert_frame_equal(dm, df_orig)
|
TestXS
|
python
|
bokeh__bokeh
|
src/bokeh/models/dom.py
|
{
"start": 6232,
"end": 6412
}
|
class ____(Placeholder):
    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
|
Index
|
python
|
pypa__pip
|
src/pip/_vendor/packaging/metadata.py
|
{
"start": 1161,
"end": 1722
}
|
class ____(ValueError):
    """A metadata field contains invalid data."""
    field: str
    """The name of the field that contains invalid data."""
    def __init__(self, field: str, message: str) -> None:
        self.field = field
        super().__init__(message)
# The RawMetadata class attempts to make as few assumptions about the underlying
# serialization formats as possible. The idea is that as long as a serialization
# formats offer some very basic primitives in *some* way then we can support
# serializing to and from that format.
|
InvalidMetadata
|
python
|
aio-libs__aiohttp
|
aiohttp/helpers.py
|
{
"start": 7074,
"end": 9660
}
|
class ____:
    proxy: URL
    proxy_auth: BasicAuth | None
def basicauth_from_netrc(netrc_obj: netrc.netrc | None, host: str) -> BasicAuth:
    """
    Return :py:class:`~aiohttp.BasicAuth` credentials for ``host`` from ``netrc_obj``.
    :raises LookupError: if ``netrc_obj`` is :py:data:`None` or if no
        entry is found for the ``host``.
    """
    if netrc_obj is None:
        raise LookupError("No .netrc file found")
    auth_from_netrc = netrc_obj.authenticators(host)
    if auth_from_netrc is None:
        raise LookupError(f"No entry for {host!s} found in the `.netrc` file.")
    login, account, password = auth_from_netrc
    # TODO(PY311): username = login or account
    # Up to python 3.10, account could be None if not specified,
    # and login will be empty string if not specified. From 3.11,
    # login and account will be empty string if not specified.
    username = login if (login or account is None) else account
    # TODO(PY311): Remove this, as password will be empty string
    # if not specified
    if password is None:
        password = ""  # type: ignore[unreachable]
    return BasicAuth(username, password)
def proxies_from_env() -> dict[str, ProxyInfo]:
    proxy_urls = {
        k: URL(v)
        for k, v in getproxies().items()
        if k in ("http", "https", "ws", "wss")
    }
    netrc_obj = netrc_from_env()
    stripped = {k: strip_auth_from_url(v) for k, v in proxy_urls.items()}
    ret = {}
    for proto, val in stripped.items():
        proxy, auth = val
        if proxy.scheme in ("https", "wss"):
            client_logger.warning(
                "%s proxies %s are not supported, ignoring", proxy.scheme.upper(), proxy
            )
            continue
        if netrc_obj and auth is None:
            if proxy.host is not None:
                try:
                    auth = basicauth_from_netrc(netrc_obj, proxy.host)
                except LookupError:
                    auth = None
        ret[proto] = ProxyInfo(proxy, auth)
    return ret
def get_env_proxy_for_url(url: URL) -> tuple[URL, BasicAuth | None]:
    """Get a permitted proxy for the given URL from the env."""
    if url.host is not None and proxy_bypass(url.host):
        raise LookupError(f"Proxying is disallowed for `{url.host!r}`")
    proxies_in_env = proxies_from_env()
    try:
        proxy_info = proxies_in_env[url.scheme]
    except KeyError:
        raise LookupError(f"No proxies found for `{url!s}` in the env")
    else:
        return proxy_info.proxy, proxy_info.proxy_auth
@frozen_dataclass_decorator
|
ProxyInfo
|
python
|
openai__openai-python
|
src/openai/types/shared/response_format_text_python.py
|
{
"start": 201,
"end": 342
}
|
class ____(BaseModel):
    type: Literal["python"]
    """The type of response format being defined. Always `python`."""
|
ResponseFormatTextPython
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/components_tests/component_tree_tests/test_get_all_components.py
|
{
"start": 3468,
"end": 5356
}
|
class ____(dg.Component):
    def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
        return dg.Definitions(assets=[dg.AssetSpec("bar_asset")])
@dg.component_instance
def load_foo(context: dg.ComponentLoadContext) -> FooComponent:
    return FooComponent()
@dg.component_instance
def load_foo_subclass(context: dg.ComponentLoadContext) -> FooSubclassComponent:
    return FooSubclassComponent()
@dg.component_instance
def load_bar(context: dg.ComponentLoadContext) -> BarComponent:
    return BarComponent()
@dg.component_instance
def unannotated(context: dg.ComponentLoadContext):
    # note: won't be detected
    return BarComponent()
@dg.component_instance
def error_component(context: dg.ComponentLoadContext):
    raise Exception("Can't load this component!")
""")
    with sandbox.build_component_tree() as tree:
        loaders_module = importlib.import_module(
            f"{sandbox.project_name}.defs.python_components"
        )
        FooComponent = loaders_module.FooComponent
        FooSubclassComponent = loaders_module.FooSubclassComponent
        BarComponent = loaders_module.BarComponent
        # Test getting all FooComponent instances
        foo_components = tree.get_all_components(of_type=FooComponent)
        assert len(foo_components) == 2
        assert isinstance(foo_components[0], FooComponent)
        assert isinstance(foo_components[1], FooSubclassComponent)
        # Test getting all BarComponent instances
        bar_components = tree.get_all_components(of_type=BarComponent)
        assert len(bar_components) == 1
        assert isinstance(bar_components[0], BarComponent)
        # Error if you get all components
        with pytest.raises(ComponentTreeException):
            tree.get_all_components(of_type=dg.Component)
|
BarComponent
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_set_print_scale01.py
|
{
"start": 315,
"end": 1215
}
|
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        self.set_filename("set_print_scale01.xlsx")
        self.ignore_files = [
            "xl/printerSettings/printerSettings1.bin",
            "xl/worksheets/_rels/sheet1.xml.rels",
        ]
        self.ignore_elements = {
            "[Content_Types].xml": ['<Default Extension="bin"'],
            "xl/worksheets/sheet1.xml": ["<pageMargins"],
        }
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with printer settings."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        worksheet.set_print_scale(110)
        worksheet.set_paper(9)
        worksheet.write("A1", "Foo")
        workbook.close()
        self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
tensorflow__tensorflow
|
tensorflow/python/eager/polymorphic_function/polymorphic_function_xla_jit_test.py
|
{
"start": 1951,
"end": 43540
}
|
class ____(xla_test.XLATestCase):
def _compareTwoMethodsCompilerIROutput(self, f, args, kwargs):
"""Assert the two different methods (tensor_spec inputs or tensor inputs) experimental_get_compiler give same HLO text."""
flat_args = list(args) + list(kwargs.values())
if not all([isinstance(x, tensor.Tensor) for x in flat_args]):
self.skipTest('It only support args and kwargs are all tf.Tensor types.')
args_spec = nest.map_structure(tensor.TensorSpec.from_tensor, args)
kwargs_spec = nest.map_structure(tensor.TensorSpec.from_tensor, kwargs)
hlo_1 = f.experimental_get_compiler_ir(*args, **kwargs)()
hlo_2 = f.experimental_get_compiler_ir(*args_spec, **kwargs_spec)()
if hlo_1 != hlo_2:
self.fail(
'The tensor_spec way experimental_get_compiler_ir give diff result to'
f' normal experimental_get_compiler_ir. \nhlo_1:\n{hlo_1}'
f'\nhlo_2:\n{hlo_2}\n'
)
def testAutoclusteringWithTfFunction(self):
if 'tpu' in self.device.lower():
self.skipTest('Autoclustering does not run on TPU')
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=False)
def outer(a, b, c):
return a * inner(b, c) + c
@polymorphic_function.function(jit_compile=True)
def inner(b, c):
return b + c * b
i1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
i2 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
i3 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
with context.collect_graphs(optimized=True) as graphs:
outer(i1, i2, i3)
if test_util.is_xla_enabled():
self.assertIn('_XlaRun', [n.op for n in graphs[0].node])
else:
self.assertNotIn('_XlaRun', [n.op for n in graphs[0].node])
def testBasic(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x, a):
return x + a
func = polymorphic_function.function(fn, jit_compile=False)
xla_func = polymorphic_function.function(fn, jit_compile=True)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([2, 3, 3, 4, 4], func(inputs, 1))
self.assertAllClose([2, 3, 3, 4, 4], xla_func(inputs, 1))
def testBasicInt32(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def fn(x, a):
return x + a
inputs = constant_op.constant([1, 2, 2, 3, 3], dtype=dtypes.int32)
self.assertAllClose([2, 3, 3, 4, 4], fn(inputs, 1))
def testDerivative(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x, a):
return 2 * x + a
xla_func = polymorphic_function.function(fn, jit_compile=True)
with backprop.GradientTape() as tape:
inputs = constant_op.constant([1., 2., 2., 3., 3.])
tape.watch(inputs)
outputs = xla_func(inputs, 1)
self.assertAllClose([2, 2, 2, 2, 2], tape.gradient(outputs, inputs))
# pylint: disable=protected-access
(forward, backward) = xla_func.get_concrete_function(
inputs, 1)._delayed_rewrite_functions.forward_backward()
# Check that the must-compile attribute gets correctly propagated to the
# created derivatives.
self.assertTrue(backward.function_def.attr['_XlaMustCompile'])
self.assertTrue(forward.cached_definition.attr['_XlaMustCompile'])
# Calling function with jit_compile=True from
# jit_compile=False should compile the inner func.
def testNestedCall(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162800687: Inner function runs on host')
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def fn(x, a):
return x + a
@polymorphic_function.function(jit_compile=False)
def fn2(x, a):
return fn(x, a)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([2, 3, 3, 4, 4], fn2(inputs, 1))
def testNestedCallUnsupportedOps(self):
if 'tpu' in self.device.lower():
self.skipTest('Outside compilation will extract string_length to CPU')
with ops.device('device:{}:0'.format(self.device)):
def fn(x):
return string_ops.string_length(
string_ops.string_format('{}', x))
xla_func = polymorphic_function.function(fn, jit_compile=True)
def fn2(x):
return xla_func(x)
func = polymorphic_function.function(fn2, jit_compile=False)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(
errors.InvalidArgumentError, 'unsupported operations'
):
func(inputs)
def testUnsupportedOps(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x):
return string_ops.string_length(
string_ops.string_format('{}', x))
xla_func = polymorphic_function.function(fn, jit_compile=True)
with self.assertRaisesRegex(
errors.InvalidArgumentError, 'unsupported operations'
):
xla_func(constant_op.constant([3.1, 3.2]))
def testCollectiveReduceChannelId(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def fn(x, y):
t0 = collective_ops.all_reduce_v2(
t=x, group_size=2, group_key=1, instance_key=1)
t1 = collective_ops.all_reduce_v2(
t=y, group_size=2, group_key=1, instance_key=1)
return t0 + t1
inputs = constant_op.constant([1.0, 2.0, 3.0])
# Make sure 2 different channel ids are assigned to the 2 all-reduce
# instructions generated by XLA.
hlo_str = fn.experimental_get_compiler_ir(inputs, inputs)()
matches = re.findall('channel_id=([0-9]*),', hlo_str)
self.assertLen(matches, 2)
self.assertNotEqual(matches[0], matches[1])
self._compareTwoMethodsCompilerIROutput(fn, [inputs, inputs], {})
def testCollectiveReduceReplicaGroups(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def fn(x):
t0 = collective_ops.all_reduce_v2(
t=x, group_size=2, group_key=1, instance_key=1)
return t0
inputs = constant_op.constant([1.0, 2.0, 3.0])
# Make sure replica groups are assigned
hlo_str = fn.experimental_get_compiler_ir(inputs)()
self.assertIn('replica_groups={{', hlo_str)
self._compareTwoMethodsCompilerIROutput(fn, [inputs], {})
def testCollectiveReduceGroupAssignment(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def fn(x):
group_size, group_key = collective_ops.assign_group_v2(
group_assignment=[[0]], device_index=0, base_key=1000)
t0 = collective_ops.all_reduce_v2(
t=x, group_size=group_size, group_key=group_key, instance_key=1)
return t0
inputs = constant_op.constant([1.0, 2.0, 3.0])
# Make sure 2 different channel ids are assigned to the 2 all-reduce
# instructions generated by XLA.
hlo_str = fn.experimental_get_compiler_ir(inputs)()
self.assertIn('replica_groups={{0}}', hlo_str)
self._compareTwoMethodsCompilerIROutput(fn, [inputs], {})
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonLocationInMetadata(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def add_fn(x, y):
return x + y
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertIn(
'add_fn', add_fn.experimental_get_compiler_ir(inputs, inputs)()
)
self._compareTwoMethodsCompilerIROutput(add_fn, [inputs, inputs], {})
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonLocationNestedInMetadata(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def add_f(x, y):
return x + y
@polymorphic_function.function(jit_compile=True)
def add_g(x, y):
return add_f(x, y)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertIn(
'add_g', add_g.experimental_get_compiler_ir(inputs, inputs)()
)
self._compareTwoMethodsCompilerIROutput(add_g, [inputs, inputs], {})
def testPythonStackTrace(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def failure_fn(x):
return string_ops.string_length(string_ops.string_format('{}', x))
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'failure_fn'):
failure_fn(inputs)
def testPythonStackTraceUncompiledWithinCompiled(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function
def failure_fn(x):
return string_ops.string_length(string_ops.string_format('{}', x))
@polymorphic_function.function(jit_compile=True)
def outer(x):
return failure_fn(x)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'outer'):
outer(inputs)
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonStackTraceCompiledWithinUncompiled(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def failure_fn(x):
return string_ops.string_length(string_ops.string_format('{}', x))
@polymorphic_function.function
def outer(x):
return failure_fn(x)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'failure_fn'):
outer(inputs)
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonStackTraceCompiledWithinCompiled(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def failure_fn(x):
return string_ops.string_length(string_ops.string_format('{}', x))
@polymorphic_function.function
def outer(x):
return failure_fn(x)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'failure_fn'):
outer(inputs)
def testFunctionGradient(self):
with ops.device('device:{}:0'.format(self.device)):
v = resource_variable_ops.ResourceVariable(2.0)
def fn(x):
return v * x
func = polymorphic_function.function(fn, jit_compile=False)
xla_func = polymorphic_function.function(fn, jit_compile=True)
def run_and_check(test_func):
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = test_func(x)
dy = tape.gradient(y, v)
self.assertAllClose(6.0, y)
self.assertAllClose(3.0, dy)
run_and_check(func)
run_and_check(xla_func)
@test_util.disable_mlir_bridge('TODO(b/162521846): MLIR bridge fails'
' msan, function library not found')
def testControlFlow(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(x):
assert control_flow_util.GraphOrParentsInXlaContext(
ops.get_default_graph())
x = ops.convert_to_tensor(x)
def body(i, a):
return i + 1, cond.cond(i > 2, lambda: a + (x**2),
lambda: a + 3)
return while_loop.while_loop(
lambda i, *_: i < 10,
body, (constant_op.constant(0), constant_op.constant(3.)),
maximum_iterations=10)[1]
@polymorphic_function.function(jit_compile=True)
def g(x):
x = ops.convert_to_tensor(x)
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
return y, tape.gradient(y, x)
# Test that XLA context gets correctly propagated.
g._get_concrete_function_garbage_collected(2.0)(2.0)
self.assertAllClose(40.0, f(2.0))
self.assertAllClose([40.0, 28.0], g(2.0))
self.assertAllClose(40.0, f.get_concrete_function(2.0)(2.0))
self.assertAllClose([40.0, 28.0], g.get_concrete_function(2.0)(2.0))
def testWhileLoopWithUnmodifiedCarriedShape(self):
with ops.device('device:{}:0'.format(self.device)):
signature = [tensor.TensorSpec(shape=[None], dtype=dtypes.float32)]
# We define a signature that specifies unknown vector shape, then test
# that tf.shape constness gets properly propagated into the while_loop
# even when carried as part of the loop state.
@polymorphic_function.function(
input_signature=signature, jit_compile=True)
def g(x):
return while_loop.while_loop_v2(
lambda *_: True,
lambda y, shp: (y + random_ops.random_normal(shp)**2, shp),
(x, array_ops.shape(x)),
maximum_iterations=3)[0]
self.assertAllGreater(g(array_ops.zeros([7])), 0.)
def testNestedWhileLoopWithUnmodifiedCarriedShape(self):
with ops.device('device:{}:0'.format(self.device)):
signature = [tensor.TensorSpec(shape=[None], dtype=dtypes.float32)]
@polymorphic_function.function(
input_signature=signature, jit_compile=True)
def g(x):
def inner(z, shp):
return z + random_ops.random_normal(shp)**2, shp
def outer(y, shp):
y, shp = while_loop.while_loop_v2(
lambda *_: True, inner, (y, shp), maximum_iterations=3)
y, shp = array_ops.identity_n([y, shp])
return while_loop.while_loop_v2(
lambda *_: True, inner, (y, shp), maximum_iterations=5)
shp = array_ops.shape(x, name='x_shp')
return while_loop.while_loop_v2(
lambda *_: True, outer, (x, shp), maximum_iterations=4)[0]
self.assertAllGreater(g(array_ops.zeros([7])), 0.)
def testNestedWhileLoopWithUnmodifiedCarriedShapeSlice(self):
with ops.device('device:{}:0'.format(self.device)):
signature = [
tensor.TensorSpec(shape=[None, None], dtype=dtypes.float32)
]
@polymorphic_function.function(
input_signature=signature, jit_compile=True)
def g(x):
def inner(z, shp):
return z + random_ops.random_normal(shp)**2, shp
def outer(y, shp):
y, shp = while_loop.while_loop_v2(
lambda *_: True, inner, (y, shp), maximum_iterations=3)
return while_loop.while_loop_v2(
lambda *_: True, inner, (y, shp), maximum_iterations=4)
shp = array_ops.shape(x, name='x_shp')
x = while_loop.while_loop_v2(
lambda *_: True, outer, (x, shp), maximum_iterations=5)[0]
shp2 = array_ops.shape(x, name='x_shp_after')[1:]
w = while_loop.while_loop_v2(
lambda *_: True,
outer, (array_ops.zeros_like(x[0]), shp2),
maximum_iterations=6)[0]
return x + w
self.assertAllGreater(g(array_ops.zeros([7, 13])), 0.)
def testMethodCompilation(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@polymorphic_function.function(jit_compile=True)
def f1(self, x, a):
return x + a
inputs = constant_op.constant([1, 2, 2, 3, 3])
c = C()
self.assertAllClose([2, 3, 3, 4, 4], c.f1(inputs, 1))
def testMethodCompilationUnsupportedFunc(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@polymorphic_function.function(jit_compile=True)
def f1(self, x):
return string_ops.string_length(
string_ops.string_format('{}', x))
inputs = constant_op.constant([1, 2, 2, 3, 3])
c = C()
with self.assertRaisesRegex(
errors.InvalidArgumentError, 'unsupported operations'
):
c.f1(inputs)
def testMustBeConstantPropagation(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162799319: Cannot resolve constant on TPU')
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f():
return constant_op.constant([0, 2, 1], dtype=dtypes.int32)
@polymorphic_function.function(jit_compile=True)
def g(a, b):
return array_ops.transpose(a, b)
@polymorphic_function.function
def z():
return g(array_ops.ones([3, 4, 3], dtype=dtypes.float32), f())
z()
def testArgMinMax(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def argmax(x):
return math_ops.argmax(x)
@polymorphic_function.function(jit_compile=True)
def argmin(x):
return math_ops.argmin(x)
self.assertAllClose(0, argmax(array_ops.ones([10], dtype=dtypes.float32)))
self.assertAllClose(0, argmax(array_ops.ones([10])))
self.assertAllClose(0, argmin(array_ops.ones([10], dtype=dtypes.float32)))
self.assertAllClose(0, argmin(array_ops.ones([10])))
@test_util.disable_mlir_bridge('TensorArray support not implemented')
def testErrorMessagePassingTensorArray(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=1, element_shape=[])
ta = ta.write(0, 2 * x)
y = ta.read(0)
return y
x = constant_op.constant(3.14)
with backprop.GradientTape() as tape:
tape.watch(x)
with self.assertRaisesRegex(errors.UnimplementedError,
'TensorList crossing the XLA/TF boundary'):
y = f(x)
tape.gradient(y, x)
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = polymorphic_function.function(jit_compile=True)(f)
inputs = constant_op.constant([3.14, 2.68, 7.69])
self.assertAllClose([6.28, 5.36, 15.38, 9.42, 8.04, 23.07], f(inputs))
self.assertAllClose(compiled_f(inputs), f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2Multidim(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3, 2])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = polymorphic_function.function(jit_compile=True)(f)
inputs = constant_op.constant([[3.14, 21.1], [2.68, 22.2], [7.69, 23.3]])
self.assertAllClose(f(inputs), compiled_f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2Scalars(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[1])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = polymorphic_function.function(jit_compile=True)(f)
inputs = constant_op.constant([3.14])
self.assertAllClose(f(inputs), compiled_f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatGrad(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
def g():
x = constant_op.constant([3.14, 2.68, 7.69])
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
return tape.gradient(y, x)
compiled_g = polymorphic_function.function(jit_compile=True)(g)
self.assertAllClose([5.0, 5.0, 5.0], g())
self.assertAllClose(compiled_g(), g())
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatGradNestedCompile(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
@polymorphic_function.function(jit_compile=True)
def g():
x = constant_op.constant([3.14, 2.68, 7.69])
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
out = tape.gradient(y, x)
return out
self.assertAllClose([5.0, 5.0, 5.0], g())
def testCumsum(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162771302: 64bit rewrite of cumsum not supported')
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(x):
return math_ops.cumsum(x)
f64_input = constant_op.constant([1.1, 2.2, 3.3], dtype=dtypes.float64)
self.assertAllClose([1.1, 3.3, 6.6], f(f64_input))
def testNoExcessiveRetracing(self):
with ops.device('device:{}:0'.format(self.device)):
inner_retracings = 0
@polymorphic_function.function(jit_compile=True)
def inner(a, b):
nonlocal inner_retracings
inner_retracings += 1
return a * b + a
def outer(a, b):
return inner(a, b)
func_input = random_ops.random_normal([10, 10])
for _ in range(2):
polymorphic_function.function(outer)(func_input, func_input)
self.assertEqual(inner_retracings, 1)
def testUpdateVariable(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([0.0, 0.0])
@polymorphic_function.function(jit_compile=True)
def f():
v.assign([3.1, 2.3])
f()
self.assertAllClose(v, [3.1, 2.3])
@test_util.disable_mlir_bridge('MLIR does not support resource update for'
' signature with compile-time constant.')
def testUniqueDifferentSizes(self):
if not 'gpu' in self.device.lower():
self.skipTest('Currently works only on GPU')
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(x, y):
return array_ops.unique(x).y + array_ops.unique(y).y
f(constant_op.constant([3.1, 3.2]), constant_op.constant([3.3, 3.2]))
with self.assertRaisesRegex(errors.InternalError, 'different size'):
f(
constant_op.constant([3.1, 3.2]),
constant_op.constant([3.1, 3.2, 3.3]))
def testUniqueCompilability(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(x):
return array_ops.unique(x).y
self.assertAllClose(f(constant_op.constant([3.1, 3.2, 3.2])), [3.1, 3.2])
def testUpdateVariableMemoryUsage(self):
with ops.device('device:{}:0'.format(self.device)):
on_gpu = 'gpu' in self.device.lower()
v = variables.Variable([3.1, 3.2])
@polymorphic_function.function(jit_compile=True)
def update_var(a, b):
v.assign_add(a * b)
arg1 = random_ops.random_normal([2])
arg2 = random_ops.random_normal([2])
gc.collect()
initial_usage = context.context().get_memory_info(
v.device)['current'] if on_gpu else 0
update_var(arg1, arg2)
gc.collect()
final_usage = context.context().get_memory_info(
v.device)['current'] if on_gpu else 0
self.assertEqual(initial_usage, final_usage)
@test_util.disable_mlir_bridge('TODO(b/162381930): MLIR bridge renames '
' functions')
def testUpdateVariableInClass(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@polymorphic_function.function(jit_compile=True)
def update_var(self, a, b):
if not hasattr(self, 'v'):
self.v = variables.Variable(3.1)
self.v.assign_add(a * b)
c = C()
@polymorphic_function.function
def outer():
c.update_var(constant_op.constant(0.7), constant_op.constant(0.6))
outer()
self.assertAllClose(c.v, 3.52)
def testUpdateVariableMultipleOutputs(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable(3.1)
@polymorphic_function.function(jit_compile=True)
def update_var(a, b):
v.assign_add(a * b)
return a * b + v
out = update_var(constant_op.constant(0.7), constant_op.constant(0.6))
self.assertAllClose(v, 3.52)
self.assertAllClose(out, 3.94)
def testReturnIdentity(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(a, b):
return (a, b)
a = random_ops.random_normal([10, 10])
b = random_ops.random_normal([10, 10])
on_gpu = 'gpu' in self.device.lower()
gc.collect()
initial_usage = context.context().get_memory_info(
b.backing_device)['current'] if on_gpu else 0
f(a, b)
gc.collect()
final_usage = context.context().get_memory_info(
b.backing_device)['current'] if on_gpu else 0
self.assertEqual(initial_usage, final_usage)
def testGetCompilerIrConstants(self):
if 'tpu' in self.device.lower():
self.skipTest('TPU generates different HLO')
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(a, b):
return array_ops.transpose(a, b)
a = array_ops.ones([3, 4, 3], dtype=dtypes.float32)
b = constant_op.constant([0, 2, 1], dtype=dtypes.int32)
self.assertIn('{2,1,0}',
f.experimental_get_compiler_ir(a, b)(stage='optimized_hlo'))
@test_util.disable_mlir_bridge('TODO(b/168732524): MLIR bridge does not '
' optimize single-element tuples to scalars')
def testGetCompilerIrResourceVars(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([3.1, 3.2])
@polymorphic_function.function(jit_compile=True)
def f(a, b):
v.assign_add(a * b)
a = random_ops.random_normal([2])
b = random_ops.random_normal([2])
self.assertIn('input_output_alias={ {}: (2, {}, may-alias) }',
f.experimental_get_compiler_ir(a, b)('optimized_hlo'))
def testGetCompilerIrNotCompiled(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function
def f(x):
return x + 1
a = random_ops.random_normal([10, 10])
with self.assertRaisesRegex(ValueError,
'marked with \'jit_compile'):
f.experimental_get_compiler_ir(a)()
def testGetCompilerIrNested(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def fn(x, a):
return x + a
@polymorphic_function.function(jit_compile=False)
def fn2(x, a):
fn.experimental_get_compiler_ir(x, a)()
return fn(x, a)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaises(TypeError):
fn2(inputs, 1)
def testGetCompilerIrKwargs(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([0.1, 0.1])
@polymorphic_function.function(jit_compile=True)
def f(a, b):
return (a + b) * v
a = constant_op.constant([1.1, 1.1])
b = constant_op.constant([2.2, 2.2])
self.assertIn('multiply',
f.experimental_get_compiler_ir(b=a, a=b)(stage='hlo'))
def testGetCompilerIrDot(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(a, b):
return a + b
a = constant_op.constant([1.1, 1.1])
b = constant_op.constant([2.2, 2.2])
self.assertIn(
'label',
f.experimental_get_compiler_ir(a, b)(stage='optimized_hlo_dot'))
self._compareTwoMethodsCompilerIROutput(f, [a, b], {})
def testGetCompilerIrNoDevicePlacement(self):
if 'gpu' not in self.device.lower():
self.skipTest('Testing get_compiler_ir on GPUs without placement')
@polymorphic_function.function(jit_compile=True)
def f(a, b):
return a + b
a = constant_op.constant([1.1, 1.1])
b = constant_op.constant([2.2, 2.2])
self.assertIn(
'label',
f.experimental_get_compiler_ir(a, b)(stage='optimized_hlo_dot'))
self._compareTwoMethodsCompilerIROutput(f, [a, b], {})
def testGetCompilerIrNonTensors(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(l):
return l[0] + l[1]
l = [constant_op.constant(1.1), constant_op.constant(2.2)]
self.assertIn('tuple',
f.experimental_get_compiler_ir(l)())
self._compareTwoMethodsCompilerIROutput(f, [l], {})
def testGetCompilerIrSerialized(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def fn(x):
return x - x
inputs = constant_op.constant([1, 2, 2, 3, 3])
for stage in ('hlo_serialized', 'optimized_hlo_serialized'):
hlo = fn.experimental_get_compiler_ir(inputs)(
stage=stage, device_name=f'/device:{self.device}:0')
self.assertIsInstance(hlo, bytes)
self._compareTwoMethodsCompilerIROutput(fn, [inputs], {})
def testDotOptimizedHlo(self):
with ops.device('device:{}:0'.format(self.device)):
a = random_ops.random_normal([100, 100])
b = random_ops.random_normal([100, 100])
@polymorphic_function.function(jit_compile=True)
def f(a, b):
return math_ops.matmul(a, b)
if not test_util.IsMklEnabled():
self.assertRegex(
f.experimental_get_compiler_ir(a, b)('optimized_hlo'),
'(dot)|(convolution)',
)
else:
self.assertRegex(
f.experimental_get_compiler_ir(a, b)('optimized_hlo'),
'(dot)|(convolution)|(custom-call)',
)
def testConstantOnWrongDevice(self):
with ops.device('device:{}:0'.format(self.device)):
s = random_ops.random_uniform([2], 1, 10, dtypes.int32)
l = random_ops.random_normal([s[0] * s[1]])
@polymorphic_function.function(jit_compile=True)
def f(l):
return array_ops.reshape(l, s)
self.assertIn('tuple',
f.experimental_get_compiler_ir(l)())
@test_util.disable_mlir_bridge('TODO(b/172845417): MLIR bridge does not '
'support getting constants out of resources')
def testGetConstantOutOfResourceVariable(self):
with ops.device('device:{}:0'.format(self.device)):
# Use floats to force device placement.
a = variables.Variable(50.0)
b = variables.Variable(2.0)
@polymorphic_function.function(jit_compile=True)
def f(x):
return array_ops.reshape(
x, [math_ops.cast(a, dtypes.int32),
math_ops.cast(b, dtypes.int32)])
# OK since the value is known at compile time.
out = f(random_ops.random_normal([10, 10]))
self.assertEqual(out.shape[0], 50)
self.assertEqual(out.shape[1], 2)
@test_util.disable_mlir_bridge('TODO(b/172845417): MLIR bridge does not '
'support getting constants out of resources')
def testGetConstantOutOfResourceVariableAfterWrite(self):
with ops.device('device:{}:0'.format(self.device)):
# Use floats to force device placement.
a = variables.Variable(50.0)
b = variables.Variable(2.0)
@polymorphic_function.function(jit_compile=True)
def f(x, val1, val2):
a.assign(math_ops.cast(val1, dtypes.float32))
b.assign(math_ops.cast(val2, dtypes.float32))
return array_ops.reshape(
x, [math_ops.cast(a, dtypes.int32),
math_ops.cast(b, dtypes.int32)])
val1 = constant_op.constant(2)
val2 = constant_op.constant(50)
# Returns an error, since the value known at compile time was overridden.
with self.assertRaisesRegex(errors.InvalidArgumentError,
'concrete values at compile time'):
f(random_ops.random_normal([10, 10]), val1, val2)
@test_util.disable_mlir_bridge('TODO(b/172845417): MLIR bridge does not '
'support getting constants out of resources')
def testGetConstantOutOfResourceVariableBeforeWrite(self):
with ops.device('device:{}:0'.format(self.device)):
# Use floats to force device placement.
a = variables.Variable(50.0)
b = variables.Variable(2.0)
@polymorphic_function.function(jit_compile=True)
def f(x, val1, val2):
out = array_ops.reshape(
x, [math_ops.cast(a, dtypes.int32),
math_ops.cast(b, dtypes.int32)])
a.assign(math_ops.cast(val1, dtypes.float32))
b.assign(math_ops.cast(val2, dtypes.float32))
return out
val1 = constant_op.constant(2)
val2 = constant_op.constant(50)
# OK since the write happens after the reshape.
out = f(random_ops.random_normal([10, 10]), val1, val2)
self.assertEqual(out.shape[0], 50)
self.assertEqual(out.shape[1], 2)
def testTfAssert(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(x):
control_flow_assert.Assert(x == 1, ['Wrong value'])
f(constant_op.constant(1))
def testTensorArrayErrorMessage(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def failure_fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
size=2,
dynamic_size=True,
element_shape=(None,),
)
return ta.concat()
with self.assertRaisesRegex(errors.InvalidArgumentError, 'failure_fn'):
failure_fn()
def testCounter(self):
cell_nojit = polymorphic_function._tf_function_counter.get_cell('0')
cell_jit = polymorphic_function._tf_function_counter.get_cell('1')
orig_nojit = cell_nojit.value()
orig_jit = cell_jit.value()
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function
def f(a):
return a + a
f(constant_op.constant(1))
self.assertEqual(cell_nojit.value(), orig_nojit + 1)
self.assertEqual(cell_jit.value(), orig_jit)
f(constant_op.constant(1.)) # Calling again does not increment
self.assertEqual(cell_nojit.value(), orig_nojit + 1)
@polymorphic_function.function(jit_compile=True)
def f1(a):
return a + a
f1(constant_op.constant(1))
self.assertEqual(cell_nojit.value(), orig_nojit + 1)
self.assertEqual(cell_jit.value(), orig_jit + 1)
@polymorphic_function.function
def f2(a):
@polymorphic_function.function
def g(a):
return a + a
@polymorphic_function.function(jit_compile=True)
def h(a):
return a + a
return g(a) + h(a)
f2(constant_op.constant(1))
self.assertEqual(cell_nojit.value(), orig_nojit + 2)
self.assertEqual(cell_jit.value(), orig_jit + 2)
@polymorphic_function.function(jit_compile=True)
def f3(a):
@polymorphic_function.function
def g(a):
return a + a
@polymorphic_function.function(jit_compile=True)
def h(a):
return a + a
return g(a) + h(a)
f3(constant_op.constant(1))
self.assertEqual(cell_nojit.value(), orig_nojit + 2)
self.assertEqual(cell_jit.value(), orig_jit + 3)
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns '
' wrong status type')
def testResourceWrongDevice(self):
if 'gpu' not in self.device.lower():
self.skipTest('Need a GPU to have non-trivial device placement')
with ops.device('device:CPU:0'):
v = variables.Variable([3.1, 3.2])
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(experimental_compile=True)
def update_var(a):
v.assign_add(a)
arg = random_ops.random_normal([2])
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Trying to access resource .*'):
update_var(arg)
def testMustBeConstantInsideCondition(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(x, d):
if math_ops.reduce_all(
math_ops.greater(x, random_ops.random_normal([10, 10]))):
return array_ops.reshape(x * 2, constant_op.constant([100]))
else:
return array_ops.reshape(x * 3, d)
f(random_ops.random_normal([10, 10]), constant_op.constant([100]))
def testConditionalGradientTapeMathRegression(self):
with ops.device('device:{}:0'.format(self.device)):
with backprop.GradientTape():
@polymorphic_function.function(jit_compile=True, autograph=False)
def f(x):
return cond.cond(
math_ops.reduce_all(x > 1), lambda: 1. / x, lambda: x)
v = variables.Variable([[2.]])
self.assertAllClose(f(v), constant_op.constant([[0.5]]))
@test_util.disable_mlir_bridge('TODO(b/190444466): MLIR bridge seems to '
'ignore resource assignments')
def testErrMsgAssignWrongShape(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([3.1, 3.2])
@polymorphic_function.function(jit_compile=True)
def failure_fn(samples):
v.assign(array_ops.zeros(samples))
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Shape .* cannot be changed after initialization'):
failure_fn(constant_op.constant(6))
with self.assertRaisesRegex(errors.InvalidArgumentError, 'failure_fn'):
failure_fn(constant_op.constant(6))
def testTfSummaryErrMsg(self):
if 'gpu' not in self.device.lower():
self.skipTest('Only runs on GPU')
with ops.device('device:{}:0'.format(self.device)):
writer = summary_ops_v2.create_file_writer(self.get_temp_dir())
@polymorphic_function.function(jit_compile=True)
def my_func_temp():
with writer.as_default():
summary_ops_v2.scalar('my_metric', 0.5, step=10)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Trying to access resource .*'):
my_func_temp()
def testSinglePassArgmax(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def f(x):
return math_ops.argmax(x)
inputs = array_ops.ones([10], dtype=dtypes.float32)
hlo = f.experimental_get_compiler_ir(inputs)(stage='hlo')
# Test that reduction occurs only once.
self.assertEqual(hlo.count('reduce'), 1)
self._compareTwoMethodsCompilerIROutput(f, [inputs], {})
def testExperimentalGetCompilerIRBasic(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True)
def inner_tf_func(x):
return math_ops.sin(x)
x = constant_op.constant([2.0, 3.0])
self._compareTwoMethodsCompilerIROutput(inner_tf_func, [x], {})
def testExperimentalGetCompilerIRAutograph(self):
with ops.device('device:{}:0'.format(self.device)):
@polymorphic_function.function(jit_compile=True, autograph=True)
def f(x, y):
if x[0] > 1:
return y[0]
else:
return y[1]
x, y = constant_op.constant([2, 3]), constant_op.constant([2, 3])
self._compareTwoMethodsCompilerIROutput(f, [x, y], {})
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
FunctionTest
|
python
|
spyder-ide__spyder
|
spyder/plugins/completion/providers/fallback/tests/conftest.py
|
{
"start": 367,
"end": 1051
}
|
class ____(QObject):
    sig_recv_tokens = Signal(list)
    def handle_response(self, client, req_id, response):
        tokens = list(response['params'])
        self.sig_recv_tokens.emit(list(tokens))
@pytest.fixture(scope='module')
def fallback_completions(qtbot_module, request):
    fallback = FallbackProvider(None, {})
    completions = CompletionManagerMock(None)
    with qtbot_module.waitSignal(fallback.sig_provider_ready, timeout=30000):
        fallback.start()
    def teardown():
        fallback.shutdown()
    request.addfinalizer(teardown)
    fallback.sig_response_ready.connect(completions.handle_response)
    return fallback, completions
|
CompletionManagerMock
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/ddl.py
|
{
"start": 38670,
"end": 38843
}
|
class ____(_CreateDropBase["Constraint"]):
    """Represent a COMMENT ON CONSTRAINT IS NULL statement."""
    __visit_name__ = "drop_constraint_comment"
|
DropConstraintComment
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/filters.py
|
{
"start": 85030,
"end": 85977
}
|
class ____(PrefectOperatorFilterBaseModel):
    """Filter by `Variable.tags`."""
    all_: Optional[list[str]] = Field(
        default=None,
        examples=[["tag-1", "tag-2"]],
        description=(
            "A list of tags. Variables will be returned only if their tags are a"
            " superset of the list"
        ),
    )
    is_null_: Optional[bool] = Field(
        default=None, description="If true, only include Variables without tags"
    )
    def _get_filter_list(
        self, db: "PrefectDBInterface"
    ) -> Iterable[sa.ColumnExpressionArgument[bool]]:
        filters: list[sa.ColumnElement[bool]] = []
        if self.all_ is not None:
            filters.append(db.Variable.tags.has_all(_as_array(self.all_)))
        if self.is_null_ is not None:
            filters.append(
                db.Variable.tags == [] if self.is_null_ else db.Variable.tags != []
            )
        return filters
|
VariableFilterTags
|
python
|
ray-project__ray
|
release/ray_release/exception.py
|
{
"start": 3745,
"end": 3787
}
|
class ____(CommandError):
    pass
|
LogsError
|
python
|
ray-project__ray
|
python/ray/serve/schema.py
|
{
"start": 6568,
"end": 8785
}
|
class ____(BaseModel):
    """Options with which to start a replica actor."""
    runtime_env: dict = Field(
        default={},
        description=(
            "This deployment's runtime_env. working_dir and "
            "py_modules may contain only remote URIs."
        ),
    )
    num_cpus: float = Field(
        default=None,
        description=(
            "The number of CPUs required by the deployment's "
            "application per replica. This is the same as a ray "
            "actor's num_cpus. Uses a default if null."
        ),
        ge=0,
    )
    num_gpus: float = Field(
        default=None,
        description=(
            "The number of GPUs required by the deployment's "
            "application per replica. This is the same as a ray "
            "actor's num_gpus. Uses a default if null."
        ),
        ge=0,
    )
    memory: float = Field(
        default=None,
        description=(
            "Restrict the heap memory usage of each replica. Uses a default if null."
        ),
        ge=0,
    )
    resources: Dict = Field(
        default={},
        description=("The custom resources required by each replica."),
    )
    accelerator_type: str = Field(
        default=None,
        description=(
            "Forces replicas to run on nodes with the specified accelerator type."
            "See :ref:`accelerator types <accelerator_types>`."
        ),
    )
    @validator("runtime_env")
    def runtime_env_contains_remote_uris(cls, v):
        # Ensure that all uris in py_modules and working_dir are remote
        if v is None:
            return
        uris = v.get("py_modules", [])
        if "working_dir" in v:
            uris = [*uris, v["working_dir"]]
        for uri in uris:
            if uri is not None:
                try:
                    parse_uri(uri)
                except ValueError as e:
                    raise ValueError(
                        "runtime_envs in the Serve config support only "
                        "remote URIs in working_dir and py_modules. Got "
                        f"error when parsing URI: {e}"
                    )
        return v
@PublicAPI(stability="stable")
|
RayActorOptionsSchema
|
python
|
pytorch__pytorch
|
torch/ao/nn/quantized/modules/conv.py
|
{
"start": 25886,
"end": 30662
}
|
class ____(_ConvNd):
    _FLOAT_MODULE: ClassVar[type[nn.modules.conv._ConvNd]]
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        transposed,
        output_padding,
        groups,
        bias,
        padding_mode,
        device=None,
        dtype=None,
    ):
        if padding_mode != "zeros":
            raise ValueError(
                f'Only "zeros" padding mode is supported for {self.__class__.__name__}'
            )
        factory_kwargs = {"device": device, "dtype": dtype}
        # Subclasses of _ConvNd need to call _init rather than __init__. See
        # discussion on PR #49702
        super()._init(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
            bias,
            padding_mode,
            **factory_kwargs,
        )
    def _input_padding(
        self, kernel_size: list[int], dilation: list[int], padding: list[int]
    ) -> list[int]:
        res = torch.jit.annotate(list[int], [])
        for kdx in range(len(kernel_size)):
            pad = dilation[kdx] * (kernel_size[kdx] - 1) - padding[kdx]
            res.append(pad)
        return res
    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):  # type: ignore[override]
        r"""Creates a quantized module from a float module or qparams_dict.
        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
                utilities or provided by the user
        """
        # derived classes override cls._FLOAT_MODULE attribute
        msg = (
            " nnq."
            + cls.__name__
            + ".from_float only works for "
            + cls._FLOAT_MODULE.__name__  # type: ignore[attr-defined]
        )
        assert type(mod) is cls._FLOAT_MODULE, msg
        assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined."
        weight_post_process = mod.qconfig.weight()  # type: ignore[operator, union-attr]
        weight_post_process(mod.weight)
        assert weight_post_process.dtype == torch.qint8, (
            "Weight observer must have a dtype of qint8"
        )
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        # the __init__ call used is the one from derived classes and not the one from _ConvTransposeNd
        qconv = cls(
            mod.in_channels,
            mod.out_channels,
            mod.kernel_size,  # type: ignore[call-arg]
            mod.stride,
            mod.padding,
            mod.output_padding,
            mod.groups,
            mod.bias is not None,
            mod.dilation,
            mod.padding_mode,
        )
        qconv.set_weight_bias(qweight, mod.bias)
        if (
            not hasattr(mod, "activation_post_process")
            or mod.activation_post_process.dtype == torch.float
        ):
            return qconv  # dynamic quantization doesn't need scale/zero_point
        else:
            act_scale, act_zp = mod.activation_post_process.calculate_qparams()  # type: ignore[operator, union-attr]
            qconv.scale = float(act_scale)
            qconv.zero_point = int(act_zp)
            return qconv
    @staticmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):  # type: ignore[override]
        r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module
        Args:
            ref_qconvt (Module): a reference quantized module, either produced by torch.ao.quantization
                utilities or provided by the user
            output_scale (float): scale for output Tensor
            output_zero_point (int): zero point for output Tensor
        """
        qconv = cls(
            ref_qconvt.in_channels,
            ref_qconvt.out_channels,
            ref_qconvt.kernel_size,  # type: ignore[arg-type]
            ref_qconvt.stride,  # type: ignore[arg-type]
            ref_qconvt.padding,  # type: ignore[arg-type]
            ref_qconvt.output_padding,  # type: ignore[arg-type]
            ref_qconvt.groups,
            ref_qconvt.bias is not None,  # type: ignore[arg-type]
            ref_qconvt.dilation,  # type: ignore[arg-type]
            ref_qconvt.padding_mode,
            device=ref_qconvt.weight.device,
            dtype=ref_qconvt.weight.dtype,
        )
        qweight = ref_qconvt.get_quantized_weight()
        qconv.set_weight_bias(qweight, ref_qconvt.bias)
        qconv.scale = float(output_scale)
        qconv.zero_point = int(output_zero_point)
        return qconv
|
_ConvTransposeNd
|
python
|
MongoEngine__mongoengine
|
mongoengine/fields.py
|
{
"start": 39000,
"end": 45831
}
|
class ____(BaseField):
"""A reference to a document that will be automatically dereferenced on
access (lazily).
Note this means you will get a database I/O access everytime you access
this field. This is necessary because the field returns a :class:`~mongoengine.Document`
which precise type can depend of the value of the `_cls` field present in the
document in database.
In short, using this type of field can lead to poor performances (especially
if you access this field only to retrieve it `pk` field which is already
known before dereference). To solve this you should consider using the
:class:`~mongoengine.fields.LazyReferenceField`.
Use the `reverse_delete_rule` to handle what should happen if the document
the field is referencing is deleted. EmbeddedDocuments, DictFields and
MapFields does not support reverse_delete_rule and an `InvalidDocumentError`
will be raised if trying to set on one of these Document / Field types.
The options are:
* DO_NOTHING (0) - don't do anything (default).
* NULLIFY (1) - Updates the reference to null.
* CASCADE (2) - Deletes the documents associated with the reference.
* DENY (3) - Prevent the deletion of the reference object.
* PULL (4) - Pull the reference from a :class:`~mongoengine.fields.ListField` of references
Alternative syntax for registering delete rules (useful when implementing
bi-directional delete rules)
.. code-block:: python
class Org(Document):
owner = ReferenceField('User')
class User(Document):
org = ReferenceField('Org', reverse_delete_rule=CASCADE)
User.register_delete_rule(Org, 'owner', DENY)
"""
def __init__(
self, document_type, dbref=False, reverse_delete_rule=DO_NOTHING, **kwargs
):
"""Initialises the Reference Field.
:param document_type: The type of Document that will be referenced
:param dbref: Store the reference as :class:`~pymongo.dbref.DBRef`
or as the :class:`~pymongo.objectid.ObjectId`.
:param reverse_delete_rule: Determines what to do when the referring
object is deleted
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
.. note ::
A reference to an abstract document type is always stored as a
:class:`~pymongo.dbref.DBRef`, regardless of the value of `dbref`.
"""
# XXX ValidationError raised outside of the "validate" method.
if not (
isinstance(document_type, str)
or (isclass(document_type) and issubclass(document_type, Document))
):
self.error(
"Argument to ReferenceField constructor must be a "
"document class or a string"
)
self.dbref = dbref
self.document_type_obj = document_type
self.reverse_delete_rule = reverse_delete_rule
super().__init__(**kwargs)
@property
def document_type(self):
if isinstance(self.document_type_obj, str):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = _DocumentRegistry.get(self.document_type_obj)
return self.document_type_obj
@staticmethod
def _lazy_load_ref(ref_cls, dbref):
dereferenced_son = ref_cls._get_db().dereference(dbref, session=_get_session())
if dereferenced_son is None:
raise DoesNotExist(f"Trying to dereference unknown document {dbref}")
return ref_cls._from_son(dereferenced_son)
def __get__(self, instance, owner):
"""Descriptor to allow lazy dereferencing."""
if instance is None:
# Document class being used rather than a document object
return self
# Get value from document instance if available
ref_value = instance._data.get(self.name)
auto_dereference = instance._fields[self.name]._auto_dereference
# Dereference DBRefs
if auto_dereference and isinstance(ref_value, DBRef):
if hasattr(ref_value, "cls"):
# Dereference using the class type specified in the reference
cls = _DocumentRegistry.get(ref_value.cls)
else:
cls = self.document_type
instance._data[self.name] = self._lazy_load_ref(cls, ref_value)
return super().__get__(instance, owner)
def to_mongo(self, document):
if isinstance(document, DBRef):
if not self.dbref:
return document.id
return document
if isinstance(document, Document):
# We need the id from the saved object to create the DBRef
id_ = document.pk
# XXX ValidationError raised outside of the "validate" method.
if id_ is None:
self.error(_unsaved_object_error(document.__class__.__name__))
# Use the attributes from the document instance, so that they
# override the attributes of this field's document type
cls = document
else:
id_ = document
cls = self.document_type
id_field_name = cls._meta["id_field"]
id_field = cls._fields[id_field_name]
id_ = id_field.to_mongo(id_)
if self.document_type._meta.get("abstract"):
collection = cls._get_collection_name()
return DBRef(collection, id_, cls=cls._class_name)
elif self.dbref:
collection = cls._get_collection_name()
return DBRef(collection, id_)
return id_
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type."""
if not self.dbref and not isinstance(
value, (DBRef, Document, EmbeddedDocument)
):
collection = self.document_type._get_collection_name()
value = DBRef(collection, self.document_type.id.to_python(value))
return value
def prepare_query_value(self, op, value):
if value is None:
return None
super().prepare_query_value(op, value)
return self.to_mongo(value)
def validate(self, value):
if not isinstance(value, (self.document_type, LazyReference, DBRef, ObjectId)):
self.error(
"A ReferenceField only accepts DBRef, LazyReference, ObjectId or documents"
)
if isinstance(value, Document) and value.id is None:
self.error(_unsaved_object_error(value.__class__.__name__))
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
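A brief, hedged usage sketch of lazy dereferencing and a reverse delete rule; the Author/Book models, database name, and field values are illustrative (not taken from the source above), and a reachable MongoDB instance is assumed.

from mongoengine import CASCADE, Document, ReferenceField, StringField, connect

connect("refdemo")  # illustrative database name

class Author(Document):
    name = StringField(required=True)

class Book(Document):
    title = StringField(required=True)
    # Deleting an Author also deletes their Books (reverse_delete_rule=CASCADE).
    author = ReferenceField(Author, reverse_delete_rule=CASCADE)

author = Author(name="Ada").save()
Book(title="Notes", author=author).save()
# Accessing `.author` below triggers a lazy dereference, i.e. one extra query.
print(Book.objects.first().author.name)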
|
ReferenceField
|
python
|
boto__boto3
|
boto3/resources/action.py
|
{
"start": 6333,
"end": 8079
}
|
class ____:
"""
A class representing a callable waiter action on a resource, for example
``s3.Bucket('foo').wait_until_bucket_exists()``.
The waiter action may construct parameters from existing resource
identifiers.
    :type waiter_model: :py:class:`~boto3.resources.model.Waiter`
:param waiter_model: The action waiter.
:type waiter_resource_name: string
:param waiter_resource_name: The name of the waiter action for the
resource. It usually begins with a
``wait_until_``
"""
def __init__(self, waiter_model, waiter_resource_name):
self._waiter_model = waiter_model
self._waiter_resource_name = waiter_resource_name
def __call__(self, parent, *args, **kwargs):
"""
Perform the wait operation after building operation
parameters.
:type parent: :py:class:`~boto3.resources.base.ServiceResource`
:param parent: The resource instance to which this action is attached.
"""
client_waiter_name = xform_name(self._waiter_model.waiter_name)
# First, build predefined params and then update with the
# user-supplied kwargs, which allows overriding the pre-built
# params if needed.
params = create_request_parameters(parent, self._waiter_model)
params.update(kwargs)
logger.debug(
'Calling %s:%s with %r',
parent.meta.service_name,
self._waiter_resource_name,
params,
)
client = parent.meta.client
waiter = client.get_waiter(client_waiter_name)
response = waiter.wait(**params)
logger.debug('Response: %r', response)
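A hedged sketch of how this action surfaces to users of the resource API; the bucket name is illustrative and valid AWS credentials are assumed.

import boto3

s3 = boto3.resource("s3")
bucket = s3.Bucket("example-bucket")  # illustrative bucket name
# Resolves to a waiter action: predefined parameters are built from the
# resource identifiers, merged with any kwargs, and passed to the client waiter.
bucket.wait_until_exists()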
|
WaiterAction
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 57330,
"end": 57678
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("id", "client_mutation_id")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
|
ApproveVerifiableDomainInput
|
python
|
PrefectHQ__prefect
|
tests/server/models/test_csrf_token.py
|
{
"start": 2473,
"end": 3686
}
|
class ____:
async def test_can_get_token_for_client(
self, session: AsyncSession, csrf_token: core.CsrfToken
):
token = await models.csrf_token.read_token_for_client(
session=session, client="client123"
)
assert token
assert token.client == csrf_token.client
assert token.token == csrf_token.token
assert token.expiration == csrf_token.expiration
async def test_none_no_token_for_client(
self, session: AsyncSession, csrf_token: core.CsrfToken
):
token = await models.csrf_token.read_token_for_client(
session=session, client="unknown-client"
)
assert token is None
async def test_none_expired_token(
self, session: AsyncSession, csrf_token: core.CsrfToken
):
await session.execute(
sa.update(orm_models.CsrfToken)
.where(orm_models.CsrfToken.client == csrf_token.client)
.values(expiration=datetime.now(timezone.utc) - timedelta(days=1))
)
token = await models.csrf_token.read_token_for_client(
session=session, client=csrf_token.client
)
assert token is None
|
TestTokenForClient
|
python
|
pytorch__pytorch
|
torch/distributed/checkpoint/default_planner.py
|
{
"start": 10846,
"end": 15995
}
|
class ____(LoadPlanner):
"""
DefaultLoadPlanner that adds multiple features on top of LoadPlanner.
In particular it adds the following:
flatten_state_dict: Handle state_dict with nested dicts
flatten_sharded_tensors: For FSDP in 2D parallel mode
allow_partial_load: If False, will raise a runtime error if a key is present in state_dict, but not in the checkpoint.
"""
original_state_dict: STATE_DICT_TYPE
mappings: FLATTEN_MAPPING
def __init__(
self,
flatten_state_dict: bool = True,
flatten_sharded_tensors: bool = True,
allow_partial_load: bool = False,
) -> None:
self.flatten_state_dict = flatten_state_dict
self.flatten_sharded_tensors = flatten_sharded_tensors
self.original_state_dict = {}
self.mappings = {}
self.allow_partial_load = allow_partial_load
def set_up_planner(
self,
state_dict: STATE_DICT_TYPE,
metadata: Optional[Metadata] = None,
is_coordinator: bool = False,
) -> None:
_init_state_dict(state_dict)
self.original_state_dict = state_dict
if self.flatten_sharded_tensors:
state_dict = _flatten_sharded_tensors(state_dict)
if self.flatten_state_dict:
state_dict, self.mappings = flatten_state_dict(state_dict)
self.state_dict = state_dict
self.metadata = metadata
self.is_coordinator = is_coordinator
def create_local_plan(self) -> LoadPlan:
if self.metadata is None:
raise AssertionError("self.metadata is not None")
if self.flatten_state_dict:
# To support checkpoints that are saved before v2.4, we have to
# differentiate if the missing keys are due to old checkpoints.
# The contracts are:
            # 1. There are 4 cases when we find a missing key.
            #    1.1 Actual missing key, but allow_partial_load is False
            #    1.2 Actual missing key, but allow_partial_load is True
            #    1.3 Old checkpoint, but allow_partial_load is False
            #    1.4 Old checkpoint, but allow_partial_load is True
            # 2. If we find a missing key, we first convert the keys back to
            #    the key format of v2.3.
            # 3. If any of the previously missing keys are in the v2.3 keys, we
            #    assume this is an old checkpoint.
            # 4. Pass the state_dict to `create_default_local_load_plan()`,
            #    which has the logic to check missing keys for allow_partial_load.
            # So for cases 1.2 and 1.4, we delegate the allow_partial_load check to
            # `create_default_local_load_plan()`. The logic here is to determine
            # whether the checkpoint belongs to 2.3 (or before) or 2.4 (or after).
current_keys = set(self.state_dict.keys())
load_keys = set(self.metadata.state_dict_metadata.keys())
missing_keys = load_keys - current_keys
if missing_keys:
_version._derived_version = "2_3"
old_state_dict, old_mappings = flatten_state_dict(
self.original_state_dict
)
old_keys = set(old_state_dict.keys())
if old_keys & missing_keys:
self.state_dict, self.mappings = old_state_dict, old_mappings
# _derived_version is only used by flatten_state_dict now.
# Set it back to None so that later we can save to a new version.
_version._derived_version = None
return create_default_local_load_plan(
self.state_dict, self.metadata, not self.allow_partial_load
)
def create_global_plan(self, global_plan: list[LoadPlan]) -> list[LoadPlan]:
return create_default_global_load_plan(global_plan)
def finish_plan(self, new_plan: LoadPlan) -> LoadPlan:
return new_plan
def load_bytes(self, read_item: ReadItem, value: io.BytesIO) -> None:
if self.flatten_state_dict:
set_element(
self.original_state_dict,
self.mappings[read_item.dest_index.fqn],
torch.load(value, weights_only=False),
)
else:
self.state_dict[read_item.dest_index.fqn] = torch.load(
value, weights_only=False
)
def resolve_tensor(self, read_item: ReadItem):
tensor = self.lookup_tensor(read_item.dest_index)
return self.transform_tensor(read_item, tensor)
def commit_tensor(self, read_item: ReadItem, tensor: torch.Tensor) -> None:
pass
def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor:
"""Extension from the planner interface to make it easy to extend the default planner."""
return find_state_dict_object(self.state_dict, index)
def transform_tensor(self, read_item: ReadItem, tensor: torch.Tensor):
"""Extension from the planner interface to make it easy to extend the default planner."""
return narrow_tensor_by_index(tensor, read_item.dest_offsets, read_item.lengths)
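A minimal, hedged sketch of loading a checkpoint with this planner; the path and model are illustrative, a matching `dcp.save` is assumed to have produced the checkpoint, and a process group may be required depending on the setup.

import torch
import torch.distributed.checkpoint as dcp

model = torch.nn.Linear(4, 4)  # illustrative model
state_dict = {"model": model.state_dict()}
dcp.load(
    state_dict,
    storage_reader=dcp.FileSystemReader("/tmp/ckpt"),  # illustrative path
    planner=dcp.DefaultLoadPlanner(allow_partial_load=True),
)
model.load_state_dict(state_dict["model"])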
|
DefaultLoadPlanner
|
python
|
huggingface__transformers
|
src/transformers/models/emu3/modeling_emu3.py
|
{
"start": 43748,
"end": 46758
}
|
class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Emu3Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Emu3Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
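A small numeric sketch of the default inverse-frequency formula above, inv_freq[i] = 1 / base**(2i / dim); the base and head dimension below are illustrative values.

import torch

base, dim = 10000.0, 8  # illustrative rope_theta and head_dim
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
print(inv_freq)  # tensor([1.0000, 0.1000, 0.0100, 0.0010])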
@auto_docstring
|
Emu3RotaryEmbedding
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 655486,
"end": 655918
}
|
class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("EnterpriseServerUserAccountsUpload", graphql_name="node")
"""The item at the end of the edge."""
|
EnterpriseServerUserAccountsUploadEdge
|
python
|
lepture__authlib
|
authlib/jose/rfc7515/models.py
|
{
"start": 1979,
"end": 2448
}
|
class ____(dict):
"""A dict instance to represent a JWS object."""
def __init__(self, header, payload, type="compact"):
super().__init__(
header=header,
payload=payload,
)
self.header = header
self.payload = payload
self.type = type
@property
def headers(self):
"""Alias of ``header`` for JSON typed JWS."""
if self.type == "json":
return self["header"]
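A brief, hedged sketch of producing and consuming such an object via Authlib's compact JWS API; the key and payload are illustrative.

from authlib.jose import JsonWebSignature

jws = JsonWebSignature()
secret = b"illustrative-secret"
token = jws.serialize_compact({"alg": "HS256"}, b"hello", secret)
obj = jws.deserialize_compact(token, secret)
# The result behaves like the dict above, exposing header and payload.
print(obj["header"], obj["payload"])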
|
JWSObject
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/axes/_axes.py
|
{
"start": 2827,
"end": 370944
}
|
class ____(_AxesBase):
"""
An Axes object encapsulates all the elements of an individual (sub-)plot in
a figure.
It contains most of the (sub-)plot elements: `~.axis.Axis`,
`~.axis.Tick`, `~.lines.Line2D`, `~.text.Text`, `~.patches.Polygon`, etc.,
and sets the coordinate system.
Like all visible elements in a figure, Axes is an `.Artist` subclass.
The `Axes` instance supports callbacks through a callbacks attribute which
is a `~.cbook.CallbackRegistry` instance. The events you can connect to
are 'xlim_changed' and 'ylim_changed' and the callback will be called with
func(*ax*) where *ax* is the `Axes` instance.
.. note::
As a user, you do not instantiate Axes directly, but use Axes creation
methods instead; e.g. from `.pyplot` or `.Figure`:
`~.pyplot.subplots`, `~.pyplot.subplot_mosaic` or `.Figure.add_axes`.
"""
### Labelling, legend and texts
def get_title(self, loc="center"):
"""
Get an Axes title.
Get one of the three available Axes titles. The available titles
are positioned above the Axes in the center, flush with the left
edge, and flush with the right edge.
Parameters
----------
loc : {'center', 'left', 'right'}, str, default: 'center'
Which title to return.
Returns
-------
str
The title text string.
"""
titles = {'left': self._left_title,
'center': self.title,
'right': self._right_title}
title = _api.check_getitem(titles, loc=loc.lower())
return title.get_text()
def set_title(self, label, fontdict=None, loc=None, pad=None, *, y=None,
**kwargs):
"""
Set a title for the Axes.
Set one of the three available Axes titles. The available titles
are positioned above the Axes in the center, flush with the left
edge, and flush with the right edge.
Parameters
----------
label : str
Text to use for the title
fontdict : dict
.. admonition:: Discouraged
The use of *fontdict* is discouraged. Parameters should be passed as
individual keyword arguments or using dictionary-unpacking
``set_title(..., **fontdict)``.
A dictionary controlling the appearance of the title text,
the default *fontdict* is::
{'fontsize': rcParams['axes.titlesize'],
'fontweight': rcParams['axes.titleweight'],
'color': rcParams['axes.titlecolor'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
loc : {'center', 'left', 'right'}, default: :rc:`axes.titlelocation`
Which title to set.
y : float, default: :rc:`axes.titley`
Vertical Axes location for the title (1.0 is the top). If
None (the default) and :rc:`axes.titley` is also None, y is
determined automatically to avoid decorators on the Axes.
pad : float, default: :rc:`axes.titlepad`
The offset of the title from the top of the Axes, in points.
Returns
-------
`.Text`
The matplotlib text instance representing the title
Other Parameters
----------------
**kwargs : `~matplotlib.text.Text` properties
Other keyword arguments are text properties, see `.Text` for a list
of valid text properties.
"""
loc = mpl._val_or_rc(loc, 'axes.titlelocation').lower()
y = mpl._val_or_rc(y, 'axes.titley')
if y is None:
y = 1.0
else:
self._autotitlepos = False
kwargs['y'] = y
titles = {'left': self._left_title,
'center': self.title,
'right': self._right_title}
title = _api.check_getitem(titles, loc=loc)
default = {
'fontsize': mpl.rcParams['axes.titlesize'],
'fontweight': mpl.rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
titlecolor = mpl.rcParams['axes.titlecolor']
if not cbook._str_lower_equal(titlecolor, 'auto'):
default["color"] = titlecolor
self._set_title_offset_trans(float(mpl._val_or_rc(pad, 'axes.titlepad')))
title.set_text(label)
title.update(default)
if fontdict is not None:
title.update(fontdict)
title._internal_update(kwargs)
return title
def get_legend_handles_labels(self, legend_handler_map=None):
"""
Return handles and labels for legend
``ax.legend()`` is equivalent to ::
h, l = ax.get_legend_handles_labels()
ax.legend(h, l)
"""
# pass through to legend.
handles, labels = mlegend._get_legend_handles_labels(
[self], legend_handler_map)
return handles, labels
@_docstring.interpd
def legend(self, *args, **kwargs):
"""
Place a legend on the Axes.
Call signatures::
legend()
legend(handles, labels)
legend(handles=handles)
legend(labels)
The call signatures correspond to the following different ways to use
this method:
**1. Automatic detection of elements to be shown in the legend**
The elements to be added to the legend are automatically determined,
when you do not pass in any extra arguments.
In this case, the labels are taken from the artist. You can specify
them either at artist creation or by calling the
:meth:`~.Artist.set_label` method on the artist::
ax.plot([1, 2, 3], label='Inline label')
ax.legend()
or::
line, = ax.plot([1, 2, 3])
line.set_label('Label via method')
ax.legend()
.. note::
Specific artists can be excluded from the automatic legend element
selection by using a label starting with an underscore, "_".
A string starting with an underscore is the default label for all
artists, so calling `.Axes.legend` without any arguments and
without setting the labels manually will result in a ``UserWarning``
and an empty legend being drawn.
**2. Explicitly listing the artists and labels in the legend**
For full control of which artists have a legend entry, it is possible
to pass an iterable of legend artists followed by an iterable of
legend labels respectively::
ax.legend([line1, line2, line3], ['label1', 'label2', 'label3'])
**3. Explicitly listing the artists in the legend**
This is similar to 2, but the labels are taken from the artists'
label properties. Example::
line1, = ax.plot([1, 2, 3], label='label1')
line2, = ax.plot([1, 2, 3], label='label2')
ax.legend(handles=[line1, line2])
**4. Labeling existing plot elements**
.. admonition:: Discouraged
This call signature is discouraged, because the relation between
plot elements and labels is only implicit by their order and can
easily be mixed up.
To make a legend for all artists on an Axes, call this function with
an iterable of strings, one for each legend item. For example::
ax.plot([1, 2, 3])
ax.plot([5, 6, 7])
ax.legend(['First line', 'Second line'])
Parameters
----------
handles : list of (`.Artist` or tuple of `.Artist`), optional
A list of Artists (lines, patches) to be added to the legend.
Use this together with *labels*, if you need full control on what
is shown in the legend and the automatic mechanism described above
is not sufficient.
The length of handles and labels should be the same in this
case. If they are not, they are truncated to the smaller length.
If an entry contains a tuple, then the legend handler for all Artists in the
tuple will be placed alongside a single label.
labels : list of str, optional
A list of labels to show next to the artists.
Use this together with *handles*, if you need full control on what
is shown in the legend and the automatic mechanism described above
is not sufficient.
Returns
-------
`~matplotlib.legend.Legend`
Other Parameters
----------------
%(_legend_kw_axes)s
See Also
--------
.Figure.legend
Notes
-----
Some artists are not supported by this function. See
:ref:`legend_guide` for details.
Examples
--------
.. plot:: gallery/text_labels_and_annotations/legend.py
"""
handles, labels, kwargs = mlegend._parse_legend_args([self], *args, **kwargs)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
self.legend_._remove_method = self._remove_legend
return self.legend_
def _remove_legend(self, legend):
self.legend_ = None
def inset_axes(self, bounds, *, transform=None, zorder=5, **kwargs):
"""
Add a child inset Axes to this existing Axes.
Parameters
----------
bounds : [x0, y0, width, height]
Lower-left corner of inset Axes, and its width and height.
transform : `.Transform`
Defaults to `!ax.transAxes`, i.e. the units of *rect* are in
Axes-relative coordinates.
projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
The projection type of the inset `~.axes.Axes`. *str* is the name
of a custom projection, see `~matplotlib.projections`. The default
None results in a 'rectilinear' projection.
polar : bool, default: False
If True, equivalent to projection='polar'.
axes_class : subclass type of `~.axes.Axes`, optional
The `.axes.Axes` subclass that is instantiated. This parameter
is incompatible with *projection* and *polar*. See
:ref:`axisartist_users-guide-index` for examples.
zorder : number
Defaults to 5 (same as `.Axes.legend`). Adjust higher or lower
to change whether it is above or below data plotted on the
parent Axes.
**kwargs
Other keyword arguments are passed on to the inset Axes class.
Returns
-------
ax
The created `~.axes.Axes` instance.
Examples
--------
This example makes two inset Axes, the first is in Axes-relative
coordinates, and the second in data-coordinates::
fig, ax = plt.subplots()
ax.plot(range(10))
axin1 = ax.inset_axes([0.8, 0.1, 0.15, 0.15])
axin2 = ax.inset_axes(
[5, 7, 2.3, 2.3], transform=ax.transData)
"""
if transform is None:
transform = self.transAxes
kwargs.setdefault('label', 'inset_axes')
# This puts the rectangle into figure-relative coordinates.
inset_locator = _TransformedBoundsLocator(bounds, transform)
bounds = inset_locator(self, None).bounds
fig = self.get_figure(root=False)
projection_class, pkw = fig._process_projection_requirements(**kwargs)
inset_ax = projection_class(fig, bounds, zorder=zorder, **pkw)
# this locator lets the axes move if in data coordinates.
# it gets called in `ax.apply_aspect() (of all places)
inset_ax.set_axes_locator(inset_locator)
self.add_child_axes(inset_ax)
return inset_ax
@_docstring.interpd
def indicate_inset(self, bounds=None, inset_ax=None, *, transform=None,
facecolor='none', edgecolor='0.5', alpha=0.5,
zorder=None, **kwargs):
"""
Add an inset indicator to the Axes. This is a rectangle on the plot
at the position indicated by *bounds* that optionally has lines that
connect the rectangle to an inset Axes (`.Axes.inset_axes`).
Warnings
--------
This method is experimental as of 3.0, and the API may change.
Parameters
----------
bounds : [x0, y0, width, height], optional
Lower-left corner of rectangle to be marked, and its width
and height. If not set, the bounds will be calculated from the
data limits of *inset_ax*, which must be supplied.
inset_ax : `.Axes`, optional
An optional inset Axes to draw connecting lines to. Two lines are
drawn connecting the indicator box to the inset Axes on corners
chosen so as to not overlap with the indicator box.
transform : `.Transform`
Transform for the rectangle coordinates. Defaults to
``ax.transData``, i.e. the units of *rect* are in the Axes' data
coordinates.
facecolor : :mpltype:`color`, default: 'none'
Facecolor of the rectangle.
edgecolor : :mpltype:`color`, default: '0.5'
Color of the rectangle and color of the connecting lines.
alpha : float or None, default: 0.5
Transparency of the rectangle and connector lines. If not
``None``, this overrides any alpha value included in the
*facecolor* and *edgecolor* parameters.
zorder : float, default: 4.99
Drawing order of the rectangle and connector lines. The default,
4.99, is just below the default level of inset Axes.
**kwargs
Other keyword arguments are passed on to the `.Rectangle` patch:
%(Rectangle:kwdoc)s
Returns
-------
inset_indicator : `.inset.InsetIndicator`
An artist which contains
inset_indicator.rectangle : `.Rectangle`
The indicator frame.
inset_indicator.connectors : 4-tuple of `.patches.ConnectionPatch`
The four connector lines connecting to (lower_left, upper_left,
lower_right upper_right) corners of *inset_ax*. Two lines are
set with visibility to *False*, but the user can set the
visibility to True if the automatic choice is not deemed correct.
.. versionchanged:: 3.10
Previously the rectangle and connectors tuple were returned.
"""
# to make the Axes connectors work, we need to apply the aspect to
# the parent Axes.
self.apply_aspect()
if transform is None:
transform = self.transData
kwargs.setdefault('label', '_indicate_inset')
indicator_patch = minset.InsetIndicator(
bounds, inset_ax=inset_ax,
facecolor=facecolor, edgecolor=edgecolor, alpha=alpha,
zorder=zorder, transform=transform, **kwargs)
self.add_artist(indicator_patch)
return indicator_patch
def indicate_inset_zoom(self, inset_ax, **kwargs):
"""
Add an inset indicator rectangle to the Axes based on the axis
limits for an *inset_ax* and draw connectors between *inset_ax*
and the rectangle.
Warnings
--------
This method is experimental as of 3.0, and the API may change.
Parameters
----------
inset_ax : `.Axes`
Inset Axes to draw connecting lines to. Two lines are
drawn connecting the indicator box to the inset Axes on corners
chosen so as to not overlap with the indicator box.
**kwargs
Other keyword arguments are passed on to `.Axes.indicate_inset`
Returns
-------
inset_indicator : `.inset.InsetIndicator`
An artist which contains
inset_indicator.rectangle : `.Rectangle`
The indicator frame.
inset_indicator.connectors : 4-tuple of `.patches.ConnectionPatch`
The four connector lines connecting to (lower_left, upper_left,
lower_right upper_right) corners of *inset_ax*. Two lines are
set with visibility to *False*, but the user can set the
visibility to True if the automatic choice is not deemed correct.
.. versionchanged:: 3.10
Previously the rectangle and connectors tuple were returned.
"""
return self.indicate_inset(None, inset_ax, **kwargs)
@_docstring.interpd
def secondary_xaxis(self, location, functions=None, *, transform=None, **kwargs):
"""
Add a second x-axis to this `~.axes.Axes`.
For example if we want to have a second scale for the data plotted on
the xaxis.
%(_secax_docstring)s
Examples
--------
The main axis shows frequency, and the secondary axis shows period.
.. plot::
fig, ax = plt.subplots()
ax.loglog(range(1, 360, 5), range(1, 360, 5))
ax.set_xlabel('frequency [Hz]')
def invert(x):
# 1/x with special treatment of x == 0
x = np.array(x).astype(float)
near_zero = np.isclose(x, 0)
x[near_zero] = np.inf
x[~near_zero] = 1 / x[~near_zero]
return x
# the inverse of 1/x is itself
secax = ax.secondary_xaxis('top', functions=(invert, invert))
secax.set_xlabel('Period [s]')
plt.show()
To add a secondary axis relative to your data, you can pass a transform
to the new axis.
.. plot::
fig, ax = plt.subplots()
ax.plot(range(0, 5), range(-1, 4))
# Pass 'ax.transData' as a transform to place the axis
# relative to your data at y=0
secax = ax.secondary_xaxis(0, transform=ax.transData)
"""
if not (location in ['top', 'bottom'] or isinstance(location, Real)):
raise ValueError('secondary_xaxis location must be either '
'a float or "top"/"bottom"')
secondary_ax = SecondaryAxis(self, 'x', location, functions,
transform, **kwargs)
self.add_child_axes(secondary_ax)
return secondary_ax
@_docstring.interpd
def secondary_yaxis(self, location, functions=None, *, transform=None, **kwargs):
"""
Add a second y-axis to this `~.axes.Axes`.
For example if we want to have a second scale for the data plotted on
the yaxis.
%(_secax_docstring)s
Examples
--------
Add a secondary Axes that converts from radians to degrees
.. plot::
fig, ax = plt.subplots()
ax.plot(range(1, 360, 5), range(1, 360, 5))
ax.set_ylabel('degrees')
secax = ax.secondary_yaxis('right', functions=(np.deg2rad,
np.rad2deg))
secax.set_ylabel('radians')
To add a secondary axis relative to your data, you can pass a transform
to the new axis.
.. plot::
fig, ax = plt.subplots()
ax.plot(range(0, 5), range(-1, 4))
# Pass 'ax.transData' as a transform to place the axis
# relative to your data at x=3
secax = ax.secondary_yaxis(3, transform=ax.transData)
"""
if not (location in ['left', 'right'] or isinstance(location, Real)):
raise ValueError('secondary_yaxis location must be either '
'a float or "left"/"right"')
secondary_ax = SecondaryAxis(self, 'y', location, functions,
transform, **kwargs)
self.add_child_axes(secondary_ax)
return secondary_ax
@_docstring.interpd
def text(self, x, y, s, fontdict=None, **kwargs):
"""
Add text to the Axes.
Add the text *s* to the Axes at location *x*, *y* in data coordinates,
with a default ``horizontalalignment`` on the ``left`` and
``verticalalignment`` at the ``baseline``. See
:doc:`/gallery/text_labels_and_annotations/text_alignment`.
Parameters
----------
x, y : float
The position to place the text. By default, this is in data
coordinates. The coordinate system can be changed using the
*transform* parameter.
s : str
The text.
fontdict : dict, default: None
.. admonition:: Discouraged
The use of *fontdict* is discouraged. Parameters should be passed as
individual keyword arguments or using dictionary-unpacking
``text(..., **fontdict)``.
A dictionary to override the default text properties. If fontdict
is None, the defaults are determined by `.rcParams`.
Returns
-------
`.Text`
The created `.Text` instance.
Other Parameters
----------------
**kwargs : `~matplotlib.text.Text` properties.
Other miscellaneous text parameters.
%(Text:kwdoc)s
Examples
--------
Individual keyword arguments can be used to override any given
parameter::
>>> text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords ((0, 0) is
lower-left and (1, 1) is upper-right). The example below places
text in the center of the Axes::
>>> text(0.5, 0.5, 'matplotlib', horizontalalignment='center',
... verticalalignment='center', transform=ax.transAxes)
You can put a rectangular box around the text instance (e.g., to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of `~matplotlib.patches.Rectangle`
properties. For example::
>>> text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
"""
effective_kwargs = {
'verticalalignment': 'baseline',
'horizontalalignment': 'left',
'transform': self.transData,
'clip_on': False,
**(fontdict if fontdict is not None else {}),
**kwargs,
}
t = mtext.Text(x, y, text=s, **effective_kwargs)
if t.get_clip_path() is None:
t.set_clip_path(self.patch)
self._add_text(t)
return t
@_docstring.interpd
def annotate(self, text, xy, xytext=None, xycoords='data', textcoords=None,
arrowprops=None, annotation_clip=None, **kwargs):
# Signature must match Annotation. This is verified in
# test_annotate_signature().
a = mtext.Annotation(text, xy, xytext=xytext, xycoords=xycoords,
textcoords=textcoords, arrowprops=arrowprops,
annotation_clip=annotation_clip, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
if kwargs.get('clip_on', False) and a.get_clip_path() is None:
a.set_clip_path(self.patch)
self._add_text(a)
return a
annotate.__doc__ = mtext.Annotation.__init__.__doc__
#### Lines and spans
@_docstring.interpd
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal line spanning the whole or fraction of the Axes.
Note: If you want to set x-limits in data coordinates, use
`~.Axes.hlines` instead.
Parameters
----------
y : float, default: 0
y position in :ref:`data coordinates <coordinate-systems>`.
xmin : float, default: 0
The start x-position in :ref:`axes coordinates <coordinate-systems>`.
Should be between 0 and 1, 0 being the far left of the plot,
1 the far right of the plot.
xmax : float, default: 1
The end x-position in :ref:`axes coordinates <coordinate-systems>`.
Should be between 0 and 1, 0 being the far left of the plot,
1 the far right of the plot.
Returns
-------
`~matplotlib.lines.Line2D`
A `.Line2D` specified via two points ``(xmin, y)``, ``(xmax, y)``.
Its transform is set such that *x* is in
:ref:`axes coordinates <coordinate-systems>` and *y* is in
:ref:`data coordinates <coordinate-systems>`.
This is still a generic line and the horizontal character is only
realized through using identical *y* values for both points. Thus,
if you want to change the *y* value later, you have to provide two
values ``line.set_ydata([3, 3])``.
Other Parameters
----------------
**kwargs
Valid keyword arguments are `.Line2D` properties, except for
'transform':
%(Line2D:kwdoc)s
See Also
--------
hlines : Add horizontal lines in data coordinates.
axhspan : Add a horizontal span (rectangle) across the axis.
axline : Add a line with an arbitrary slope.
Examples
--------
* draw a thick red hline at 'y' = 0 that spans the xrange::
>>> axhline(linewidth=4, color='r')
* draw a default hline at 'y' = 1 that spans the xrange::
>>> axhline(y=1)
* draw a default hline at 'y' = .5 that spans the middle half of
the xrange::
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
"""
self._check_no_units([xmin, xmax], ['xmin', 'xmax'])
if "transform" in kwargs:
raise ValueError("'transform' is not allowed as a keyword "
"argument; axhline generates its own transform.")
ymin, ymax = self.get_ybound()
# Strip away the units for comparison with non-unitized bounds.
yy, = self._process_unit_info([("y", y)], kwargs)
scaley = (yy < ymin) or (yy > ymax)
trans = self.get_yaxis_transform(which='grid')
l = mlines.Line2D([xmin, xmax], [y, y], transform=trans, **kwargs)
self.add_line(l)
l.get_path()._interpolation_steps = mpl.axis.GRIDLINE_INTERPOLATION_STEPS
if scaley:
self._request_autoscale_view("y")
return l
@_docstring.interpd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
Add a vertical line spanning the whole or fraction of the Axes.
Note: If you want to set y-limits in data coordinates, use
`~.Axes.vlines` instead.
Parameters
----------
x : float, default: 0
x position in :ref:`data coordinates <coordinate-systems>`.
ymin : float, default: 0
The start y-position in :ref:`axes coordinates <coordinate-systems>`.
Should be between 0 and 1, 0 being the bottom of the plot, 1 the
top of the plot.
ymax : float, default: 1
The end y-position in :ref:`axes coordinates <coordinate-systems>`.
Should be between 0 and 1, 0 being the bottom of the plot, 1 the
top of the plot.
Returns
-------
`~matplotlib.lines.Line2D`
A `.Line2D` specified via two points ``(x, ymin)``, ``(x, ymax)``.
Its transform is set such that *x* is in
:ref:`data coordinates <coordinate-systems>` and *y* is in
:ref:`axes coordinates <coordinate-systems>`.
This is still a generic line and the vertical character is only
realized through using identical *x* values for both points. Thus,
if you want to change the *x* value later, you have to provide two
values ``line.set_xdata([3, 3])``.
Other Parameters
----------------
**kwargs
Valid keyword arguments are `.Line2D` properties, except for
'transform':
%(Line2D:kwdoc)s
See Also
--------
vlines : Add vertical lines in data coordinates.
axvspan : Add a vertical span (rectangle) across the axis.
axline : Add a line with an arbitrary slope.
Examples
--------
* draw a thick red vline at *x* = 0 that spans the yrange::
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange::
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange::
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
"""
self._check_no_units([ymin, ymax], ['ymin', 'ymax'])
if "transform" in kwargs:
raise ValueError("'transform' is not allowed as a keyword "
"argument; axvline generates its own transform.")
xmin, xmax = self.get_xbound()
# Strip away the units for comparison with non-unitized bounds.
xx, = self._process_unit_info([("x", x)], kwargs)
scalex = (xx < xmin) or (xx > xmax)
trans = self.get_xaxis_transform(which='grid')
l = mlines.Line2D([x, x], [ymin, ymax], transform=trans, **kwargs)
self.add_line(l)
l.get_path()._interpolation_steps = mpl.axis.GRIDLINE_INTERPOLATION_STEPS
if scalex:
self._request_autoscale_view("x")
return l
@staticmethod
def _check_no_units(vals, names):
# Helper method to check that vals are not unitized
for val, name in zip(vals, names):
if not munits._is_natively_supported(val):
raise ValueError(f"{name} must be a single scalar value, "
f"but got {val}")
@_docstring.interpd
def axline(self, xy1, xy2=None, *, slope=None, **kwargs):
"""
Add an infinitely long straight line.
The line can be defined either by two points *xy1* and *xy2*, or
by one point *xy1* and a *slope*.
This draws a straight line "on the screen", regardless of the x and y
scales, and is thus also suitable for drawing exponential decays in
semilog plots, power laws in loglog plots, etc. However, *slope*
        should only be used with linear scales; it has no clear meaning for
all other scales, and thus the behavior is undefined. Please specify
the line using the points *xy1*, *xy2* for non-linear scales.
The *transform* keyword argument only applies to the points *xy1*,
*xy2*. The *slope* (if given) is always in data coordinates. This can
be used e.g. with ``ax.transAxes`` for drawing grid lines with a fixed
slope.
Parameters
----------
xy1, xy2 : (float, float)
Points for the line to pass through.
Either *xy2* or *slope* has to be given.
slope : float, optional
The slope of the line. Either *xy2* or *slope* has to be given.
Returns
-------
`.AxLine`
Other Parameters
----------------
**kwargs
Valid kwargs are `.Line2D` properties
%(Line2D:kwdoc)s
See Also
--------
axhline : for horizontal lines
axvline : for vertical lines
Examples
--------
Draw a thick red line passing through (0, 0) and (1, 1)::
>>> axline((0, 0), (1, 1), linewidth=4, color='r')
"""
if slope is not None and (self.get_xscale() != 'linear' or
self.get_yscale() != 'linear'):
raise TypeError("'slope' cannot be used with non-linear scales")
datalim = [xy1] if xy2 is None else [xy1, xy2]
if "transform" in kwargs:
# if a transform is passed (i.e. line points not in data space),
# data limits should not be adjusted.
datalim = []
line = mlines.AxLine(xy1, xy2, slope, **kwargs)
# Like add_line, but correctly handling data limits.
self._set_artist_props(line)
if line.get_clip_path() is None:
line.set_clip_path(self.patch)
if not line.get_label():
line.set_label(f"_child{len(self._children)}")
self._children.append(line)
line._remove_method = self._children.remove
self.update_datalim(datalim)
self._request_autoscale_view()
return line
@_docstring.interpd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal span (rectangle) across the Axes.
The rectangle spans from *ymin* to *ymax* vertically, and, by default,
the whole x-axis horizontally. The x-span can be set using *xmin*
(default: 0) and *xmax* (default: 1) which are in axis units; e.g.
``xmin = 0.5`` always refers to the middle of the x-axis regardless of
the limits set by `~.Axes.set_xlim`.
Parameters
----------
ymin : float
Lower y-coordinate of the span, in data units.
ymax : float
Upper y-coordinate of the span, in data units.
xmin : float, default: 0
Lower x-coordinate of the span, in x-axis (0-1) units.
xmax : float, default: 1
Upper x-coordinate of the span, in x-axis (0-1) units.
Returns
-------
`~matplotlib.patches.Rectangle`
Horizontal span (rectangle) from (xmin, ymin) to (xmax, ymax).
Other Parameters
----------------
**kwargs : `~matplotlib.patches.Rectangle` properties
%(Rectangle:kwdoc)s
See Also
--------
axvspan : Add a vertical span across the Axes.
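        Examples
        --------
        Draw a horizontal, gray, translucent rectangle from y = 0.25 to
        y = 0.75 that spans the xrange of the Axes.
        >>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)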
"""
# Strip units away.
self._check_no_units([xmin, xmax], ['xmin', 'xmax'])
(ymin, ymax), = self._process_unit_info([("y", [ymin, ymax])], kwargs)
p = mpatches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, **kwargs)
p.set_transform(self.get_yaxis_transform(which="grid"))
# For Rectangles and non-separable transforms, add_patch can be buggy
# and update the x limits even though it shouldn't do so for an
# yaxis_transformed patch, so undo that update.
ix = self.dataLim.intervalx.copy()
mx = self.dataLim.minposx
self.add_patch(p)
self.dataLim.intervalx = ix
self.dataLim.minposx = mx
p.get_path()._interpolation_steps = mpl.axis.GRIDLINE_INTERPOLATION_STEPS
self._request_autoscale_view("y")
return p
@_docstring.interpd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
Add a vertical span (rectangle) across the Axes.
The rectangle spans from *xmin* to *xmax* horizontally, and, by
default, the whole y-axis vertically. The y-span can be set using
*ymin* (default: 0) and *ymax* (default: 1) which are in axis units;
e.g. ``ymin = 0.5`` always refers to the middle of the y-axis
regardless of the limits set by `~.Axes.set_ylim`.
Parameters
----------
xmin : float
Lower x-coordinate of the span, in data units.
xmax : float
Upper x-coordinate of the span, in data units.
ymin : float, default: 0
Lower y-coordinate of the span, in y-axis units (0-1).
ymax : float, default: 1
Upper y-coordinate of the span, in y-axis units (0-1).
Returns
-------
`~matplotlib.patches.Rectangle`
Vertical span (rectangle) from (xmin, ymin) to (xmax, ymax).
Other Parameters
----------------
**kwargs : `~matplotlib.patches.Rectangle` properties
%(Rectangle:kwdoc)s
See Also
--------
axhspan : Add a horizontal span across the Axes.
Examples
--------
Draw a vertical, green, translucent rectangle from x = 1.25 to
x = 1.55 that spans the yrange of the Axes.
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
"""
# Strip units away.
self._check_no_units([ymin, ymax], ['ymin', 'ymax'])
(xmin, xmax), = self._process_unit_info([("x", [xmin, xmax])], kwargs)
p = mpatches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, **kwargs)
p.set_transform(self.get_xaxis_transform(which="grid"))
# For Rectangles and non-separable transforms, add_patch can be buggy
# and update the y limits even though it shouldn't do so for an
# xaxis_transformed patch, so undo that update.
iy = self.dataLim.intervaly.copy()
my = self.dataLim.minposy
self.add_patch(p)
self.dataLim.intervaly = iy
self.dataLim.minposy = my
p.get_path()._interpolation_steps = mpl.axis.GRIDLINE_INTERPOLATION_STEPS
self._request_autoscale_view("x")
return p
@_api.make_keyword_only("3.10", "label")
@_preprocess_data(replace_names=["y", "xmin", "xmax", "colors"],
label_namer="y")
def hlines(self, y, xmin, xmax, colors=None, linestyles='solid',
label='', **kwargs):
"""
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Parameters
----------
y : float or array-like
y-indexes where to plot the lines.
xmin, xmax : float or array-like
Respective beginning and end of each line. If scalars are
provided, all lines will have the same length.
        colors : :mpltype:`color` or list of color, default: :rc:`lines.color`
linestyles : {'solid', 'dashed', 'dashdot', 'dotted'}, default: 'solid'
label : str, default: ''
Returns
-------
`~matplotlib.collections.LineCollection`
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs : `~matplotlib.collections.LineCollection` properties.
See Also
--------
vlines : vertical lines
axhline : horizontal line across the Axes
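        Examples
        --------
        Draw horizontal line segments at y = 1, 2 and 3, each spanning
        x = 0 to x = 4.
        >>> hlines([1, 2, 3], 0, 4, colors='C0', linestyles='dashed')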
"""
# We do the conversion first since not all unitized data is uniform
xmin, xmax, y = self._process_unit_info(
[("x", xmin), ("x", xmax), ("y", y)], kwargs)
if not np.iterable(y):
y = [y]
if not np.iterable(xmin):
xmin = [xmin]
if not np.iterable(xmax):
xmax = [xmax]
# Create and combine masked_arrays from input
y, xmin, xmax = cbook._combine_masks(y, xmin, xmax)
y = np.ravel(y)
xmin = np.ravel(xmin)
xmax = np.ravel(xmax)
masked_verts = np.ma.empty((len(y), 2, 2))
masked_verts[:, 0, 0] = xmin
masked_verts[:, 0, 1] = y
masked_verts[:, 1, 0] = xmax
masked_verts[:, 1, 1] = y
lines = mcoll.LineCollection(masked_verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(lines, autolim=False)
lines._internal_update(kwargs)
if len(y) > 0:
# Extreme values of xmin/xmax/y. Using masked_verts here handles
# the case of y being a masked *object* array (as can be generated
# e.g. by errorbar()), which would make nanmin/nanmax stumble.
updatex = True
updatey = True
if self.name == "rectilinear":
datalim = lines.get_datalim(self.transData)
t = lines.get_transform()
updatex, updatey = t.contains_branch_separately(self.transData)
minx = np.nanmin(datalim.xmin)
maxx = np.nanmax(datalim.xmax)
miny = np.nanmin(datalim.ymin)
maxy = np.nanmax(datalim.ymax)
else:
minx = np.nanmin(masked_verts[..., 0])
maxx = np.nanmax(masked_verts[..., 0])
miny = np.nanmin(masked_verts[..., 1])
maxy = np.nanmax(masked_verts[..., 1])
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners, updatex, updatey)
self._request_autoscale_view()
return lines
@_api.make_keyword_only("3.10", "label")
@_preprocess_data(replace_names=["x", "ymin", "ymax", "colors"],
label_namer="x")
def vlines(self, x, ymin, ymax, colors=None, linestyles='solid',
label='', **kwargs):
"""
Plot vertical lines at each *x* from *ymin* to *ymax*.
Parameters
----------
x : float or array-like
x-indexes where to plot the lines.
ymin, ymax : float or array-like
Respective beginning and end of each line. If scalars are
provided, all lines will have the same length.
colors : :mpltype:`color` or list of color, default: :rc:`lines.color`
linestyles : {'solid', 'dashed', 'dashdot', 'dotted'}, default: 'solid'
label : str, default: ''
Returns
-------
`~matplotlib.collections.LineCollection`
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs : `~matplotlib.collections.LineCollection` properties.
See Also
--------
hlines : horizontal lines
axvline : vertical line across the Axes
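        Examples
        --------
        Draw vertical line segments at x = 1, 2 and 3, each spanning
        y = 0 to y = 4.
        >>> vlines([1, 2, 3], 0, 4, colors='C1')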
"""
# We do the conversion first since not all unitized data is uniform
x, ymin, ymax = self._process_unit_info(
[("x", x), ("y", ymin), ("y", ymax)], kwargs)
if not np.iterable(x):
x = [x]
if not np.iterable(ymin):
ymin = [ymin]
if not np.iterable(ymax):
ymax = [ymax]
# Create and combine masked_arrays from input
x, ymin, ymax = cbook._combine_masks(x, ymin, ymax)
x = np.ravel(x)
ymin = np.ravel(ymin)
ymax = np.ravel(ymax)
masked_verts = np.ma.empty((len(x), 2, 2))
masked_verts[:, 0, 0] = x
masked_verts[:, 0, 1] = ymin
masked_verts[:, 1, 0] = x
masked_verts[:, 1, 1] = ymax
lines = mcoll.LineCollection(masked_verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(lines, autolim=False)
lines._internal_update(kwargs)
if len(x) > 0:
# Extreme values of x/ymin/ymax. Using masked_verts here handles
# the case of x being a masked *object* array (as can be generated
# e.g. by errorbar()), which would make nanmin/nanmax stumble.
updatex = True
updatey = True
if self.name == "rectilinear":
datalim = lines.get_datalim(self.transData)
t = lines.get_transform()
updatex, updatey = t.contains_branch_separately(self.transData)
minx = np.nanmin(datalim.xmin)
maxx = np.nanmax(datalim.xmax)
miny = np.nanmin(datalim.ymin)
maxy = np.nanmax(datalim.ymax)
else:
minx = np.nanmin(masked_verts[..., 0])
maxx = np.nanmax(masked_verts[..., 0])
miny = np.nanmin(masked_verts[..., 1])
maxy = np.nanmax(masked_verts[..., 1])
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners, updatex, updatey)
self._request_autoscale_view()
return lines
@_api.make_keyword_only("3.10", "orientation")
@_preprocess_data(replace_names=["positions", "lineoffsets",
"linelengths", "linewidths",
"colors", "linestyles"])
@_docstring.interpd
def eventplot(self, positions, orientation='horizontal', lineoffsets=1,
linelengths=1, linewidths=None, colors=None, alpha=None,
linestyles='solid', **kwargs):
"""
Plot identical parallel lines at the given positions.
This type of plot is commonly used in neuroscience for representing
neural events, where it is usually called a spike raster, dot raster,
or raster plot.
However, it is useful in any situation where you wish to show the
timing or position of multiple sets of discrete events, such as the
arrival times of people to a business on each day of the month or the
date of hurricanes each year of the last century.
Parameters
----------
positions : array-like or list of array-like
A 1D array-like defines the positions of one sequence of events.
Multiple groups of events may be passed as a list of array-likes.
Each group can be styled independently by passing lists of values
to *lineoffsets*, *linelengths*, *linewidths*, *colors* and
*linestyles*.
Note that *positions* can be a 2D array, but in practice different
event groups usually have different counts so that one will use a
list of different-length arrays rather than a 2D array.
orientation : {'horizontal', 'vertical'}, default: 'horizontal'
The direction of the event sequence:
- 'horizontal': the events are arranged horizontally.
The indicator lines are vertical.
- 'vertical': the events are arranged vertically.
The indicator lines are horizontal.
lineoffsets : float or array-like, default: 1
The offset of the center of the lines from the origin, in the
direction orthogonal to *orientation*.
If *positions* is 2D, this can be a sequence with length matching
the length of *positions*.
linelengths : float or array-like, default: 1
The total height of the lines (i.e. the lines stretches from
``lineoffset - linelength/2`` to ``lineoffset + linelength/2``).
If *positions* is 2D, this can be a sequence with length matching
the length of *positions*.
linewidths : float or array-like, default: :rc:`lines.linewidth`
The line width(s) of the event lines, in points.
If *positions* is 2D, this can be a sequence with length matching
the length of *positions*.
colors : :mpltype:`color` or list of color, default: :rc:`lines.color`
The color(s) of the event lines.
If *positions* is 2D, this can be a sequence with length matching
the length of *positions*.
alpha : float or array-like, default: 1
The alpha blending value(s), between 0 (transparent) and 1
(opaque).
If *positions* is 2D, this can be a sequence with length matching
the length of *positions*.
linestyles : str or tuple or list of such values, default: 'solid'
Default is 'solid'. Valid strings are ['solid', 'dashed',
'dashdot', 'dotted', '-', '--', '-.', ':']. Dash tuples
should be of the form::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
If *positions* is 2D, this can be a sequence with length matching
the length of *positions*.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Other keyword arguments are line collection properties. See
`.LineCollection` for a list of the valid properties.
Returns
-------
list of `.EventCollection`
The `.EventCollection` that were added.
Notes
-----
For *linelengths*, *linewidths*, *colors*, *alpha* and *linestyles*, if
only a single value is given, that value is applied to all lines. If an
array-like is given, it must have the same length as *positions*, and
each value will be applied to the corresponding row of the array.
Examples
--------
.. plot:: gallery/lines_bars_and_markers/eventplot_demo.py
"""
lineoffsets, linelengths = self._process_unit_info(
[("y", lineoffsets), ("y", linelengths)], kwargs)
# fix positions, noting that it can be a list of lists:
if not np.iterable(positions):
positions = [positions]
elif any(np.iterable(position) for position in positions):
positions = [np.asanyarray(position) for position in positions]
else:
positions = [np.asanyarray(positions)]
poss = []
for position in positions:
poss += self._process_unit_info([("x", position)], kwargs)
positions = poss
# prevent 'singular' keys from **kwargs dict from overriding the effect
# of 'plural' keyword arguments (e.g. 'color' overriding 'colors')
colors = cbook._local_over_kwdict(colors, kwargs, 'color')
linewidths = cbook._local_over_kwdict(linewidths, kwargs, 'linewidth')
linestyles = cbook._local_over_kwdict(linestyles, kwargs, 'linestyle')
if not np.iterable(lineoffsets):
lineoffsets = [lineoffsets]
if not np.iterable(linelengths):
linelengths = [linelengths]
if not np.iterable(linewidths):
linewidths = [linewidths]
if not np.iterable(colors):
colors = [colors]
if not np.iterable(alpha):
alpha = [alpha]
if hasattr(linestyles, 'lower') or not np.iterable(linestyles):
linestyles = [linestyles]
lineoffsets = np.asarray(lineoffsets)
linelengths = np.asarray(linelengths)
linewidths = np.asarray(linewidths)
if len(lineoffsets) == 0:
raise ValueError('lineoffsets cannot be empty')
if len(linelengths) == 0:
raise ValueError('linelengths cannot be empty')
if len(linestyles) == 0:
raise ValueError('linestyles cannot be empty')
if len(linewidths) == 0:
raise ValueError('linewidths cannot be empty')
if len(alpha) == 0:
raise ValueError('alpha cannot be empty')
if len(colors) == 0:
colors = [None]
try:
# Early conversion of the colors into RGBA values to take care
# of cases like colors='0.5' or colors='C1'. (Issue #8193)
colors = mcolors.to_rgba_array(colors)
except ValueError:
# Will fail if any element of *colors* is None. But as long
# as len(colors) == 1 or len(positions), the rest of the
# code should process *colors* properly.
pass
if len(lineoffsets) == 1 and len(positions) != 1:
lineoffsets = np.tile(lineoffsets, len(positions))
lineoffsets[0] = 0
lineoffsets = np.cumsum(lineoffsets)
if len(linelengths) == 1:
linelengths = np.tile(linelengths, len(positions))
if len(linewidths) == 1:
linewidths = np.tile(linewidths, len(positions))
if len(colors) == 1:
colors = list(colors) * len(positions)
if len(alpha) == 1:
alpha = list(alpha) * len(positions)
if len(linestyles) == 1:
linestyles = [linestyles] * len(positions)
if len(lineoffsets) != len(positions):
raise ValueError('lineoffsets and positions are unequal sized '
'sequences')
if len(linelengths) != len(positions):
raise ValueError('linelengths and positions are unequal sized '
'sequences')
if len(linewidths) != len(positions):
raise ValueError('linewidths and positions are unequal sized '
'sequences')
if len(colors) != len(positions):
raise ValueError('colors and positions are unequal sized '
'sequences')
if len(alpha) != len(positions):
raise ValueError('alpha and positions are unequal sized '
'sequences')
if len(linestyles) != len(positions):
raise ValueError('linestyles and positions are unequal sized '
'sequences')
colls = []
for position, lineoffset, linelength, linewidth, color, alpha_, \
linestyle in \
zip(positions, lineoffsets, linelengths, linewidths,
colors, alpha, linestyles):
coll = mcoll.EventCollection(position,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
alpha=alpha_,
linestyle=linestyle)
self.add_collection(coll, autolim=False)
coll._internal_update(kwargs)
colls.append(coll)
if len(positions) > 0:
# try to get min/max
min_max = [(np.min(_p), np.max(_p)) for _p in positions
if len(_p) > 0]
# if we have any non-empty positions, try to autoscale
if len(min_max) > 0:
mins, maxes = zip(*min_max)
minpos = np.min(mins)
maxpos = np.max(maxes)
minline = (lineoffsets - linelengths).min()
maxline = (lineoffsets + linelengths).max()
if orientation == "vertical":
corners = (minline, minpos), (maxline, maxpos)
else: # "horizontal"
corners = (minpos, minline), (maxpos, maxline)
self.update_datalim(corners)
self._request_autoscale_view()
return colls
#### Basic plotting
# Uses a custom implementation of data-kwarg handling in
# _process_plot_var_args.
@_docstring.interpd
def plot(self, *args, scalex=True, scaley=True, data=None, **kwargs):
"""
Plot y versus x as lines and/or markers.
Call signatures::
plot([x], y, [fmt], *, data=None, **kwargs)
plot([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
The coordinates of the points or line nodes are given by *x*, *y*.
The optional parameter *fmt* is a convenient way for defining basic
formatting like color, marker and linestyle. It's a shortcut string
notation described in the *Notes* section below.
>>> plot(x, y) # plot x and y using default line style and color
>>> plot(x, y, 'bo') # plot x and y using blue circle markers
>>> plot(y) # plot y using x as index array 0..N-1
>>> plot(y, 'r+') # ditto, but with red plusses
You can use `.Line2D` properties as keyword arguments for more
control on the appearance. Line properties and *fmt* can be mixed.
The following two calls yield identical results:
>>> plot(x, y, 'go--', linewidth=2, markersize=12)
>>> plot(x, y, color='green', marker='o', linestyle='dashed',
... linewidth=2, markersize=12)
When conflicting with *fmt*, keyword arguments take precedence.
**Plotting labelled data**
There's a convenient way for plotting objects with labelled data (i.e.
data that can be accessed by index ``obj['y']``). Instead of giving
the data in *x* and *y*, you can provide the object in the *data*
parameter and just give the labels for *x* and *y*::
>>> plot('xlabel', 'ylabel', data=obj)
All indexable objects are supported. This could e.g. be a `dict`, a
`pandas.DataFrame` or a structured numpy array.
**Plotting multiple sets of data**
There are various ways to plot multiple sets of data.
- The most straightforward way is just to call `plot` multiple times.
Example:
>>> plot(x1, y1, 'bo')
>>> plot(x2, y2, 'go')
- If *x* and/or *y* are 2D arrays, a separate data set will be drawn
for every column. If both *x* and *y* are 2D, they must have the
same shape. If only one of them is 2D with shape (N, m) the other
must have length N and will be used for each of the m data sets.
Example:
>>> x = [1, 2, 3]
>>> y = np.array([[1, 2], [3, 4], [5, 6]])
>>> plot(x, y)
is equivalent to:
>>> for col in range(y.shape[1]):
... plot(x, y[:, col])
- The third way is to specify multiple sets of *[x]*, *y*, *[fmt]*
groups::
>>> plot(x1, y1, 'g^', x2, y2, 'g-')
In this case, any additional keyword argument applies to all
datasets. Also, this syntax cannot be combined with the *data*
parameter.
By default, each line is assigned a different style specified by a
'style cycle'. The *fmt* and line property parameters are only
necessary if you want explicit deviations from these defaults.
Alternatively, you can also change the style cycle using
:rc:`axes.prop_cycle`.
Parameters
----------
x, y : array-like or float
The horizontal / vertical coordinates of the data points.
*x* values are optional and default to ``range(len(y))``.
Commonly, these parameters are 1D arrays.
They can also be scalars, or two-dimensional (in that case, the
columns represent separate data sets).
These arguments cannot be passed as keywords.
fmt : str, optional
A format string, e.g. 'ro' for red circles. See the *Notes*
section for a full description of the format strings.
Format strings are just an abbreviation for quickly setting
basic line properties. All of these and more can also be
controlled by keyword arguments.
This argument cannot be passed as keyword.
data : indexable object, optional
An object with labelled data. If given, provide the label names to
plot in *x* and *y*.
.. note::
Technically there's a slight ambiguity in calls where the
second label is a valid *fmt*. ``plot('n', 'o', data=obj)``
could be ``plot(x, y)`` or ``plot(y, fmt)``. In such cases,
the former interpretation is chosen, but a warning is issued.
You may suppress the warning by adding an empty format string
``plot('n', 'o', '', data=obj)``.
Returns
-------
list of `.Line2D`
A list of lines representing the plotted data.
Other Parameters
----------------
scalex, scaley : bool, default: True
These parameters determine if the view limits are adapted to the
data limits. The values are passed on to
`~.axes.Axes.autoscale_view`.
**kwargs : `~matplotlib.lines.Line2D` properties, optional
*kwargs* are used to specify properties like a line label (for
auto legends), linewidth, antialiasing, marker face color.
Example::
>>> plot([1, 2, 3], [1, 2, 3], 'go-', label='line 1', linewidth=2)
>>> plot([1, 2, 3], [1, 4, 9], 'rs', label='line 2')
If you specify multiple lines with one plot call, the kwargs apply
to all those lines. If the label is an iterable (e.g. a list of strings),
each element is used as the label for one set of data.
Here is a list of available `.Line2D` properties:
%(Line2D:kwdoc)s
See Also
--------
scatter : XY scatter plot with markers of varying size and/or color (
sometimes also called bubble chart).
Notes
-----
**Format Strings**
A format string consists of a part for color, marker and line::
fmt = '[marker][line][color]'
Each of them is optional. If not provided, the value from the style
cycle is used. Exception: If ``line`` is given, but no ``marker``,
the data will be a line without markers.
Other combinations such as ``[color][marker][line]`` are also
supported, but note that their parsing may be ambiguous.
**Markers**
============= ===============================
character description
============= ===============================
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'8'`` octagon marker
``'s'`` square marker
``'p'`` pentagon marker
``'P'`` plus (filled) marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'X'`` x (filled) marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
============= ===============================
**Line Styles**
============= ===============================
character description
============= ===============================
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
============= ===============================
Example format strings::
'b' # blue markers with default shape
'or' # red circles
'-g' # green solid line
'--' # dashed line with default color
'^k:' # black triangle_up markers connected by a dotted line
**Colors**
The supported color abbreviations are the single letter codes
============= ===============================
character color
============= ===============================
``'b'`` blue
``'g'`` green
``'r'`` red
``'c'`` cyan
``'m'`` magenta
``'y'`` yellow
``'k'`` black
``'w'`` white
============= ===============================
and the ``'CN'`` colors that index into the default property cycle.
If the color is the only part of the format string, you can
additionally use any `matplotlib.colors` spec, e.g. full names
(``'green'``) or hex strings (``'#008000'``).
"""
kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
lines = [*self._get_lines(self, *args, data=data, **kwargs)]
for line in lines:
self.add_line(line)
if scalex:
self._request_autoscale_view("x")
if scaley:
self._request_autoscale_view("y")
return lines
# @_preprocess_data() # let 'plot' do the unpacking..
@_docstring.interpd
def loglog(self, *args, **kwargs):
"""
Make a plot with log scaling on both the x- and y-axis.
Call signatures::
loglog([x], y, [fmt], data=None, **kwargs)
loglog([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
This is just a thin wrapper around `.plot` which additionally changes
both the x-axis and the y-axis to log scaling. All the concepts and
parameters of plot can be used here as well.
The additional parameters *base*, *subs* and *nonpositive* control the
x/y-axis properties. They are just forwarded to `.Axes.set_xscale` and
`.Axes.set_yscale`. To use different properties on the x-axis and the
y-axis, use e.g.
``ax.set_xscale("log", base=10); ax.set_yscale("log", base=2)``.
Parameters
----------
base : float, default: 10
Base of the logarithm.
subs : sequence, optional
The location of the minor ticks. If *None*, reasonable locations
are automatically chosen depending on the number of decades in the
plot. See `.Axes.set_xscale`/`.Axes.set_yscale` for details.
nonpositive : {'mask', 'clip'}, default: 'clip'
Non-positive values can be masked as invalid, or clipped to a very
small positive number.
**kwargs
All parameters supported by `.plot`.
Returns
-------
list of `.Line2D`
Objects representing the plotted data.
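Example (sample data chosen only for illustration)::
>>> x = np.logspace(0, 3, 50)
>>> loglog(x, x**2, base=10)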
"""
dx = {k: v for k, v in kwargs.items()
if k in ['base', 'subs', 'nonpositive',
'basex', 'subsx', 'nonposx']}
self.set_xscale('log', **dx)
dy = {k: v for k, v in kwargs.items()
if k in ['base', 'subs', 'nonpositive',
'basey', 'subsy', 'nonposy']}
self.set_yscale('log', **dy)
return self.plot(
*args, **{k: v for k, v in kwargs.items() if k not in {*dx, *dy}})
# @_preprocess_data() # let 'plot' do the unpacking..
@_docstring.interpd
def semilogx(self, *args, **kwargs):
"""
Make a plot with log scaling on the x-axis.
Call signatures::
semilogx([x], y, [fmt], data=None, **kwargs)
semilogx([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
This is just a thin wrapper around `.plot` which additionally changes
the x-axis to log scaling. All the concepts and parameters of plot can
be used here as well.
The additional parameters *base*, *subs*, and *nonpositive* control the
x-axis properties. They are just forwarded to `.Axes.set_xscale`.
Parameters
----------
base : float, default: 10
Base of the x logarithm.
subs : array-like, optional
The location of the minor xticks. If *None*, reasonable locations
are automatically chosen depending on the number of decades in the
plot. See `.Axes.set_xscale` for details.
nonpositive : {'mask', 'clip'}, default: 'clip'
Non-positive values in x can be masked as invalid, or clipped to a
very small positive number.
**kwargs
All parameters supported by `.plot`.
Returns
-------
list of `.Line2D`
Objects representing the plotted data.
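Example (sample data chosen only for illustration)::
>>> semilogx([1, 10, 100, 1000], [1, 2, 3, 4], base=10)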
"""
d = {k: v for k, v in kwargs.items()
if k in ['base', 'subs', 'nonpositive',
'basex', 'subsx', 'nonposx']}
self.set_xscale('log', **d)
return self.plot(
*args, **{k: v for k, v in kwargs.items() if k not in d})
# @_preprocess_data() # let 'plot' do the unpacking..
@_docstring.interpd
def semilogy(self, *args, **kwargs):
"""
Make a plot with log scaling on the y-axis.
Call signatures::
semilogy([x], y, [fmt], data=None, **kwargs)
semilogy([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
This is just a thin wrapper around `.plot` which additionally changes
the y-axis to log scaling. All the concepts and parameters of plot can
be used here as well.
The additional parameters *base*, *subs*, and *nonpositive* control the
y-axis properties. They are just forwarded to `.Axes.set_yscale`.
Parameters
----------
base : float, default: 10
Base of the y logarithm.
subs : array-like, optional
The location of the minor yticks. If *None*, reasonable locations
are automatically chosen depending on the number of decades in the
plot. See `.Axes.set_yscale` for details.
nonpositive : {'mask', 'clip'}, default: 'clip'
Non-positive values in y can be masked as invalid, or clipped to a
very small positive number.
**kwargs
All parameters supported by `.plot`.
Returns
-------
list of `.Line2D`
Objects representing the plotted data.
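Example (sample data chosen only for illustration)::
>>> semilogy([1, 2, 3, 4], [1, 10, 100, 1000], base=10)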
"""
d = {k: v for k, v in kwargs.items()
if k in ['base', 'subs', 'nonpositive',
'basey', 'subsy', 'nonposy']}
self.set_yscale('log', **d)
return self.plot(
*args, **{k: v for k, v in kwargs.items() if k not in d})
@_preprocess_data(replace_names=["x"], label_namer="x")
def acorr(self, x, **kwargs):
"""
Plot the autocorrelation of *x*.
Parameters
----------
x : array-like
Not run through Matplotlib's unit conversion, so this should
be a unit-less array.
detrend : callable, default: `.mlab.detrend_none` (no detrending)
A detrending function applied to *x*. It must have the
signature ::
detrend(x: np.ndarray) -> np.ndarray
normed : bool, default: True
If ``True``, input vectors are normalised to unit length.
usevlines : bool, default: True
Determines the plot style.
If ``True``, vertical lines are plotted from 0 to the acorr value
using `.Axes.vlines`. Additionally, a horizontal line is plotted
at y=0 using `.Axes.axhline`.
If ``False``, markers are plotted at the acorr values using
`.Axes.plot`.
maxlags : int, default: 10
Number of lags to show. If ``None``, will return all
``2 * len(x) - 1`` lags.
Returns
-------
lags : array (length ``2*maxlags+1``)
The lag vector.
c : array (length ``2*maxlags+1``)
The auto correlation vector.
line : `.LineCollection` or `.Line2D`
`.Artist` added to the Axes of the correlation:
- `.LineCollection` if *usevlines* is True.
- `.Line2D` if *usevlines* is False.
b : `~matplotlib.lines.Line2D` or None
Horizontal line at 0 if *usevlines* is True;
None if *usevlines* is False.
Other Parameters
----------------
linestyle : `~matplotlib.lines.Line2D` property, optional
The linestyle for plotting the data points.
Only used if *usevlines* is ``False``.
marker : str, default: 'o'
The marker for plotting the data points.
Only used if *usevlines* is ``False``.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Additional parameters are passed to `.Axes.vlines` and
`.Axes.axhline` if *usevlines* is ``True``; otherwise they are
passed to `.Axes.plot`.
Notes
-----
The cross correlation is performed with `numpy.correlate` with
``mode = "full"``.
"""
return self.xcorr(x, x, **kwargs)
@_api.make_keyword_only("3.10", "normed")
@_preprocess_data(replace_names=["x", "y"], label_namer="y")
def xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
usevlines=True, maxlags=10, **kwargs):
r"""
Plot the cross correlation between *x* and *y*.
The correlation with lag k is defined as
:math:`\sum_n x[n+k] \cdot y^*[n]`, where :math:`y^*` is the complex
conjugate of :math:`y`.
Parameters
----------
x, y : array-like of length n
Neither *x* nor *y* are run through Matplotlib's unit conversion, so
these should be unit-less arrays.
detrend : callable, default: `.mlab.detrend_none` (no detrending)
A detrending function applied to *x* and *y*. It must have the
signature ::
detrend(x: np.ndarray) -> np.ndarray
normed : bool, default: True
If ``True``, input vectors are normalised to unit length.
usevlines : bool, default: True
Determines the plot style.
If ``True``, vertical lines are plotted from 0 to the xcorr value
using `.Axes.vlines`. Additionally, a horizontal line is plotted
at y=0 using `.Axes.axhline`.
If ``False``, markers are plotted at the xcorr values using
`.Axes.plot`.
maxlags : int, default: 10
Number of lags to show. If None, will return all ``2 * len(x) - 1``
lags.
Returns
-------
lags : array (length ``2*maxlags+1``)
The lag vector.
c : array (length ``2*maxlags+1``)
The cross correlation vector.
line : `.LineCollection` or `.Line2D`
`.Artist` added to the Axes of the correlation:
- `.LineCollection` if *usevlines* is True.
- `.Line2D` if *usevlines* is False.
b : `~matplotlib.lines.Line2D` or None
Horizontal line at 0 if *usevlines* is True;
None if *usevlines* is False.
Other Parameters
----------------
linestyle : `~matplotlib.lines.Line2D` property, optional
The linestyle for plotting the data points.
Only used if *usevlines* is ``False``.
marker : str, default: 'o'
The marker for plotting the data points.
Only used if *usevlines* is ``False``.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Additional parameters are passed to `.Axes.vlines` and
`.Axes.axhline` if *usevlines* is ``True``; otherwise they are
passed to `.Axes.plot`.
Notes
-----
The cross correlation is performed with `numpy.correlate` with
``mode = "full"``.
"""
Nx = len(x)
if Nx != len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
correls = np.correlate(x, y, mode="full")
if normed:
correls = correls / np.sqrt(np.dot(x, x) * np.dot(y, y))
if maxlags is None:
maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
'positive < %d' % Nx)
lags = np.arange(-maxlags, maxlags + 1)
correls = correls[Nx - 1 - maxlags:Nx + maxlags]
if usevlines:
a = self.vlines(lags, [0], correls, **kwargs)
# Make label empty so only vertical lines get a legend entry
kwargs.pop('label', '')
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, correls, **kwargs)
b = None
return lags, correls, a, b
#### Specialized plotting
# @_preprocess_data() # let 'plot' do the unpacking..
def step(self, x, y, *args, where='pre', data=None, **kwargs):
"""
Make a step plot.
Call signatures::
step(x, y, [fmt], *, data=None, where='pre', **kwargs)
step(x, y, [fmt], x2, y2, [fmt2], ..., *, where='pre', **kwargs)
This is just a thin wrapper around `.plot` which changes some
formatting options. Most of the concepts and parameters of plot can be
used here as well.
.. note::
This method uses a standard plot with a step drawstyle: The *x*
values are the reference positions and steps extend left/right/both
directions depending on *where*.
For the common case where you know the values and edges of the
steps, use `~.Axes.stairs` instead.
Parameters
----------
x : array-like
1D sequence of x positions. It is assumed, but not checked, that
it is uniformly increasing.
y : array-like
1D sequence of y levels.
fmt : str, optional
A format string, e.g. 'g' for a green line. See `.plot` for a more
detailed description.
Note: While full format strings are accepted, it is recommended to
only specify the color. Line styles are currently ignored (use
the keyword argument *linestyle* instead). Markers are accepted
and plotted on the given positions, however, this is a rarely
needed feature for step plots.
where : {'pre', 'post', 'mid'}, default: 'pre'
Define where the steps should be placed:
- 'pre': The y value is continued constantly to the left from
every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the
value ``y[i]``.
- 'post': The y value is continued constantly to the right from
every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the
value ``y[i]``.
- 'mid': Steps occur half-way between the *x* positions.
data : indexable object, optional
An object with labelled data. If given, provide the label names to
plot in *x* and *y*.
**kwargs
Additional parameters are the same as those for `.plot`.
Returns
-------
list of `.Line2D`
Objects representing the plotted data.
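Example (sample data chosen only for illustration)::
>>> step([1, 2, 3, 4], [1, 3, 2, 4], where='post')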
"""
_api.check_in_list(('pre', 'post', 'mid'), where=where)
kwargs['drawstyle'] = 'steps-' + where
return self.plot(x, y, *args, data=data, **kwargs)
@staticmethod
def _convert_dx(dx, x0, xconv, convert):
"""
Small helper to do logic of width conversion flexibly.
*dx* and *x0* have units, but *xconv* has already been converted
to unitless (and is an ndarray). This allows the *dx* to have units
that are different from *x0*, but are still accepted by the
``__add__`` operator of *x0*.
"""
# x should be an array...
assert type(xconv) is np.ndarray
if xconv.size == 0:
# xconv has already been converted, but maybe empty...
return convert(dx)
try:
# attempt to add the width to x0; this works for
# datetime+timedelta, for instance
# only use the first element of x and x0. This saves
# having to be sure addition works across the whole
# vector. This is particularly an issue if
# x0 and dx are lists so x0 + dx just concatenates the lists.
# We can't just cast x0 and dx to numpy arrays because that
# removes the units from unit packages like `pint` that
# wrap numpy arrays.
try:
x0 = cbook._safe_first_finite(x0)
except (TypeError, IndexError, KeyError):
pass
try:
x = cbook._safe_first_finite(xconv)
except (TypeError, IndexError, KeyError):
x = xconv
delist = False
if not np.iterable(dx):
dx = [dx]
delist = True
dx = [convert(x0 + ddx) - x for ddx in dx]
if delist:
dx = dx[0]
except (ValueError, TypeError, AttributeError):
# if the above fails (for any reason) just fallback to what
# we do by default and convert dx by itself.
dx = convert(dx)
return dx
def _parse_bar_color_args(self, kwargs):
"""
Helper function to process color-related arguments of `.Axes.bar`.
Argument precedence for facecolors:
- kwargs['facecolor']
- kwargs['color']
- Result of ``self._get_patches_for_fill.get_next_color``
Argument precedence for edgecolors:
- kwargs['edgecolor']
- None
Parameters
----------
self : Axes
kwargs : dict
Additional kwargs. If these keys exist, we pop and process them:
'facecolor', 'edgecolor', 'color'
Note: The dict is modified by this function.
Returns
-------
facecolor
The facecolor. One or more colors as (N, 4) rgba array.
edgecolor
The edgecolor. Not normalized; may be any valid color spec or None.
"""
color = kwargs.pop('color', None)
facecolor = kwargs.pop('facecolor', color)
edgecolor = kwargs.pop('edgecolor', None)
facecolor = (facecolor if facecolor is not None
else self._get_patches_for_fill.get_next_color())
try:
facecolor = mcolors.to_rgba_array(facecolor)
except ValueError as err:
raise ValueError(
"'facecolor' or 'color' argument must be a valid color or "
"sequence of colors."
) from err
return facecolor, edgecolor
@_preprocess_data()
@_docstring.interpd
def bar(self, x, height, width=0.8, bottom=None, *, align="center",
**kwargs):
r"""
Make a bar plot.
The bars are positioned at *x* with the given *align*\ment. Their
dimensions are given by *height* and *width*. The vertical baseline
is *bottom* (default 0).
Many parameters can take either a single value applying to all bars
or a sequence of values, one for each bar.
Parameters
----------
x : float or array-like
The x coordinates of the bars. See also *align* for the
alignment of the bars to the coordinates.
Bars are often used for categorical data, i.e. string labels below
the bars. You can provide a list of strings directly to *x*.
``bar(['A', 'B', 'C'], [1, 2, 3])`` is often a shorter and more
convenient notation compared to
``bar(range(3), [1, 2, 3], tick_label=['A', 'B', 'C'])``. They are
equivalent as long as the names are unique. The explicit *tick_label*
notation draws the names in the sequence given. However, if the
categorical *x* data contain duplicate values, these values map to the same
numerical x coordinate, and hence the corresponding bars are drawn on
top of each other.
height : float or array-like
The height(s) of the bars.
Note that if *bottom* has units (e.g. datetime), *height* should be in
units that are a difference from the value of *bottom* (e.g. timedelta).
width : float or array-like, default: 0.8
The width(s) of the bars.
Note that if *x* has units (e.g. datetime), then *width* should be in
units that are a difference (e.g. timedelta) around the *x* values.
bottom : float or array-like, default: 0
The y coordinate(s) of the bottom side(s) of the bars.
Note that if *bottom* has units, then the y-axis will get a Locator and
Formatter appropriate for the units (e.g. dates, or categorical).
align : {'center', 'edge'}, default: 'center'
Alignment of the bars to the *x* coordinates:
- 'center': Center the base on the *x* positions.
- 'edge': Align the left edges of the bars with the *x* positions.
To align the bars on the right edge pass a negative *width* and
``align='edge'``.
Returns
-------
`.BarContainer`
Container with all the bars and optionally errorbars.
Other Parameters
----------------
color : :mpltype:`color` or list of :mpltype:`color`, optional
The colors of the bar faces. This is an alias for *facecolor*.
If both are given, *facecolor* takes precedence.
facecolor : :mpltype:`color` or list of :mpltype:`color`, optional
The colors of the bar faces.
If both *color* and *facecolor* are given, *facecolor* takes precedence.
edgecolor : :mpltype:`color` or list of :mpltype:`color`, optional
The colors of the bar edges.
linewidth : float or array-like, optional
Width of the bar edge(s). If 0, don't draw edges.
tick_label : str or list of str, optional
The tick labels of the bars.
Default: None (Use default numeric labels.)
label : str or list of str, optional
A single label is attached to the resulting `.BarContainer` as a
label for the whole dataset.
If a list is provided, it must be the same length as *x* and
labels the individual bars. Repeated labels are not de-duplicated
and will cause repeated label entries, so this is best used when
bars also differ in style (e.g., by passing a list to *color*.)
xerr, yerr : float or array-like of shape(N,) or shape(2, N), optional
If not *None*, add horizontal / vertical errorbars to the bar tips.
The values are +/- sizes relative to the data:
- scalar: symmetric +/- values for all bars
- shape(N,): symmetric +/- values for each bar
- shape(2, N): Separate - and + values for each bar. First row
contains the lower errors, the second row contains the upper
errors.
- *None*: No errorbar. (Default)
See :doc:`/gallery/statistics/errorbar_features` for an example on
the usage of *xerr* and *yerr*.
ecolor : :mpltype:`color` or list of :mpltype:`color`, default: 'black'
The line color of the errorbars.
capsize : float, default: :rc:`errorbar.capsize`
The length of the error bar caps in points.
error_kw : dict, optional
Dictionary of keyword arguments to be passed to the
`~.Axes.errorbar` method. Values of *ecolor* or *capsize* defined
here take precedence over the independent keyword arguments.
log : bool, default: False
If *True*, set the y-axis to be log scale.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs : `.Rectangle` properties
%(Rectangle:kwdoc)s
See Also
--------
barh : Plot a horizontal bar plot.
grouped_bar : Plot multiple datasets as grouped bar plot.
Notes
-----
Stacked bars can be achieved by passing individual *bottom* values per
bar. See :doc:`/gallery/lines_bars_and_markers/bar_stacked`.
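For instance, two stacked layers (sample values chosen only for
illustration)::
>>> layer1 = [1, 2, 3]
>>> bar(['A', 'B', 'C'], layer1)
>>> bar(['A', 'B', 'C'], [4, 5, 6], bottom=layer1)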
"""
kwargs = cbook.normalize_kwargs(kwargs, mpatches.Patch)
facecolor, edgecolor = self._parse_bar_color_args(kwargs)
linewidth = kwargs.pop('linewidth', None)
hatch = kwargs.pop('hatch', None)
# Because xerr and yerr will be passed to errorbar, most dimension
# checking and processing will be left to the errorbar method.
xerr = kwargs.pop('xerr', None)
yerr = kwargs.pop('yerr', None)
error_kw = kwargs.pop('error_kw', None)
error_kw = {} if error_kw is None else error_kw.copy()
ezorder = error_kw.pop('zorder', None)
if ezorder is None:
ezorder = kwargs.get('zorder', None)
if ezorder is not None:
# If using the bar zorder, increment slightly to make sure
# errorbars are drawn on top of bars
ezorder += 0.01
error_kw.setdefault('zorder', ezorder)
ecolor = kwargs.pop('ecolor', 'k')
capsize = kwargs.pop('capsize', mpl.rcParams["errorbar.capsize"])
error_kw.setdefault('ecolor', ecolor)
error_kw.setdefault('capsize', capsize)
# The keyword argument *orientation* is used by barh() to defer all
# logic and drawing to bar(). It is considered internal and is
# intentionally not mentioned in the docstring.
orientation = kwargs.pop('orientation', 'vertical')
_api.check_in_list(['vertical', 'horizontal'], orientation=orientation)
log = kwargs.pop('log', False)
label = kwargs.pop('label', '')
tick_labels = kwargs.pop('tick_label', None)
y = bottom # Matches barh call signature.
if orientation == 'vertical':
if y is None:
y = 0
else: # horizontal
if x is None:
x = 0
if orientation == 'vertical':
# It is possible for y (bottom) to contain unit information.
# However, it is also possible for y=0 for the default and height
# to contain unit information. This will prioritize the units of y.
self._process_unit_info(
[("x", x), ("y", y), ("y", height)], kwargs, convert=False)
if log:
self.set_yscale('log', nonpositive='clip')
else: # horizontal
# It is possible for x (left) to contain unit information.
# However, it is also possible for x=0 for the default and width
# to contain unit information. This will prioritize the units of x.
self._process_unit_info(
[("x", x), ("x", width), ("y", y)], kwargs, convert=False)
if log:
self.set_xscale('log', nonpositive='clip')
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
x0 = x
x = np.asarray(self.convert_xunits(x))
width = self._convert_dx(width, x0, x, self.convert_xunits)
if xerr is not None:
xerr = self._convert_dx(xerr, x0, x, self.convert_xunits)
if self.yaxis is not None:
y0 = y
y = np.asarray(self.convert_yunits(y))
height = self._convert_dx(height, y0, y, self.convert_yunits)
if yerr is not None:
yerr = self._convert_dx(yerr, y0, y, self.convert_yunits)
try:
x, height, width, y, linewidth, hatch = np.broadcast_arrays(
# Make args iterable too.
np.atleast_1d(x), height, width, y, linewidth, hatch
)
except ValueError as e:
arg_map = {
"arg 0": "'x'",
"arg 1": "'height'",
"arg 2": "'width'",
"arg 3": "'y'",
"arg 4": "'linewidth'",
"arg 5": "'hatch'"
}
error_message = str(e)
for arg, name in arg_map.items():
error_message = error_message.replace(arg, name)
if error_message != str(e):
raise ValueError(error_message) from e
else:
raise
# Now that units have been converted, set the tick locations.
if orientation == 'vertical':
tick_label_axis = self.xaxis
tick_label_position = x
else: # horizontal
tick_label_axis = self.yaxis
tick_label_position = y
if not isinstance(label, str) and np.iterable(label):
bar_container_label = '_nolegend_'
patch_labels = label
else:
bar_container_label = label
patch_labels = ['_nolegend_'] * len(x)
if len(patch_labels) != len(x):
raise ValueError(f'number of labels ({len(patch_labels)}) '
f'does not match number of bars ({len(x)}).')
linewidth = itertools.cycle(np.atleast_1d(linewidth))
hatch = itertools.cycle(np.atleast_1d(hatch))
facecolor = itertools.chain(itertools.cycle(facecolor),
# Fallback if color == "none".
itertools.repeat('none'))
if edgecolor is None:
edgecolor = itertools.repeat(None)
else:
edgecolor = itertools.chain(
itertools.cycle(mcolors.to_rgba_array(edgecolor)),
# Fallback if edgecolor == "none".
itertools.repeat('none'))
# We will now resolve the alignment and really have
# left, bottom, width, height vectors
_api.check_in_list(['center', 'edge'], align=align)
if align == 'center':
if orientation == 'vertical':
try:
left = x - width / 2
except TypeError as e:
raise TypeError(f'the dtypes of parameters x ({x.dtype}) '
f'and width ({width.dtype}) '
f'are incompatible') from e
bottom = y
else: # horizontal
try:
bottom = y - height / 2
except TypeError as e:
raise TypeError(f'the dtypes of parameters y ({y.dtype}) '
f'and height ({height.dtype}) '
f'are incompatible') from e
left = x
else: # edge
left = x
bottom = y
patches = []
args = zip(left, bottom, width, height, facecolor, edgecolor, linewidth,
hatch, patch_labels)
for l, b, w, h, c, e, lw, htch, lbl in args:
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=lbl,
hatch=htch,
)
r._internal_update(kwargs)
r.get_path()._interpolation_steps = 100
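# Pin the bar base with a sticky edge so that autoscale margins are
# not added beyond the baseline.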
if orientation == 'vertical':
r.sticky_edges.y.append(b)
else: # horizontal
r.sticky_edges.x.append(l)
self.add_patch(r)
patches.append(r)
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
ex = [l + 0.5 * w for l, w in zip(left, width)]
ey = [b + h for b, h in zip(bottom, height)]
else: # horizontal
# using list comps rather than arrays to preserve unit info
ex = [l + w for l, w in zip(left, width)]
ey = [b + 0.5 * h for b, h in zip(bottom, height)]
error_kw.setdefault("label", '_nolegend_')
errorbar = self.errorbar(ex, ey, yerr=yerr, xerr=xerr, fmt='none',
**error_kw)
else:
errorbar = None
self._request_autoscale_view()
if orientation == 'vertical':
datavalues = height
else: # horizontal
datavalues = width
bar_container = BarContainer(patches, errorbar, datavalues=datavalues,
orientation=orientation,
label=bar_container_label)
self.add_container(bar_container)
if tick_labels is not None:
tick_labels = np.broadcast_to(tick_labels, len(patches))
tick_label_axis.set_ticks(tick_label_position)
tick_label_axis.set_ticklabels(tick_labels)
return bar_container
# @_preprocess_data() # let 'bar' do the unpacking..
@_docstring.interpd
def barh(self, y, width, height=0.8, left=None, *, align="center",
data=None, **kwargs):
r"""
Make a horizontal bar plot.
The bars are positioned at *y* with the given *align*\ment. Their
dimensions are given by *width* and *height*. The horizontal baseline
is *left* (default 0).
Many parameters can take either a single value applying to all bars
or a sequence of values, one for each bar.
Parameters
----------
y : float or array-like
The y coordinates of the bars. See also *align* for the
alignment of the bars to the coordinates.
Bars are often used for categorical data, i.e. string labels below
the bars. You can provide a list of strings directly to *y*.
``barh(['A', 'B', 'C'], [1, 2, 3])`` is often a shorter and more
convenient notation compared to
``barh(range(3), [1, 2, 3], tick_label=['A', 'B', 'C'])``. They are
equivalent as long as the names are unique. The explicit *tick_label*
notation draws the names in the sequence given. However, if the
categorical *y* data contain duplicate values, these values map to the same
numerical y coordinate, and hence the corresponding bars are drawn on
top of each other.
width : float or array-like
The width(s) of the bars.
Note that if *left* has units (e.g. datetime), *width* should be in
units that are a difference from the value of *left* (e.g. timedelta).
height : float or array-like, default: 0.8
The heights of the bars.
Note that if *y* has units (e.g. datetime), then *height* should be in
units that are a difference (e.g. timedelta) around the *y* values.
left : float or array-like, default: 0
The x coordinates of the left side(s) of the bars.
Note that if *left* has units, then the x-axis will get a Locator and
Formatter appropriate for the units (e.g. dates, or categorical).
align : {'center', 'edge'}, default: 'center'
Alignment of the bars to the *y* coordinates:
- 'center': Center the bars on the *y* positions.
- 'edge': Align the bottom edges of the bars with the *y*
positions.
To align the bars on the top edge pass a negative *height* and
``align='edge'``.
Returns
-------
`.BarContainer`
Container with all the bars and optionally errorbars.
Other Parameters
----------------
color : :mpltype:`color` or list of :mpltype:`color`, optional
The colors of the bar faces.
edgecolor : :mpltype:`color` or list of :mpltype:`color`, optional
The colors of the bar edges.
linewidth : float or array-like, optional
Width of the bar edge(s). If 0, don't draw edges.
tick_label : str or list of str, optional
The tick labels of the bars.
Default: None (Use default numeric labels.)
label : str or list of str, optional
A single label is attached to the resulting `.BarContainer` as a
label for the whole dataset.
If a list is provided, it must be the same length as *y* and
labels the individual bars. Repeated labels are not de-duplicated
and will cause repeated label entries, so this is best used when
bars also differ in style (e.g., by passing a list to *color*.)
xerr, yerr : float or array-like of shape(N,) or shape(2, N), optional
If not *None*, add horizontal / vertical errorbars to the bar tips.
The values are +/- sizes relative to the data:
- scalar: symmetric +/- values for all bars
- shape(N,): symmetric +/- values for each bar
- shape(2, N): Separate - and + values for each bar. First row
contains the lower errors, the second row contains the upper
errors.
- *None*: No errorbar. (default)
See :doc:`/gallery/statistics/errorbar_features` for an example on
the usage of *xerr* and *yerr*.
ecolor : :mpltype:`color` or list of :mpltype:`color`, default: 'black'
The line color of the errorbars.
capsize : float, default: :rc:`errorbar.capsize`
The length of the error bar caps in points.
error_kw : dict, optional
Dictionary of keyword arguments to be passed to the
`~.Axes.errorbar` method. Values of *ecolor* or *capsize* defined
here take precedence over the independent keyword arguments.
log : bool, default: False
If ``True``, set the x-axis to be log scale.
data : indexable object, optional
If given, all parameters also accept a string ``s``, which is
interpreted as ``data[s]`` if ``s`` is a key in ``data``.
**kwargs : `.Rectangle` properties
%(Rectangle:kwdoc)s
See Also
--------
bar : Plot a vertical bar plot.
Notes
-----
Stacked bars can be achieved by passing individual *left* values per
bar. See
:doc:`/gallery/lines_bars_and_markers/horizontal_barchart_distribution`.
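A minimal example (sample values chosen only for illustration)::
>>> barh(['one', 'two', 'three'], [3, 1, 2])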
"""
kwargs.setdefault('orientation', 'horizontal')
patches = self.bar(x=left, height=height, width=width, bottom=y,
align=align, data=data, **kwargs)
return patches
def bar_label(self, container, labels=None, *, fmt="%g", label_type="edge",
padding=0, **kwargs):
"""
Label a bar plot.
Adds labels to bars in the given `.BarContainer`.
You may need to adjust the axis limits to fit the labels.
Parameters
----------
container : `.BarContainer`
Container with all the bars and optionally errorbars, likely
returned from `.bar` or `.barh`.
labels : array-like, optional
A list of label texts, that should be displayed. If not given, the
label texts will be the data values formatted with *fmt*.
fmt : str or callable, default: '%g'
An unnamed %-style or {}-style format string for the label or a
function to call with the value as the first argument.
When *fmt* is a string and can be interpreted in both formats,
%-style takes precedence over {}-style.
.. versionadded:: 3.7
Support for {}-style format string and callables.
label_type : {'edge', 'center'}, default: 'edge'
The label type. Possible values:
- 'edge': label placed at the end-point of the bar segment, and the
value displayed will be the position of that end-point.
- 'center': label placed in the center of the bar segment, and the
value displayed will be the length of that segment.
(useful for stacked bars, i.e.,
:doc:`/gallery/lines_bars_and_markers/bar_label_demo`)
padding : float or array-like, default: 0
Distance of label from the end of the bar, in points.
If an array-like is provided, the padding values are applied
to each label individually. Must have the same length as *container*.
.. versionadded:: 3.11
**kwargs
Any remaining keyword arguments are passed through to
`.Axes.annotate`. The alignment parameters (
*horizontalalignment* / *ha*, *verticalalignment* / *va*) are
not supported because the labels are automatically aligned to
the bars.
Returns
-------
list of `.Annotation`
A list of `.Annotation` instances for the labels.
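Example (sample values chosen only for illustration)::
>>> rects = bar(['A', 'B'], [3, 5])
>>> bar_label(rects, padding=3)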
"""
for key in ['horizontalalignment', 'ha', 'verticalalignment', 'va']:
if key in kwargs:
raise ValueError(
f"Passing {key!r} to bar_label() is not supported.")
a, b = self.yaxis.get_view_interval()
y_inverted = a > b
c, d = self.xaxis.get_view_interval()
x_inverted = c > d
# want to know whether to put label on positive or negative direction
# cannot use np.sign here because it will return 0 if x == 0
def sign(x):
return 1 if x >= 0 else -1
_api.check_in_list(['edge', 'center'], label_type=label_type)
bars = container.patches
errorbar = container.errorbar
datavalues = container.datavalues
orientation = container.orientation
if errorbar:
# check "ErrorbarContainer" for the definition of these elements
lines = errorbar.lines # attribute of "ErrorbarContainer" (tuple)
barlinecols = lines[2] # 0: data_line, 1: caplines, 2: barlinecols
barlinecol = barlinecols[0] # the "LineCollection" of error bars
errs = barlinecol.get_segments()
else:
errs = []
if labels is None:
labels = []
annotations = []
if np.iterable(padding):
# if padding iterable, check length
padding = np.asarray(padding)
if len(padding) != len(bars):
raise ValueError(
f"padding must be of length {len(bars)} when passed as a sequence")
else:
# single value, apply to all labels
padding = [padding] * len(bars)
for bar, err, dat, lbl, pad in itertools.zip_longest(
bars, errs, datavalues, labels, padding
):
(x0, y0), (x1, y1) = bar.get_bbox().get_points()
xc, yc = (x0 + x1) / 2, (y0 + y1) / 2
if orientation == "vertical":
extrema = max(y0, y1) if dat >= 0 else min(y0, y1)
length = abs(y0 - y1)
else: # horizontal
extrema = max(x0, x1) if dat >= 0 else min(x0, x1)
length = abs(x0 - x1)
if err is None or np.size(err) == 0:
endpt = extrema
elif orientation == "vertical":
endpt = err[:, 1].max() if dat >= 0 else err[:, 1].min()
else: # horizontal
endpt = err[:, 0].max() if dat >= 0 else err[:, 0].min()
if label_type == "center":
value = sign(dat) * length
else: # edge
value = extrema
if label_type == "center":
xy = (0.5, 0.5)
kwargs["xycoords"] = (
lambda r, b=bar:
mtransforms.Bbox.intersection(
b.get_window_extent(r), b.get_clip_box()
) or mtransforms.Bbox.null()
)
else: # edge
if orientation == "vertical":
xy = xc, endpt
else: # horizontal
xy = endpt, yc
if orientation == "vertical":
y_direction = -1 if y_inverted else 1
xytext = 0, y_direction * sign(dat) * pad
else: # horizontal
x_direction = -1 if x_inverted else 1
xytext = x_direction * sign(dat) * pad, 0
if label_type == "center":
ha, va = "center", "center"
else: # edge
if orientation == "vertical":
ha = 'center'
if y_inverted:
va = 'top' if dat > 0 else 'bottom' # also handles NaN
else:
va = 'top' if dat < 0 else 'bottom' # also handles NaN
else: # horizontal
if x_inverted:
ha = 'right' if dat > 0 else 'left' # also handles NaN
else:
ha = 'right' if dat < 0 else 'left' # also handles NaN
va = 'center'
if np.isnan(dat):
lbl = ''
if lbl is None:
if isinstance(fmt, str):
lbl = cbook._auto_format_str(fmt, value)
elif callable(fmt):
lbl = fmt(value)
else:
raise TypeError("fmt must be a str or callable")
annotation = self.annotate(lbl,
xy, xytext, textcoords="offset points",
ha=ha, va=va, **kwargs)
annotations.append(annotation)
return annotations
@_preprocess_data()
@_docstring.interpd
def broken_barh(self, xranges, yrange, align="bottom", **kwargs):
"""
Plot a horizontal sequence of rectangles.
A rectangle is drawn for each element of *xranges*. All rectangles
have the same vertical position and size defined by *yrange*.
Parameters
----------
xranges : sequence of tuples (*xmin*, *xwidth*)
The x-positions and extents of the rectangles. For each tuple
(*xmin*, *xwidth*) a rectangle is drawn from *xmin* to *xmin* +
*xwidth*.
yrange : (*ypos*, *yheight*)
The y-position and extent for all the rectangles.
align : {"bottom", "center", "top"}, default: 'bottom'
The alignment of the yrange with respect to the y-position. One of:
- "bottom": Resulting y-range [ypos, ypos + yheight]
- "center": Resulting y-range [ypos - yheight/2, ypos + yheight/2]
- "top": Resulting y-range [ypos - yheight, ypos]
.. versionadded:: 3.11
Returns
-------
`~.collections.PolyCollection`
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs : `.PolyCollection` properties
Each *kwarg* can be either a single argument applying to all
rectangles, e.g.::
facecolors='black'
or a sequence of arguments over which is cycled, e.g.::
facecolors=('black', 'blue')
would create interleaving black and blue rectangles.
Supported keywords:
%(PolyCollection:kwdoc)s
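Example (sample values chosen only for illustration)::
>>> broken_barh([(10, 5), (20, 3)], (0, 1), facecolors='tab:blue')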
"""
# process the unit information
xdata = cbook._safe_first_finite(xranges) if len(xranges) else None
ydata = cbook._safe_first_finite(yrange) if len(yrange) else None
self._process_unit_info(
[("x", xdata), ("y", ydata)], kwargs, convert=False)
vertices = []
y0, dy = yrange
_api.check_in_list(['bottom', 'center', 'top'], align=align)
if align == "bottom":
y0, y1 = self.convert_yunits((y0, y0 + dy))
elif align == "center":
y0, y1 = self.convert_yunits((y0 - dy/2, y0 + dy/2))
else:
y0, y1 = self.convert_yunits((y0 - dy, y0))
for xr in xranges: # convert the absolute values, not the x and dx
try:
x0, dx = xr
except Exception:
raise ValueError(
"each range in xrange must be a sequence with two "
"elements (i.e. xrange must be an (N, 2) array)") from None
x0, x1 = self.convert_xunits((x0, x0 + dx))
vertices.append([(x0, y0), (x0, y1), (x1, y1), (x1, y0)])
col = mcoll.PolyCollection(np.array(vertices), **kwargs)
self.add_collection(col)
return col
@_docstring.interpd
def grouped_bar(self, heights, *, positions=None, group_spacing=1.5, bar_spacing=0,
tick_labels=None, labels=None, orientation="vertical", colors=None,
**kwargs):
"""
Make a grouped bar plot.
.. versionadded:: 3.11
The API is still provisional. We may still fine-tune some aspects based on
user feedback.
Grouped bar charts visualize a collection of categorical datasets. Each value
in a dataset belongs to a distinct category and these categories are the same
across all datasets. The categories typically have string names, but could
also be dates or index keys. The values in each dataset are represented by a
sequence of bars of the same color. The bars of all datasets are grouped
together by their shared categories. The category names are drawn as the tick
labels for each bar group. Each dataset has a distinct bar color, and can
optionally get a label that is used for the legend.
Example:
.. code-block:: python
grouped_bar([dataset_0, dataset_1, dataset_2],
tick_labels=['A', 'B'],
labels=['dataset 0', 'dataset 1', 'dataset 2'])
.. plot:: _embedded_plots/grouped_bar.py
Parameters
----------
heights : list of array-like or dict of array-like or 2D array \
or pandas.DataFrame
The heights for all x and groups. One of:
- list of array-like: A list of datasets, each dataset must have
the same number of elements.
.. code-block:: none
# category_A, category_B
dataset_0 = [value_0_A, value_0_B]
dataset_1 = [value_1_A, value_1_B]
dataset_2 = [value_2_A, value_2_B]
Example call::
grouped_bar([dataset_0, dataset_1, dataset_2])
- dict of array-like: A mapping from names to datasets. Each dataset
(dict value) must have the same number of elements.
Example call:
.. code-block:: python
data_dict = {'ds0': dataset_0, 'ds1': dataset_1, 'ds2': dataset_2}
grouped_bar(data_dict)
The names are used as *labels*, i.e. this is equivalent to
.. code-block:: python
grouped_bar(data_dict.values(), labels=data_dict.keys())
When using a dict input, you must not pass *labels* explicitly.
- a 2D array: The rows are the categories, the columns are the different
datasets.
.. code-block:: none
dataset_0 dataset_1 dataset_2
category_A ds0_a ds1_a ds2_a
category_B ds0_b ds1_b ds2_b
Example call:
.. code-block:: python
categories = ["A", "B"]
dataset_labels = ["dataset_0", "dataset_1", "dataset_2"]
array = np.random.random((2, 3))
grouped_bar(array, tick_labels=categories, labels=dataset_labels)
- a `pandas.DataFrame`.
The index is used for the categories, the columns are used for the
datasets.
.. code-block:: python
df = pd.DataFrame(
np.random.random((2, 3)),
index=["A", "B"],
columns=["dataset_0", "dataset_1", "dataset_2"]
)
grouped_bar(df)
i.e. this is equivalent to
.. code-block::
grouped_bar(df.to_numpy(), tick_labels=df.index, labels=df.columns)
Note that ``grouped_bar(df)`` produces a plot structurally equivalent to
``df.plot.bar()``.
positions : array-like, optional
The center positions of the bar groups. The values have to be equidistant.
If not given, a sequence of integer positions 0, 1, 2, ... is used.
tick_labels : list of str, optional
The category labels, which are placed on ticks at the center *positions*
of the bar groups. If not set, the axis ticks (positions and labels) are
left unchanged.
labels : list of str, optional
The labels of the datasets, i.e. the bars within one group.
These will show up in the legend.
group_spacing : float, default: 1.5
The space between two bar groups as a multiple of bar width.
The default value of 1.5 thus means that there's a gap of
1.5 bar widths between bar groups.
bar_spacing : float, default: 0
The space between bars as a multiple of bar width.
orientation : {"vertical", "horizontal"}, default: "vertical"
The direction of the bars.
colors : list of :mpltype:`color`, optional
A sequence of colors to be cycled through and used to color bars
of the different datasets. The sequence need not be exactly the
same length as the number of datasets, in which case the colors
will repeat from the beginning.
If not specified, the colors from the Axes property cycle will be used.
**kwargs : `.Rectangle` properties
%(Rectangle:kwdoc)s
Returns
-------
_GroupedBarReturn
A provisional result object. This will be refined in the future.
For now, the guaranteed API on the returned object is limited to
- the attribute ``bar_containers``, which is a list of
`.BarContainer`, i.e. the results of the individual `~.Axes.bar`
calls for each dataset.
- a ``remove()`` method, that remove all bars from the Axes.
See also `.Artist.remove()`.
See Also
--------
bar : A lower-level API for bar plots, with more degrees of freedom like
individual bar sizes and colors.
Notes
-----
For a better understanding, we compare the `~.Axes.grouped_bar` API with
those of `~.Axes.bar` and `~.Axes.boxplot`.
**Comparison to bar()**
`~.Axes.grouped_bar` intentionally deviates from the `~.Axes.bar` API in some
aspects. ``bar(x, y)`` is a lower-level API and places bars with height *y*
at explicit positions *x*. It also allows to specify individual bar widths
and colors. This kind of detailed control and flexibility is difficult to
manage and often not needed when plotting multiple datasets as a grouped bar
plot. Therefore, ``grouped_bar`` focusses on the abstraction of bar plots
as visualization of categorical data.
The following examples may help to transfer from ``bar`` to
``grouped_bar``.
Positions are de-emphasized in favor of categories, and default to integer values.
If you have used ``range(N)`` as positions, you can leave that value out::
bar(range(N), heights)
grouped_bar([heights])
If needed, positions can be passed as keyword arguments::
bar(x, heights)
grouped_bar([heights], positions=x)
To place category labels in `~.Axes.bar` you could use the argument
*tick_label* or use a list of category names as *x*.
`~.Axes.grouped_bar` expects them in the argument *tick_labels*::
bar(range(N), heights, tick_label=["A", "B"])
bar(["A", "B"], heights)
grouped_bar([heights], tick_labels=["A", "B"])
Dataset labels, which are shown in the legend, are still passed via the
*label* parameter::
bar(..., label="dataset")
grouped_bar(..., labels=["dataset"])
**Comparison to boxplot()**
Both `~.Axes.grouped_bar` and `~.Axes.boxplot` visualize categorical data
from multiple datasets. The basic API on *tick_labels* and *positions*
is the same, so that you can easily switch between plotting all
individual values as `~.Axes.grouped_bar` or the statistical distribution
per category as `~.Axes.boxplot`::
grouped_bar(values, positions=..., tick_labels=...)
boxplot(values, positions=..., tick_labels=...)
"""
if cbook._is_pandas_dataframe(heights):
if labels is None:
labels = heights.columns.tolist()
if tick_labels is None:
tick_labels = heights.index.tolist()
heights = heights.to_numpy().T
elif hasattr(heights, 'keys'): # dict
if labels is not None:
raise ValueError("'labels' cannot be used if 'heights' is a mapping")
labels = heights.keys()
heights = list(heights.values())
elif hasattr(heights, 'shape'): # numpy array
heights = heights.T
num_datasets = len(heights)
num_groups = len(next(iter(heights))) # inferred from first dataset
# validate that all datasets have the same length, i.e. num_groups
# - can be skipped if heights is an array
if not hasattr(heights, 'shape'):
for i, dataset in enumerate(heights):
if len(dataset) != num_groups:
raise ValueError(
"'heights' contains datasets with different number of "
f"elements. dataset 0 has {num_groups} elements but "
f"dataset {i} has {len(dataset)} elements."
)
if positions is None:
group_centers = np.arange(num_groups)
group_distance = 1
else:
group_centers = np.asanyarray(positions)
if len(group_centers) > 1:
d = np.diff(group_centers)
if not np.allclose(d, d.mean()):
raise ValueError("'positions' must be equidistant")
group_distance = d[0]
else:
group_distance = 1
_api.check_in_list(["vertical", "horizontal"], orientation=orientation)
if colors is None:
colors = itertools.cycle([None])
else:
# Note: This is equivalent to the behavior in stackplot
# TODO: do we want to be more restrictive and check lengths?
colors = itertools.cycle(colors)
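# Within one group of total width group_distance there are num_datasets
# bars, (num_datasets - 1) gaps of bar_spacing bar widths between them,
# and a margin of group_spacing bar widths split evenly on both sides.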
bar_width = (group_distance /
(num_datasets + (num_datasets - 1) * bar_spacing + group_spacing))
bar_spacing_abs = bar_spacing * bar_width
margin_abs = 0.5 * group_spacing * bar_width
if labels is None:
labels = [None] * num_datasets
else:
assert len(labels) == num_datasets
# place the bars, but only use numerical positions, categorical tick labels
# are handled separately below
bar_containers = []
for i, (hs, label, color) in enumerate(zip(heights, labels, colors)):
lefts = (group_centers - 0.5 * group_distance + margin_abs
+ i * (bar_width + bar_spacing_abs))
if orientation == "vertical":
bc = self.bar(lefts, hs, width=bar_width, align="edge",
label=label, color=color, **kwargs)
else:
bc = self.barh(lefts, hs, height=bar_width, align="edge",
label=label, color=color, **kwargs)
bar_containers.append(bc)
if tick_labels is not None:
if orientation == "vertical":
self.xaxis.set_ticks(group_centers, labels=tick_labels)
else:
self.yaxis.set_ticks(group_centers, labels=tick_labels)
return _GroupedBarReturn(bar_containers)
@_preprocess_data()
def stem(self, *args, linefmt=None, markerfmt=None, basefmt=None, bottom=0,
label=None, orientation='vertical'):
"""
Create a stem plot.
A stem plot draws lines perpendicular to a baseline at each location
*locs* from the baseline to *heads*, and places a marker there. For
vertical stem plots (the default), the *locs* are *x* positions, and
the *heads* are *y* values. For horizontal stem plots, the *locs* are
*y* positions, and the *heads* are *x* values.
Call signature::
stem([locs,] heads, linefmt=None, markerfmt=None, basefmt=None)
The *locs*-positions are optional. *linefmt* may be provided as
positional, but all other formats must be provided as keyword
arguments.
Parameters
----------
locs : array-like, default: (0, 1, ..., len(heads) - 1)
For vertical stem plots, the x-positions of the stems.
For horizontal stem plots, the y-positions of the stems.
heads : array-like
For vertical stem plots, the y-values of the stem heads.
For horizontal stem plots, the x-values of the stem heads.
linefmt : str, optional
A string defining the color and/or linestyle of the vertical lines:
========= =============
Character Line Style
========= =============
``'-'`` solid line
``'--'`` dashed line
``'-.'`` dash-dot line
``':'`` dotted line
========= =============
Default: 'C0-', i.e. solid line with the first color of the color
cycle.
Note: Markers specified through this parameter (e.g. 'x') will be
silently ignored. Instead, markers should be specified using
*markerfmt*.
markerfmt : str, optional
A string defining the color and/or shape of the markers at the stem
heads. If the marker is not given, use the marker 'o', i.e. filled
circles. If the color is not given, use the color from *linefmt*.
basefmt : str, default: 'C3-' ('C2-' in classic mode)
A format string defining the properties of the baseline.
orientation : {'vertical', 'horizontal'}, default: 'vertical'
The orientation of the stems.
bottom : float, default: 0
The y/x-position of the baseline (depending on *orientation*).
label : str, optional
The label to use for the stems in legends.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
Returns
-------
`.StemContainer`
The container may be treated like a tuple
(*markerline*, *stemlines*, *baseline*)
Notes
-----
.. seealso::
The MATLAB function
`stem <https://www.mathworks.com/help/matlab/ref/stem.html>`_
which inspired this method.
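Example (sample values chosen only for illustration)::
>>> stem([0.1, 0.3, 0.5, 0.7], [1.0, 0.5, 2.0, 1.5], linefmt='C0-', markerfmt='C0o')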
"""
if not 1 <= len(args) <= 3:
raise _api.nargs_error('stem', '1-3', len(args))
_api.check_in_list(['horizontal', 'vertical'], orientation=orientation)
if len(args) == 1:
heads, = args
locs = np.arange(len(heads))
args = ()
elif isinstance(args[1], str):
heads, *args = args
locs = np.arange(len(heads))
else:
locs, heads, *args = args
if orientation == 'vertical':
locs, heads = self._process_unit_info([("x", locs), ("y", heads)])
else: # horizontal
heads, locs = self._process_unit_info([("x", heads), ("y", locs)])
heads = cbook._check_1d(heads)
locs = cbook._check_1d(locs)
# resolve line format
if linefmt is None:
linefmt = args[0] if len(args) > 0 else "C0-"
linestyle, linemarker, linecolor = _process_plot_format(linefmt)
# resolve marker format
if markerfmt is None:
# if not given as kwarg, fall back to 'o'
markerfmt = "o"
if markerfmt == '':
markerfmt = ' ' # = empty line style; '' would resolve rcParams
markerstyle, markermarker, markercolor = _process_plot_format(markerfmt)
if markermarker is None:
markermarker = 'o'
if markerstyle is None:
markerstyle = 'None'
if markercolor is None:
markercolor = linecolor
# resolve baseline format
if basefmt is None:
basefmt = ("C2-" if mpl.rcParams["_internal.classic_mode"] else
"C3-")
basestyle, basemarker, basecolor = _process_plot_format(basefmt)
# New behaviour in 3.1 is to use a LineCollection for the stemlines
linestyle = mpl._val_or_rc(linestyle, 'lines.linestyle')
xlines = self.vlines if orientation == "vertical" else self.hlines
stemlines = xlines(
locs, bottom, heads,
colors=linecolor, linestyles=linestyle, label="_nolegend_")
if orientation == 'horizontal':
marker_x = heads
marker_y = locs
baseline_x = [bottom, bottom]
baseline_y = [np.min(locs), np.max(locs)]
else:
marker_x = locs
marker_y = heads
baseline_x = [np.min(locs), np.max(locs)]
baseline_y = [bottom, bottom]
markerline, = self.plot(marker_x, marker_y,
color=markercolor, linestyle=markerstyle,
marker=markermarker, label="_nolegend_")
baseline, = self.plot(baseline_x, baseline_y,
color=basecolor, linestyle=basestyle,
marker=basemarker, label="_nolegend_")
baseline.get_path()._interpolation_steps = \
mpl.axis.GRIDLINE_INTERPOLATION_STEPS
stem_container = StemContainer((markerline, stemlines, baseline),
label=label)
self.add_container(stem_container)
return stem_container
@_api.make_keyword_only("3.10", "explode")
@_preprocess_data(replace_names=["x", "explode", "labels", "colors"])
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False, labeldistance=1.1,
startangle=0, radius=1, counterclock=True,
wedgeprops=None, textprops=None, center=(0, 0),
frame=False, rotatelabels=False, *, normalize=True, hatch=None):
"""
Plot a pie chart.
Make a pie chart of array *x*. The fractional area of each wedge is
given by ``x/sum(x)``.
The wedges are plotted counterclockwise, by default starting from the
x-axis.
Parameters
----------
x : 1D array-like
The wedge sizes.
explode : array-like, default: None
If not *None*, is a ``len(x)`` array which specifies the fraction
of the radius with which to offset each wedge.
labels : list, default: None
A sequence of strings providing the labels for each wedge
colors : :mpltype:`color` or list of :mpltype:`color`, default: None
A sequence of colors through which the pie chart will cycle. If
*None*, will use the colors in the currently active cycle.
hatch : str or list, default: None
Hatching pattern applied to all pie wedges or sequence of patterns
through which the chart will cycle. For a list of valid patterns,
see :doc:`/gallery/shapes_and_collections/hatch_style_reference`.
.. versionadded:: 3.7
autopct : None or str or callable, default: None
If not *None*, *autopct* is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If *autopct* is a format string, the label will be
``fmt % pct``. If *autopct* is a function, then it will be called.
pctdistance : float, default: 0.6
The relative distance along the radius at which the text
generated by *autopct* is drawn. To draw the text outside the pie,
set *pctdistance* > 1. This parameter is ignored if *autopct* is
``None``.
labeldistance : float or None, default: 1.1
The relative distance along the radius at which the labels are
drawn. To draw the labels inside the pie, set *labeldistance* < 1.
If set to ``None``, labels are not drawn but are still stored for
use in `.legend`.
shadow : bool or dict, default: False
If bool, whether to draw a shadow beneath the pie. If dict, draw a shadow
passing the properties in the dict to `.Shadow`.
.. versionadded:: 3.8
*shadow* can be a dict.
startangle : float, default: 0 degrees
The angle by which the start of the pie is rotated,
counterclockwise from the x-axis.
radius : float, default: 1
The radius of the pie.
counterclock : bool, default: True
Specify fractions direction, clockwise or counterclockwise.
wedgeprops : dict, default: None
Dict of arguments passed to each `.patches.Wedge` of the pie.
For example, ``wedgeprops = {'linewidth': 3}`` sets the width of
the wedge border lines equal to 3. By default, ``clip_on=False``.
When there is a conflict between these properties and other
keywords, properties passed to *wedgeprops* take precedence.
textprops : dict, default: None
Dict of arguments to pass to the `.Text` objects.
center : (float, float), default: (0, 0)
The coordinates of the center of the chart.
frame : bool, default: False
Plot Axes frame with the chart if true.
rotatelabels : bool, default: False
Rotate each label to the angle of the corresponding slice if true.
normalize : bool, default: True
When *True*, always make a full pie by normalizing x so that
``sum(x) == 1``. *False* makes a partial pie if ``sum(x) <= 1``
and raises a `ValueError` for ``sum(x) > 1``.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
Returns
-------
`.PieContainer`
Container with all the wedge patches and any associated text objects.
.. versionchanged:: 3.11
Previously the wedges and texts were returned in a tuple.
Notes
-----
The pie chart will probably look best if the figure and Axes are
square, or the Axes aspect is equal.
This method sets the aspect ratio of the axis to "equal".
The Axes aspect ratio can be controlled with `.Axes.set_aspect`.
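For example, a three-wedge pie with percentage labels could be drawn
with::

    pie([30, 20, 50], labels=['A', 'B', 'C'], autopct='%1.0f%%')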
"""
self.set_aspect('equal')
x = np.asarray(x)
if x.ndim > 1:
raise ValueError("x must be 1D")
if np.any(x < 0):
raise ValueError("Wedge sizes 'x' must be non negative values")
if not np.all(np.isfinite(x)):
raise ValueError('Wedge sizes must be finite numbers')
sx = x.sum()
if sx == 0:
raise ValueError('All wedge sizes are zero')
if normalize:
fracs = x / sx
elif sx > 1:
raise ValueError('Cannot plot an unnormalized pie with sum(x) > 1')
else:
fracs = x
if labels is None:
labels = [''] * len(x)
if explode is None:
explode = [0] * len(x)
if len(x) != len(labels):
raise ValueError(f"'labels' must be of length 'x', not {len(labels)}")
if len(x) != len(explode):
raise ValueError(f"'explode' must be of length 'x', not {len(explode)}")
if colors is None:
get_next_color = self._get_patches_for_fill.get_next_color
else:
color_cycle = itertools.cycle(colors)
def get_next_color():
return next(color_cycle)
hatch_cycle = itertools.cycle(np.atleast_1d(hatch))
_api.check_isinstance(Real, radius=radius, startangle=startangle)
if radius <= 0:
raise ValueError(f"'radius' must be a positive number, not {radius}")
# Starting theta1 is the start fraction of the circle
theta1 = startangle / 360
if wedgeprops is None:
wedgeprops = {}
slices = []
for frac, label, expl in zip(fracs, labels, explode):
x_pos, y_pos = center
theta2 = (theta1 + frac) if counterclock else (theta1 - frac)
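# thetam is the bisector angle of the wedge in radians; exploded wedges
# are shifted along this direction by *expl*.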
thetam = 2 * np.pi * 0.5 * (theta1 + theta2)
x_pos += expl * math.cos(thetam)
y_pos += expl * math.sin(thetam)
w = mpatches.Wedge((x_pos, y_pos), radius, 360. * min(theta1, theta2),
360. * max(theta1, theta2),
facecolor=get_next_color(),
hatch=next(hatch_cycle),
clip_on=False,
label=label)
w.set(**wedgeprops)
slices.append(w)
self.add_patch(w)
if shadow:
# Make sure to add a shadow after the call to add_patch so the
# figure and transform props will be set.
shadow_dict = {'ox': -0.02, 'oy': -0.02, 'label': '_nolegend_'}
if isinstance(shadow, dict):
shadow_dict.update(shadow)
self.add_patch(mpatches.Shadow(w, **shadow_dict))
theta1 = theta2
pc = PieContainer(slices, x, normalize)
if labeldistance is None:
# Insert an empty list of texts for backwards compatibility of the
# return value.
pc.add_texts([])
else:
# Add labels to the wedges.
labels_textprops = {
'fontsize': mpl.rcParams['xtick.labelsize'],
**cbook.normalize_kwargs(textprops or {}, Text)
}
self.pie_label(pc, labels, distance=labeldistance,
alignment='outer', rotate=rotatelabels,
textprops=labels_textprops)
if autopct is not None:
# Add automatic percentage labels to wedges
auto_labels = []
for frac in fracs:
if isinstance(autopct, str):
s = autopct % (100. * frac)
elif callable(autopct):
s = autopct(100. * frac)
else:
raise TypeError(
'autopct must be callable or a format string')
if textprops is not None and mpl._val_or_rc(textprops.get("usetex"),
"text.usetex"):
# escape % (i.e. \%) if it is not already escaped
s = re.sub(r"([^\\])%", r"\1\\%", s)
auto_labels.append(s)
self.pie_label(pc, auto_labels, distance=pctdistance,
alignment='center',
textprops=textprops)
if frame:
self._request_autoscale_view()
else:
self.set(frame_on=False, xticks=[], yticks=[],
xlim=(-1.25 + center[0], 1.25 + center[0]),
ylim=(-1.25 + center[1], 1.25 + center[1]))
return pc
def pie_label(self, container, /, labels, *, distance=0.6,
textprops=None, rotate=False, alignment='auto'):
"""
Label a pie chart.
.. versionadded:: 3.11
Adds labels to wedges in the given `.PieContainer`.
Parameters
----------
container : `.PieContainer`
Container with all the wedges, likely returned from `.pie`.
labels : str or list of str
A sequence of strings providing the labels for each wedge, or a format
string with ``absval`` and/or ``frac`` placeholders. For example, to label
each wedge with its value and the percentage in brackets::
labels="{absval:d} ({frac:.0%})"
distance : float, default: 0.6
The radial position of the labels, relative to the pie radius. Values > 1
are outside the wedge and values < 1 are inside the wedge.
textprops : dict, default: None
Dict of arguments to pass to the `.Text` objects.
rotate : bool, default: False
Rotate each label to the angle of the corresponding slice if true.
alignment : {'center', 'outer', 'auto'}, default: 'auto'
Controls the horizontal alignment of the text objects relative to their
nominal position.
- 'center': The labels are centered on their points.
- 'outer': Labels are aligned away from the center of the pie, i.e., labels
on the left side of the pie are right-aligned and labels on the right
side are left-aligned.
- 'auto': Translates to 'outer' if *distance* > 1 (so that the labels do not
overlap the wedges) and 'center' otherwise.
If *rotate* is True, the vertical alignment is also affected in an
analogous way.
- 'center': The labels are centered on their points.
- 'outer': Labels are aligned away from the center of the pie, i.e., labels
on the top half of the pie are bottom-aligned and labels on the bottom
half are top-aligned.
Returns
-------
list
A list of the label `.Text` instances.
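For example, assuming ``pc`` is the `.PieContainer` returned by a previous
`~.Axes.pie` call, each wedge could be labelled with its percentage using::

    pie_label(pc, "{frac:.0%}", distance=0.6)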
"""
_api.check_in_list(['center', 'outer', 'auto'], alignment=alignment)
if alignment == 'auto':
alignment = 'outer' if distance > 1 else 'center'
if textprops is None:
textprops = {}
if isinstance(labels, str):
# Assume we have a format string
labels = [labels.format(absval=val, frac=frac) for val, frac in
zip(container.values, container.fracs)]
if mpl._val_or_rc(textprops.get("usetex"), "text.usetex"):
# escape % (i.e. \%) if it is not already escaped
labels = [re.sub(r"([^\\])%", r"\1\\%", s) for s in labels]
elif (nw := len(container.wedges)) != (nl := len(labels)):
raise ValueError(
f'The number of labels ({nl}) must match the number of wedges ({nw})')
texts = []
for wedge, label in zip(container.wedges, labels):
thetam = 2 * np.pi * 0.5 * (wedge.theta1 + wedge.theta2) / 360
xt = wedge.center[0] + distance * wedge.r * math.cos(thetam)
yt = wedge.center[1] + distance * wedge.r * math.sin(thetam)
if alignment == 'outer':
label_alignment_h = 'left' if xt > 0 else 'right'
else:
label_alignment_h = 'center'
label_alignment_v = 'center'
label_rotation = 'horizontal'
if rotate:
if alignment == 'outer':
label_alignment_v = 'bottom' if yt > 0 else 'top'
label_rotation = (np.rad2deg(thetam) + (0 if xt > 0 else 180))
t = self.text(xt, yt, label, clip_on=False, rotation=label_rotation,
horizontalalignment=label_alignment_h,
verticalalignment=label_alignment_v)
t.set(**textprops)
texts.append(t)
container.add_texts(texts)
return texts
@staticmethod
def _errorevery_to_mask(x, errorevery):
"""
Normalize `errorbar`'s *errorevery* to be a boolean mask for data *x*.
This function is split out to be usable both by 2D and 3D errorbars.
"""
if isinstance(errorevery, Integral):
errorevery = (0, errorevery)
if isinstance(errorevery, tuple):
if (len(errorevery) == 2 and
isinstance(errorevery[0], Integral) and
isinstance(errorevery[1], Integral)):
errorevery = slice(errorevery[0], None, errorevery[1])
else:
raise ValueError(
f'{errorevery=!r} is not a tuple of two integers')
elif isinstance(errorevery, slice):
pass
elif not isinstance(errorevery, str) and np.iterable(errorevery):
try:
x[errorevery] # fancy indexing
except (ValueError, IndexError) as err:
raise ValueError(
f"{errorevery=!r} is iterable but not a valid NumPy fancy "
"index to match 'xerr'/'yerr'") from err
else:
raise ValueError(f"{errorevery=!r} is not a recognized value")
everymask = np.zeros(len(x), bool)
everymask[errorevery] = True
return everymask
@_api.make_keyword_only("3.10", "ecolor")
@_preprocess_data(replace_names=["x", "y", "xerr", "yerr"],
label_namer="y")
@_docstring.interpd
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='', ecolor=None, elinewidth=None, capsize=None,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1,
capthick=None, elinestyle=None,
**kwargs):
"""
Plot y versus x as lines and/or markers with attached errorbars.
*x*, *y* define the data locations, *xerr*, *yerr* define the errorbar
sizes. By default, this draws the data markers/lines as well as the
errorbars. Use fmt='none' to draw errorbars without any data markers.
.. versionadded:: 3.7
Caps and error lines are drawn in polar coordinates on polar plots.
Parameters
----------
x, y : float or array-like
The data positions.
xerr, yerr : float or array-like, shape(N,) or shape(2, N), optional
The errorbar sizes:
- scalar: Symmetric +/- values for all data points.
- shape(N,): Symmetric +/-values for each data point.
- shape(2, N): Separate - and + values for each bar. First row
contains the lower errors, the second row contains the upper
errors.
- *None*: No errorbar.
All values must be >= 0.
See :doc:`/gallery/statistics/errorbar_features`
for an example on the usage of ``xerr`` and ``yerr``.
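For example, asymmetric errors of -0.2/+0.5 on every point could be
given as a ``(2, N)`` array-like::

    errorbar([1, 2, 3], [1, 4, 9], yerr=[[0.2, 0.2, 0.2], [0.5, 0.5, 0.5]])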
fmt : str, default: ''
The format for the data points / data lines. See `.plot` for
details.
Use 'none' (case-insensitive) to plot errorbars without any data
markers.
ecolor : :mpltype:`color`, default: None
The color of the errorbar lines. If None, use the color of the
line connecting the markers.
elinewidth : float, default: None
The linewidth of the errorbar lines. If None, the linewidth of
the current style is used.
elinestyle : str or tuple, default: 'solid'
The linestyle of the errorbar lines.
Valid values for linestyles include {'-', '--', '-.',
':', '', (offset, on-off-seq)}. See `.Line2D.set_linestyle` for a
complete description.
capsize : float, default: :rc:`errorbar.capsize`
The length of the error bar caps in points.
capthick : float, default: None
An alias to the keyword argument *markeredgewidth* (a.k.a. *mew*).
This setting is a more sensible name for the property that
controls the thickness of the error bar cap in points. For
backwards compatibility, if *mew* or *markeredgewidth* are given,
then they will override *capthick*. This may change in future
releases.
barsabove : bool, default: False
If True, will plot the errorbars above the plot
symbols. Default is below.
lolims, uplims, xlolims, xuplims : bool or array-like, default: False
These arguments can be used to indicate that a value gives only
upper/lower limits. In that case a caret symbol is used to
indicate this. *lims*-arguments may be scalars, or array-likes of
the same length as *xerr* and *yerr*. To use limits with inverted
axes, `~.Axes.set_xlim` or `~.Axes.set_ylim` must be called before
:meth:`errorbar`. Note the tricky parameter names: setting e.g.
*lolims* to True means that the y-value is a *lower* limit of the
true value, so only an *upward*-pointing arrow will be drawn!
errorevery : int or (int, int), default: 1
draws error bars on a subset of the data. *errorevery* =N draws
error bars on the points (x[::N], y[::N]).
*errorevery* =(start, N) draws error bars on the points
(x[start::N], y[start::N]). e.g. errorevery=(6, 3)
adds error bars to the data at (x[6], x[9], x[12], x[15], ...).
Used to avoid overlapping error bars when two series share x-axis
values.
Returns
-------
`.ErrorbarContainer`
The container contains:
- data_line : A `~matplotlib.lines.Line2D` instance of x, y plot markers
and/or line.
- caplines : A tuple of `~matplotlib.lines.Line2D` instances of the error
bar caps.
- barlinecols : A tuple of `.LineCollection` with the horizontal and
vertical error ranges.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
All other keyword arguments are passed on to the `~.Axes.plot` call
drawing the markers. For example, this code makes big red squares
with thick green edges::
x, y, yerr = rand(3, 10)
errorbar(x, y, yerr, marker='s', mfc='red',
mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
Valid kwargs for the marker properties are:
- *dashes*
- *dash_capstyle*
- *dash_joinstyle*
- *drawstyle*
- *fillstyle*
- *linestyle*
- *marker*
- *markeredgecolor*
- *markeredgewidth*
- *markerfacecolor*
- *markerfacecoloralt*
- *markersize*
- *markevery*
- *solid_capstyle*
- *solid_joinstyle*
Refer to the corresponding `.Line2D` property for more details:
%(Line2D:kwdoc)s
"""
kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
# Drop anything that comes in as None to use the default instead.
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs.setdefault('zorder', 2)
# Casting to object arrays preserves units.
if not isinstance(x, np.ndarray):
x = np.asarray(x, dtype=object)
if not isinstance(y, np.ndarray):
y = np.asarray(y, dtype=object)
def _upcast_err(err):
"""
Safely handle tuple of containers that carry units.
This function covers the case where the input to the xerr/yerr is a
length 2 tuple of equal length ndarray-subclasses that carry the
unit information in the container.
If we have a tuple of nested numpy array (subclasses), we defer
coercing the units to be consistent to the underlying unit
library (and implicitly the broadcasting).
Otherwise, fallback to casting to an object array.
"""
if (
# make sure it is not a scalar
np.iterable(err) and
# and it is not empty
len(err) > 0 and
# and the first element is an array sub-class use
# safe_first_element because getitem is index-first not
# location first on pandas objects so err[0] almost always
# fails.
isinstance(cbook._safe_first_finite(err), np.ndarray)
):
# Get the type of the first element
atype = type(cbook._safe_first_finite(err))
# Promote the outer container to match the inner container
if atype is np.ndarray:
# Converts using np.asarray, because data cannot
# be directly passed to init of np.ndarray
return np.asarray(err, dtype=object)
# If atype is not np.ndarray, directly pass data to init.
# This works for types such as unyts and astropy units
return atype(err)
# Otherwise wrap it in an object array
return np.asarray(err, dtype=object)
if xerr is not None and not isinstance(xerr, np.ndarray):
xerr = _upcast_err(xerr)
if yerr is not None and not isinstance(yerr, np.ndarray):
yerr = _upcast_err(yerr)
x, y = np.atleast_1d(x, y) # Make sure all the args are iterable.
if len(x) != len(y):
raise ValueError("'x' and 'y' must have the same size")
everymask = self._errorevery_to_mask(x, errorevery)
label = kwargs.pop("label", None)
kwargs['label'] = '_nolegend_'
# Create the main line and determine overall kwargs for child artists.
# We avoid calling self.plot() directly, or self._get_lines(), because
# that would call self._process_unit_info again, and do other indirect
# data processing.
(data_line, base_style), = self._get_lines._plot_args(
self, (x, y) if fmt == '' else (x, y, fmt), kwargs, return_kwargs=True)
# Do this after creating `data_line` to avoid modifying `base_style`.
if barsabove:
data_line.set_zorder(kwargs['zorder'] - .1)
else:
data_line.set_zorder(kwargs['zorder'] + .1)
# Add line to plot, or throw it away and use it to determine kwargs.
if fmt.lower() != 'none':
self.add_line(data_line)
else:
data_line = None
# Remove alpha=0 color that _get_lines._plot_args returns for
# 'none' format, and replace it with user-specified color, if
# supplied.
base_style.pop('color')
if 'color' in kwargs:
base_style['color'] = kwargs.pop('color')
if 'color' not in base_style:
base_style['color'] = 'C0'
if ecolor is None:
ecolor = base_style['color']
# Eject any line-specific information from format string, as it's not
# needed for bars or caps.
for key in ['marker', 'markersize', 'markerfacecolor',
'markerfacecoloralt',
'markeredgewidth', 'markeredgecolor', 'markevery',
'linestyle', 'fillstyle', 'drawstyle', 'dash_capstyle',
'dash_joinstyle', 'solid_capstyle', 'solid_joinstyle',
'dashes']:
base_style.pop(key, None)
# Make the style dict for the line collections (the bars).
eb_lines_style = {**base_style, 'color': ecolor}
if elinewidth is not None:
eb_lines_style['linewidth'] = elinewidth
elif 'linewidth' in kwargs:
eb_lines_style['linewidth'] = kwargs['linewidth']
for key in ('transform', 'alpha', 'zorder', 'rasterized'):
if key in kwargs:
eb_lines_style[key] = kwargs[key]
if elinestyle is not None:
eb_lines_style['linestyle'] = elinestyle
# Make the style dict for caps (the "hats").
eb_cap_style = {**base_style, 'linestyle': 'none'}
capsize = mpl._val_or_rc(capsize, "errorbar.capsize")
if capsize > 0:
eb_cap_style['markersize'] = 2. * capsize
if capthick is not None:
eb_cap_style['markeredgewidth'] = capthick
# For backwards-compat, allow explicit setting of
# 'markeredgewidth' to over-ride capthick.
for key in ('markeredgewidth', 'transform', 'alpha',
'zorder', 'rasterized'):
if key in kwargs:
eb_cap_style[key] = kwargs[key]
eb_cap_style["markeredgecolor"] = ecolor
barcols = []
caplines = {'x': [], 'y': []}
# Vectorized fancy-indexer.
def apply_mask(arrays, mask):
return [array[mask] for array in arrays]
# dep: dependent dataset, indep: independent dataset
for (dep_axis, dep, err, lolims, uplims, indep, lines_func,
marker, lomarker, himarker) in [
("x", x, xerr, xlolims, xuplims, y, self.hlines,
"|", mlines.CARETRIGHTBASE, mlines.CARETLEFTBASE),
("y", y, yerr, lolims, uplims, x, self.vlines,
"_", mlines.CARETUPBASE, mlines.CARETDOWNBASE),
]:
if err is None:
continue
lolims = np.broadcast_to(lolims, len(dep)).astype(bool)
uplims = np.broadcast_to(uplims, len(dep)).astype(bool)
try:
np.broadcast_to(err, (2, len(dep)))
except ValueError:
raise ValueError(
f"'{dep_axis}err' (shape: {np.shape(err)}) must be a "
f"scalar or a 1D or (2, n) array-like whose shape matches "
f"'{dep_axis}' (shape: {np.shape(dep)})") from None
if err.dtype is np.dtype(object) and np.any(err == None): # noqa: E711
raise ValueError(
f"'{dep_axis}err' must not contain None. "
"Use NaN if you want to skip a value.")
# Raise if any errors are negative, but not if they are nan.
# To avoid nan comparisons (which lead to warnings on some
# platforms), we select with `err==err` (which is False for nan).
# Also, since datetime.timedelta cannot be compared with 0,
# we compare with the negative error instead.
if np.any((check := err[err == err]) < -check):
raise ValueError(
f"'{dep_axis}err' must not contain negative values")
# This is like
# elow, ehigh = np.broadcast_to(...)
# return dep - elow * ~lolims, dep + ehigh * ~uplims
# except that broadcast_to would strip units.
low, high = dep + np.vstack([-(1 - lolims), 1 - uplims]) * err
barcols.append(lines_func(
*apply_mask([indep, low, high], everymask), **eb_lines_style))
if self.name == "polar" and dep_axis == "x":
for b in barcols:
for p in b.get_paths():
p._interpolation_steps = 2
# Normal errorbars for points without upper/lower limits.
nolims = ~(lolims | uplims)
if nolims.any() and capsize > 0:
indep_masked, lo_masked, hi_masked = apply_mask(
[indep, low, high], nolims & everymask)
for lh_masked in [lo_masked, hi_masked]:
# Since this has to work for x and y as dependent data, we
# first set both x and y to the independent variable and
# overwrite the respective dependent data in a second step.
line = mlines.Line2D(indep_masked, indep_masked,
marker=marker, **eb_cap_style)
line.set(**{f"{dep_axis}data": lh_masked})
caplines[dep_axis].append(line)
for idx, (lims, hl) in enumerate([(lolims, high), (uplims, low)]):
if not lims.any():
continue
hlmarker = (
himarker
if self._axis_map[dep_axis].get_inverted() ^ idx
else lomarker)
x_masked, y_masked, hl_masked = apply_mask(
[x, y, hl], lims & everymask)
# As above, we set the dependent data in a second step.
line = mlines.Line2D(x_masked, y_masked,
marker=hlmarker, **eb_cap_style)
line.set(**{f"{dep_axis}data": hl_masked})
caplines[dep_axis].append(line)
if capsize > 0:
caplines[dep_axis].append(mlines.Line2D(
x_masked, y_masked, marker=marker, **eb_cap_style))
if self.name == 'polar':
trans_shift = self.transShift
for axis in caplines:
for l in caplines[axis]:
# Rotate caps to be perpendicular to the error bars
for theta, r in zip(l.get_xdata(), l.get_ydata()):
rotation = _ScaledRotation(theta=theta, trans_shift=trans_shift)
if axis == 'y':
rotation += mtransforms.Affine2D().rotate(np.pi / 2)
ms = mmarkers.MarkerStyle(marker=marker,
transform=rotation)
self.add_line(mlines.Line2D([theta], [r], marker=ms,
**eb_cap_style))
else:
for axis in caplines:
for l in caplines[axis]:
self.add_line(l)
self._request_autoscale_view()
caplines = caplines['x'] + caplines['y']
errorbar_container = ErrorbarContainer(
(data_line, tuple(caplines), tuple(barcols)),
has_xerr=(xerr is not None), has_yerr=(yerr is not None),
label=label)
self.add_container(errorbar_container)
return errorbar_container # (l0, caplines, barcols)
@_api.make_keyword_only("3.10", "notch")
@_preprocess_data()
@_api.rename_parameter("3.9", "labels", "tick_labels")
def boxplot(self, x, notch=None, sym=None, vert=None,
orientation='vertical', whis=None, positions=None,
widths=None, patch_artist=None, bootstrap=None,
usermedians=None, conf_intervals=None,
meanline=None, showmeans=None, showcaps=None,
showbox=None, showfliers=None, boxprops=None,
tick_labels=None, flierprops=None, medianprops=None,
meanprops=None, capprops=None, whiskerprops=None,
manage_ticks=True, autorange=False, zorder=None,
capwidths=None, label=None):
"""
Draw a box and whisker plot.
The box extends from the first quartile (Q1) to the third
quartile (Q3) of the data, with a line at the median.
The whiskers extend from the box to the farthest data point
lying within 1.5x the inter-quartile range (IQR) from the box.
Flier points are those past the end of the whiskers.
See https://en.wikipedia.org/wiki/Box_plot for reference.
.. code-block:: none
Q1-1.5IQR Q1 median Q3 Q3+1.5IQR
|-----:-----|
o |--------| : |--------| o o
|-----:-----|
flier <-----------> fliers
IQR
Parameters
----------
x : Array or a sequence of vectors.
The input data. If a 2D array, a boxplot is drawn for each column
in *x*. If a sequence of 1D arrays, a boxplot is drawn for each
array in *x*.
notch : bool, default: :rc:`boxplot.notch`
Whether to draw a notched boxplot (`True`), or a rectangular
boxplot (`False`). The notches represent the confidence interval
(CI) around the median. The documentation for *bootstrap*
describes how the locations of the notches are computed by
default, but their locations may also be overridden by setting the
*conf_intervals* parameter.
.. note::
In cases where the values of the CI are less than the
lower quartile or greater than the upper quartile, the
notches will extend beyond the box, giving it a
distinctive "flipped" appearance. This is expected
behavior and consistent with other statistical
visualization packages.
sym : str, optional
The default symbol for flier points. An empty string ('') hides
the fliers. If `None`, then the fliers default to 'b+'. More
control is provided by the *flierprops* parameter.
vert : bool, optional
.. deprecated:: 3.11
Use *orientation* instead.
If this is given during the deprecation period, it overrides
the *orientation* parameter.
If True, plots the boxes vertically.
If False, plots the boxes horizontally.
orientation : {'vertical', 'horizontal'}, default: 'vertical'
If 'horizontal', plots the boxes horizontally.
Otherwise, plots the boxes vertically.
.. versionadded:: 3.10
whis : float or (float, float), default: 1.5
The position of the whiskers.
If a float, the lower whisker is at the lowest datum above
``Q1 - whis*(Q3-Q1)``, and the upper whisker at the highest datum
below ``Q3 + whis*(Q3-Q1)``, where Q1 and Q3 are the first and
third quartiles. The default value of ``whis = 1.5`` corresponds
to Tukey's original definition of boxplots.
If a pair of floats, they indicate the percentiles at which to
draw the whiskers (e.g., (5, 95)). In particular, setting this to
(0, 100) results in whiskers covering the whole range of the data.
In the edge case where ``Q1 == Q3``, *whis* is automatically set
to (0, 100) (cover the whole range of the data) if *autorange* is
True.
Beyond the whiskers, data are considered outliers and are plotted
as individual points.
bootstrap : int, optional
Specifies whether to bootstrap the confidence intervals
around the median for notched boxplots. If *bootstrap* is
None, no bootstrapping is performed, and notches are
calculated using a Gaussian-based asymptotic approximation
(see McGill, R., Tukey, J.W., and Larsen, W.A., 1978, and
Kendall and Stuart, 1967). Otherwise, bootstrap specifies
the number of times to bootstrap the median to determine its
95% confidence intervals. Values between 1000 and 10000 are
recommended.
usermedians : 1D array-like, optional
A 1D array-like of length ``len(x)``. Each entry that is not
`None` forces the value of the median for the corresponding
dataset. For entries that are `None`, the medians are computed
by Matplotlib as normal.
conf_intervals : array-like, optional
A 2D array-like of shape ``(len(x), 2)``. Each entry that is not
None forces the location of the corresponding notch (which is
only drawn if *notch* is `True`). For entries that are `None`,
the notches are computed by the method specified by the other
parameters (e.g., *bootstrap*).
positions : array-like, optional
The positions of the boxes. The ticks and limits are
automatically set to match the positions. Defaults to
``range(1, N+1)`` where N is the number of boxes to be drawn.
widths : float or array-like
The widths of the boxes. The default is 0.5, or ``0.15*(distance
between extreme positions)``, if that is smaller.
patch_artist : bool, default: :rc:`boxplot.patchartist`
If `False` produces boxes with the Line2D artist. Otherwise,
boxes are drawn with Patch artists.
tick_labels : list of str, optional
The tick labels of each boxplot.
Ticks are always placed at the box *positions*. If *tick_labels* is given,
the ticks are labelled accordingly. Otherwise, they keep their numeric
values.
.. versionchanged:: 3.9
Renamed from *labels*, which is deprecated since 3.9
and will be removed in 3.11.
manage_ticks : bool, default: True
If True, the tick locations and labels will be adjusted to match
the boxplot positions.
autorange : bool, default: False
When `True` and the data are distributed such that the 25th and
75th percentiles are equal, *whis* is set to (0, 100) such
that the whisker ends are at the minimum and maximum of the data.
meanline : bool, default: :rc:`boxplot.meanline`
If `True` (and *showmeans* is `True`), will try to render the
mean as a line spanning the full width of the box according to
*meanprops* (see below). Not recommended if *shownotches* is also
True. Otherwise, means will be shown as points.
zorder : float, default: ``Line2D.zorder = 2``
The zorder of the boxplot.
Returns
-------
dict
A dictionary mapping each component of the boxplot to a list
of the `.Line2D` instances created. That dictionary has the
following keys (assuming vertical boxplots):
- ``boxes``: the main body of the boxplot showing the
quartiles and the median's confidence intervals if
enabled.
- ``medians``: horizontal lines at the median of each box.
- ``whiskers``: the vertical lines extending to the most
extreme, non-outlier data points.
- ``caps``: the horizontal lines at the ends of the
whiskers.
- ``fliers``: points representing data that extend beyond
the whiskers (fliers).
- ``means``: points or lines representing the means.
Other Parameters
----------------
showcaps : bool, default: :rc:`boxplot.showcaps`
Show the caps on the ends of whiskers.
showbox : bool, default: :rc:`boxplot.showbox`
Show the central box.
showfliers : bool, default: :rc:`boxplot.showfliers`
Show the outliers beyond the caps.
showmeans : bool, default: :rc:`boxplot.showmeans`
Show the arithmetic means.
capprops : dict, default: None
The style of the caps.
capwidths : float or array, default: None
The widths of the caps.
boxprops : dict, default: None
The style of the box.
whiskerprops : dict, default: None
The style of the whiskers.
flierprops : dict, default: None
The style of the fliers.
medianprops : dict, default: None
The style of the median.
meanprops : dict, default: None
The style of the mean.
label : str or list of str, optional
Legend labels. Use a single string when all boxes have the same style and
you only want a single legend entry for them. Use a list of strings to
label all boxes individually. To be distinguishable, the boxes should be
styled individually, which is currently only possible by modifying the
returned artists, see e.g. :doc:`/gallery/statistics/boxplot_demo`.
In the case of a single string, the legend entry will technically be
associated with the first box only. By default, the legend will show the
median line (``result["medians"]``); if *patch_artist* is True, the legend
will show the box `.Patch` artists (``result["boxes"]``) instead.
.. versionadded:: 3.9
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
See Also
--------
.Axes.bxp : Draw a boxplot from pre-computed statistics.
violinplot : Draw an estimate of the probability density function.
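For example, three boxplots with custom tick labels could be drawn side
by side with::

    data = [[1, 2, 3, 4, 10], [2, 3, 3, 5], [1, 1, 2, 8]]
    boxplot(data, tick_labels=['a', 'b', 'c'])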
"""
# Missing arguments default to rcParams.
whis = mpl._val_or_rc(whis, 'boxplot.whiskers')
bootstrap = mpl._val_or_rc(bootstrap, 'boxplot.bootstrap')
bxpstats = cbook.boxplot_stats(x, whis=whis, bootstrap=bootstrap,
labels=tick_labels, autorange=autorange)
notch = mpl._val_or_rc(notch, 'boxplot.notch')
patch_artist = mpl._val_or_rc(patch_artist, 'boxplot.patchartist')
meanline = mpl._val_or_rc(meanline, 'boxplot.meanline')
showmeans = mpl._val_or_rc(showmeans, 'boxplot.showmeans')
showcaps = mpl._val_or_rc(showcaps, 'boxplot.showcaps')
showbox = mpl._val_or_rc(showbox, 'boxplot.showbox')
showfliers = mpl._val_or_rc(showfliers, 'boxplot.showfliers')
if boxprops is None:
boxprops = {}
if whiskerprops is None:
whiskerprops = {}
if capprops is None:
capprops = {}
if medianprops is None:
medianprops = {}
if meanprops is None:
meanprops = {}
if flierprops is None:
flierprops = {}
if patch_artist:
boxprops['linestyle'] = 'solid' # Not consistent with bxp.
if 'color' in boxprops:
boxprops['edgecolor'] = boxprops.pop('color')
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of flierkw
# handle all of the *sym* related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False', but nonetheless deal with it to keep backward
# compatibility
if sym == '':
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle='none', marker='', color='none')
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops['marker'] = marker
# if we have a color, use it
if color is not None:
# assume that if color is passed in, the user wants a
# filled symbol; if the user wants more control, use
# flierprops
flierprops['color'] = color
flierprops['markerfacecolor'] = color
flierprops['markeredgecolor'] = color
# replace medians if necessary:
if usermedians is not None:
if (len(np.ravel(usermedians)) != len(bxpstats) or
np.shape(usermedians)[0] != len(bxpstats)):
raise ValueError(
"'usermedians' and 'x' have different lengths")
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats['med'] = med
if conf_intervals is not None:
if len(conf_intervals) != len(bxpstats):
raise ValueError(
"'conf_intervals' and 'x' have different lengths")
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError('each confidence interval must '
'have two values')
else:
if ci[0] is not None:
stats['cilo'] = ci[0]
if ci[1] is not None:
stats['cihi'] = ci[1]
artists = self.bxp(bxpstats, positions=positions, widths=widths,
vert=vert, patch_artist=patch_artist,
shownotches=notch, showmeans=showmeans,
showcaps=showcaps, showbox=showbox,
boxprops=boxprops, flierprops=flierprops,
medianprops=medianprops, meanprops=meanprops,
meanline=meanline, showfliers=showfliers,
capprops=capprops, whiskerprops=whiskerprops,
manage_ticks=manage_ticks, zorder=zorder,
capwidths=capwidths, label=label,
orientation=orientation)
return artists
@_api.make_keyword_only("3.10", "widths")
def bxp(self, bxpstats, positions=None, widths=None, vert=None,
orientation='vertical', patch_artist=False, shownotches=False,
showmeans=False, showcaps=True, showbox=True, showfliers=True,
boxprops=None, whiskerprops=None, flierprops=None,
medianprops=None, capprops=None, meanprops=None,
meanline=False, manage_ticks=True, zorder=None,
capwidths=None, label=None):
"""
Draw a box and whisker plot from pre-computed statistics.
The box extends from the first quartile *q1* to the third
quartile *q3* of the data, with a line at the median (*med*).
The whiskers extend from *whislow* to *whishi*.
Flier points are markers past the end of the whiskers.
See https://en.wikipedia.org/wiki/Box_plot for reference.
.. code-block:: none
whislow q1 med q3 whishi
|-----:-----|
o |--------| : |--------| o o
|-----:-----|
flier fliers
.. note::
This is a low-level drawing function for when you already
have the statistical parameters. If you want a boxplot based
on a dataset, use `~.Axes.boxplot` instead.
Parameters
----------
bxpstats : list of dicts
A list of dictionaries containing stats for each boxplot.
Required keys are:
- ``med``: Median (float).
- ``q1``, ``q3``: First & third quartiles (float).
- ``whislo``, ``whishi``: Lower & upper whisker positions (float).
Optional keys are:
- ``mean``: Mean (float). Needed if ``showmeans=True``.
- ``fliers``: Data beyond the whiskers (array-like).
Needed if ``showfliers=True``.
- ``cilo``, ``cihi``: Lower & upper confidence intervals
about the median. Needed if ``shownotches=True``.
- ``label``: Name of the dataset (str). If available,
this will be used as a tick label for the boxplot
positions : array-like, default: [1, 2, ..., n]
The positions of the boxes. The ticks and limits
are automatically set to match the positions.
widths : float or array-like, default: None
The widths of the boxes. The default is
``clip(0.15*(distance between extreme positions), 0.15, 0.5)``.
capwidths : float or array-like, default: None
Either a scalar or a vector and sets the width of each cap.
The default is ``0.5*(width of the box)``, see *widths*.
vert : bool, optional
.. deprecated:: 3.11
Use *orientation* instead.
If this is given during the deprecation period, it overrides
the *orientation* parameter.
If True, plots the boxes vertically.
If False, plots the boxes horizontally.
orientation : {'vertical', 'horizontal'}, default: 'vertical'
If 'horizontal', plots the boxes horizontally.
Otherwise, plots the boxes vertically.
.. versionadded:: 3.10
patch_artist : bool, default: False
If `False` produces boxes with the `.Line2D` artist.
If `True` produces boxes with the `~matplotlib.patches.Patch` artist.
shownotches, showmeans, showcaps, showbox, showfliers : bool
Whether to draw the CI notches, the mean value (both default to
False), the caps, the box, and the fliers (all three default to
True).
boxprops, whiskerprops, capprops, flierprops, medianprops, meanprops :\
dict, optional
Artist properties for the boxes, whiskers, caps, fliers, medians, and
means.
meanline : bool, default: False
If `True` (and *showmeans* is `True`), will try to render the mean
as a line spanning the full width of the box according to
*meanprops*. Not recommended if *shownotches* is also True.
Otherwise, means will be shown as points.
manage_ticks : bool, default: True
If True, the tick locations and labels will be adjusted to match the
boxplot positions.
label : str or list of str, optional
Legend labels. Use a single string when all boxes have the same style and
you only want a single legend entry for them. Use a list of strings to
label all boxes individually. To be distinguishable, the boxes should be
styled individually, which is currently only possible by modifying the
returned artists, see e.g. :doc:`/gallery/statistics/boxplot_demo`.
In the case of a single string, the legend entry will technically be
associated with the first box only. By default, the legend will show the
median line (``result["medians"]``); if *patch_artist* is True, the legend
will show the box `.Patch` artists (``result["boxes"]``) instead.
.. versionadded:: 3.9
zorder : float, default: ``Line2D.zorder = 2``
The zorder of the resulting boxplot.
Returns
-------
dict
A dictionary mapping each component of the boxplot to a list
of the `.Line2D` instances created. That dictionary has the
following keys (assuming vertical boxplots):
- ``boxes``: main bodies of the boxplot showing the quartiles, and
the median's confidence intervals if enabled.
- ``medians``: horizontal lines at the median of each box.
- ``whiskers``: vertical lines up to the last non-outlier data.
- ``caps``: horizontal lines at the ends of the whiskers.
- ``fliers``: points representing data beyond the whiskers (fliers).
- ``means``: points or lines representing the means.
See Also
--------
boxplot : Draw a boxplot from data instead of pre-computed statistics.
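For example, a single box could be drawn from pre-computed statistics
(with no fliers shown) using::

    stats = [dict(med=2.5, q1=1.0, q3=4.0, whislo=0.0, whishi=5.0)]
    bxp(stats, showfliers=False)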
"""
# Clamp median line to edge of box by default.
medianprops = {
"solid_capstyle": "butt",
"dash_capstyle": "butt",
**(medianprops or {}),
}
meanprops = {
"solid_capstyle": "butt",
"dash_capstyle": "butt",
**(meanprops or {}),
}
# lists of artists to be output
whiskers = []
caps = []
boxes = []
medians = []
means = []
fliers = []
# empty list of xticklabels
datalabels = []
# Use default zorder if none specified
if zorder is None:
zorder = mlines.Line2D.zorder
zdelta = 0.1
def merge_kw_rc(subkey, explicit, zdelta=0, usemarker=True):
d = {k.split('.')[-1]: v for k, v in mpl.rcParams.items()
if k.startswith(f'boxplot.{subkey}props')}
d['zorder'] = zorder + zdelta
if not usemarker:
d['marker'] = ''
d.update(cbook.normalize_kwargs(explicit, mlines.Line2D))
return d
box_kw = {
'linestyle': mpl.rcParams['boxplot.boxprops.linestyle'],
'linewidth': mpl.rcParams['boxplot.boxprops.linewidth'],
'edgecolor': mpl.rcParams['boxplot.boxprops.color'],
'facecolor': ('white' if mpl.rcParams['_internal.classic_mode']
else mpl.rcParams['patch.facecolor']),
'zorder': zorder,
**cbook.normalize_kwargs(boxprops, mpatches.PathPatch)
} if patch_artist else merge_kw_rc('box', boxprops, usemarker=False)
whisker_kw = merge_kw_rc('whisker', whiskerprops, usemarker=False)
cap_kw = merge_kw_rc('cap', capprops, usemarker=False)
flier_kw = merge_kw_rc('flier', flierprops)
median_kw = merge_kw_rc('median', medianprops, zdelta, usemarker=False)
mean_kw = merge_kw_rc('mean', meanprops, zdelta)
removed_prop = 'marker' if meanline else 'linestyle'
# Only remove the property if it's not set explicitly as a parameter.
if meanprops is None or removed_prop not in meanprops:
mean_kw[removed_prop] = ''
# vert and orientation parameters are linked until vert's
# deprecation period expires. vert only takes precedence
# if set to False.
if vert is None:
vert = mpl.rcParams['boxplot.vertical']
else:
_api.warn_deprecated(
"3.11",
name="vert: bool",
alternative="orientation: {'vertical', 'horizontal'}",
)
if vert is False:
orientation = 'horizontal'
_api.check_in_list(['horizontal', 'vertical'], orientation=orientation)
if not mpl.rcParams['boxplot.vertical']:
_api.warn_deprecated(
"3.10",
name='boxplot.vertical', obj_type="rcparam"
)
# vertical or horizontal plot?
maybe_swap = slice(None) if orientation == 'vertical' else slice(None, None, -1)
def do_plot(xs, ys, **kwargs):
return self.plot(*[xs, ys][maybe_swap], **kwargs)[0]
def do_patch(xs, ys, **kwargs):
path = mpath.Path._create_closed(
np.column_stack([xs, ys][maybe_swap]))
patch = mpatches.PathPatch(path, **kwargs)
self.add_artist(patch)
return patch
# input validation
N = len(bxpstats)
datashape_message = ("List of boxplot statistics and `{0}` "
"values must have same the length")
# check position
if positions is None:
positions = list(range(1, N + 1))
elif len(positions) != N:
raise ValueError(datashape_message.format("positions"))
positions = np.array(positions)
if len(positions) > 0 and not all(isinstance(p, Real) for p in positions):
raise TypeError("positions should be an iterable of numbers")
# width
if widths is None:
widths = [np.clip(0.15 * np.ptp(positions), 0.15, 0.5)] * N
elif np.isscalar(widths):
widths = [widths] * N
elif len(widths) != N:
raise ValueError(datashape_message.format("widths"))
# capwidth
if capwidths is None:
capwidths = 0.5 * np.array(widths)
elif np.isscalar(capwidths):
capwidths = [capwidths] * N
elif len(capwidths) != N:
raise ValueError(datashape_message.format("capwidths"))
for pos, width, stats, capwidth in zip(positions, widths, bxpstats,
capwidths):
# try to find a new label
datalabels.append(stats.get('label', pos))
# whisker coords
whis_x = [pos, pos]
whislo_y = [stats['q1'], stats['whislo']]
whishi_y = [stats['q3'], stats['whishi']]
# cap coords
cap_left = pos - capwidth * 0.5
cap_right = pos + capwidth * 0.5
cap_x = [cap_left, cap_right]
cap_lo = np.full(2, stats['whislo'])
cap_hi = np.full(2, stats['whishi'])
# box and median coords
box_left = pos - width * 0.5
box_right = pos + width * 0.5
med_y = [stats['med'], stats['med']]
# notched boxes
if shownotches:
notch_left = pos - width * 0.25
notch_right = pos + width * 0.25
box_x = [box_left, box_right, box_right, notch_right,
box_right, box_right, box_left, box_left, notch_left,
box_left, box_left]
box_y = [stats['q1'], stats['q1'], stats['cilo'],
stats['med'], stats['cihi'], stats['q3'],
stats['q3'], stats['cihi'], stats['med'],
stats['cilo'], stats['q1']]
med_x = [notch_left, notch_right]
# plain boxes
else:
box_x = [box_left, box_right, box_right, box_left, box_left]
box_y = [stats['q1'], stats['q1'], stats['q3'], stats['q3'],
stats['q1']]
med_x = [box_left, box_right]
# maybe draw the box
if showbox:
do_box = do_patch if patch_artist else do_plot
boxes.append(do_box(box_x, box_y, **box_kw))
median_kw.setdefault('label', '_nolegend_')
# draw the whiskers
whisker_kw.setdefault('label', '_nolegend_')
whiskers.append(do_plot(whis_x, whislo_y, **whisker_kw))
whiskers.append(do_plot(whis_x, whishi_y, **whisker_kw))
# maybe draw the caps
if showcaps:
cap_kw.setdefault('label', '_nolegend_')
caps.append(do_plot(cap_x, cap_lo, **cap_kw))
caps.append(do_plot(cap_x, cap_hi, **cap_kw))
# draw the medians
medians.append(do_plot(med_x, med_y, **median_kw))
# maybe draw the means
if showmeans:
if meanline:
means.append(do_plot(
[box_left, box_right], [stats['mean'], stats['mean']],
**mean_kw
))
else:
means.append(do_plot([pos], [stats['mean']], **mean_kw))
# maybe draw the fliers
if showfliers:
flier_kw.setdefault('label', '_nolegend_')
flier_x = np.full(len(stats['fliers']), pos, dtype=np.float64)
flier_y = stats['fliers']
fliers.append(do_plot(flier_x, flier_y, **flier_kw))
# Set legend labels
if label:
box_or_med = boxes if showbox and patch_artist else medians
if cbook.is_scalar_or_string(label):
# assign the label only to the first box
box_or_med[0].set_label(label)
else: # label is a sequence
if len(box_or_med) != len(label):
raise ValueError(datashape_message.format("label"))
for artist, lbl in zip(box_or_med, label):
artist.set_label(lbl)
if manage_ticks:
axis_name = "x" if orientation == 'vertical' else "y"
interval = getattr(self.dataLim, f"interval{axis_name}")
axis = self._axis_map[axis_name]
positions = axis.convert_units(positions)
# The 0.5 additional padding ensures reasonable-looking boxes
# even when drawing a single box. We set the sticky edge to
# prevent margins expansion, in order to match old behavior (back
# when separate calls to boxplot() would completely reset the axis
# limits regardless of what was drawn before). The sticky edges
# are attached to the median lines, as they are always present.
interval[:] = (min(interval[0], min(positions) - .5),
max(interval[1], max(positions) + .5))
for median, position in zip(medians, positions):
getattr(median.sticky_edges, axis_name).extend(
[position - .5, position + .5])
# Modified from Axis.set_ticks and Axis.set_ticklabels.
locator = axis.get_major_locator()
if not isinstance(axis.get_major_locator(),
mticker.FixedLocator):
locator = mticker.FixedLocator([])
axis.set_major_locator(locator)
locator.locs = np.array([*locator.locs, *positions])
formatter = axis.get_major_formatter()
if not isinstance(axis.get_major_formatter(),
mticker.FixedFormatter):
formatter = mticker.FixedFormatter([])
axis.set_major_formatter(formatter)
formatter.seq = [*formatter.seq, *datalabels]
self._request_autoscale_view()
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers, means=means)
@staticmethod
def _parse_scatter_color_args(c, edgecolors, kwargs, xsize,
get_next_color_func):
"""
Helper function to process color related arguments of `.Axes.scatter`.
Argument precedence for facecolors:
- c (if not None)
- kwargs['facecolor']
- kwargs['facecolors']
- kwargs['color'] (==kwcolor)
- 'b' if in classic mode else the result of ``get_next_color_func()``
Argument precedence for edgecolors:
- kwargs['edgecolor']
- edgecolors (is an explicit kw argument in scatter())
- kwargs['color'] (==kwcolor)
- 'face' if not in classic mode else None
Parameters
----------
c : :mpltype:`color` or array-like or list of :mpltype:`color` or None
See argument description of `.Axes.scatter`.
edgecolors : :mpltype:`color` or sequence of color or {'face', 'none'} or None
See argument description of `.Axes.scatter`.
kwargs : dict
Additional kwargs. If these keys exist, we pop and process them:
'facecolors', 'facecolor', 'edgecolor', 'color'
Note: The dict is modified by this function.
xsize : int
The size of the x and y arrays passed to `.Axes.scatter`.
get_next_color_func : callable
A callable that returns a color. This color is used as facecolor
if no other color is provided.
Note that this is a function rather than a fixed color value to
support conditional evaluation of the next color. As of the
current implementation obtaining the next color from the
property cycle advances the cycle. This must only happen if we
actually use the color, which will only be decided within this
method.
Returns
-------
c
The input *c* if it was not *None*, else a color derived from the
other inputs or defaults.
colors : array(N, 4) or None
The facecolors as RGBA values, or *None* if a colormap is used.
edgecolors
The edgecolor.
"""
facecolors = kwargs.pop('facecolors', None)
facecolors = kwargs.pop('facecolor', facecolors)
edgecolors = kwargs.pop('edgecolor', edgecolors)
kwcolor = kwargs.pop('color', None)
if kwcolor is not None and c is not None:
raise ValueError("Supply a 'c' argument or a 'color'"
" kwarg but not both; they differ but"
" their functionalities overlap.")
if kwcolor is not None:
try:
mcolors.to_rgba_array(kwcolor)
except ValueError as err:
raise ValueError(
"'color' kwarg must be a color or sequence of color "
"specs. For a sequence of values to be color-mapped, use "
"the 'c' argument instead.") from err
if edgecolors is None:
edgecolors = kwcolor
if facecolors is None:
facecolors = kwcolor
if edgecolors is None and not mpl.rcParams['_internal.classic_mode']:
edgecolors = mpl.rcParams['scatter.edgecolors']
# Raise a warning if both `c` and `facecolor` are set (issue #24404).
if c is not None and facecolors is not None:
_api.warn_external(
"You passed both c and facecolor/facecolors for the markers. "
"c has precedence over facecolor/facecolors. "
"This behavior may change in the future."
)
c_was_none = c is None
if c is None:
c = (facecolors if facecolors is not None
else "b" if mpl.rcParams['_internal.classic_mode']
else get_next_color_func())
c_is_string_or_strings = (
isinstance(c, str)
or (np.iterable(c) and len(c) > 0
and isinstance(cbook._safe_first_finite(c), str)))
def invalid_shape_exception(csize, xsize):
return ValueError(
f"'c' argument has {csize} elements, which is inconsistent "
f"with 'x' and 'y' with size {xsize}.")
c_is_mapped = False # Unless proven otherwise below.
valid_shape = True # Unless proven otherwise below.
if not c_was_none and kwcolor is None and not c_is_string_or_strings:
try: # First, does 'c' look suitable for value-mapping?
c = np.asanyarray(c, dtype=float)
except ValueError:
pass # Failed to convert to float array; must be color specs.
else:
# handle the documented special case of a 2D array with 1
# row which is RGB(A) to broadcast.
if c.shape == (1, 4) or c.shape == (1, 3):
c_is_mapped = False
if c.size != xsize:
valid_shape = False
# If c can be either mapped values or an RGB(A) color, prefer
# the former if shapes match, the latter otherwise.
elif c.size == xsize:
c = c.ravel()
c_is_mapped = True
else: # Wrong size; it must not be intended for mapping.
if c.shape in ((3,), (4,)):
_api.warn_external(
"*c* argument looks like a single numeric RGB or "
"RGBA sequence, which should be avoided as value-"
"mapping will have precedence in case its length "
"matches with *x* & *y*. Please use the *color* "
"keyword-argument or provide a 2D array "
"with a single row if you intend to specify "
"the same RGB or RGBA value for all points.")
valid_shape = False
if not c_is_mapped:
try: # Is 'c' acceptable as PathCollection facecolors?
colors = mcolors.to_rgba_array(c)
except (TypeError, ValueError) as err:
if "RGBA values should be within 0-1 range" in str(err):
raise
else:
if not valid_shape:
raise invalid_shape_exception(c.size, xsize) from err
# Both the mapping *and* the RGBA conversion failed: pretty
# severe failure => one may appreciate a verbose feedback.
raise ValueError(
f"'c' argument must be a color, a sequence of colors, "
f"or a sequence of numbers, not {c!r}") from err
else:
if len(colors) not in (0, 1, xsize):
# NB: remember that a single color is also acceptable.
# Besides *colors* will be an empty array if c == 'none'.
raise invalid_shape_exception(len(colors), xsize)
else:
colors = None # use cmap, norm after collection is created
return c, colors, edgecolors
@_api.make_keyword_only("3.10", "marker")
@_preprocess_data(replace_names=["x", "y", "s", "linewidths",
"edgecolors", "c", "facecolor",
"facecolors", "color"],
label_namer="y")
@_docstring.interpd
def scatter(self, x, y, s=None, c=None, marker=None, cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None, *,
edgecolors=None, colorizer=None, plotnonfinite=False, **kwargs):
"""
A scatter plot of *y* vs. *x* with varying marker size and/or color.
Parameters
----------
x, y : float or array-like, shape (n, )
The data positions.
s : float or array-like, shape (n, ), optional
The marker size in points**2 (typographic points are 1/72 in.).
Default is ``rcParams['lines.markersize'] ** 2``.
The linewidth and edgecolor can visually interact with the marker
size, and can lead to artifacts if the marker size is smaller than
the linewidth.
If the linewidth is greater than 0 and the edgecolor is anything
but *'none'*, then the effective size of the marker will be
increased by half the linewidth because the stroke will be centered
on the edge of the shape.
To eliminate the marker edge either set *linewidth=0* or
*edgecolor='none'*.
c : array-like or list of :mpltype:`color` or :mpltype:`color`, optional
The marker colors. Possible values:
- A scalar or sequence of n numbers to be mapped to colors using
*cmap* and *norm*.
- A 2D array in which the rows are RGB or RGBA.
- A sequence of colors of length n.
- A single color format string.
Note that *c* should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values to be
colormapped. If you want to specify the same RGB or RGBA value for
all points, use a 2D array with a single row. Otherwise,
value mapping will take precedence when the size of *c* matches the
size of *x* and *y*.
If you wish to specify a single color for all points
prefer the *color* keyword argument.
Defaults to `None`. In that case the marker color is determined
by the value of *color*, *facecolor* or *facecolors*. In case
those are not specified or `None`, the marker color is determined
by the next color of the ``Axes``' current "shape and fill" color
cycle. This cycle defaults to :rc:`axes.prop_cycle`.
marker : `~.markers.MarkerStyle`, default: :rc:`scatter.marker`
The marker style. *marker* can be either an instance of the class
or the text shorthand for a particular marker.
See :mod:`matplotlib.markers` for more information about marker
styles.
%(cmap_doc)s
This parameter is ignored if *c* is RGB(A).
%(norm_doc)s
This parameter is ignored if *c* is RGB(A).
%(vmin_vmax_doc)s
This parameter is ignored if *c* is RGB(A).
alpha : float, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque).
linewidths : float or array-like, default: :rc:`lines.linewidth`
The linewidth of the marker edges. Note: The default *edgecolors*
is 'face'. You may want to change this as well.
edgecolors : {'face', 'none', *None*} or :mpltype:`color` or list of \
:mpltype:`color`, default: :rc:`scatter.edgecolors`
The edge color of the marker. Possible values:
- 'face': The edge color will always be the same as the face color.
- 'none': No patch boundary will be drawn.
- A color or sequence of colors.
For non-filled markers, *edgecolors* is ignored. Instead, the color
is determined like with 'face', i.e. from *c*, *colors*, or
*facecolors*.
%(colorizer_doc)s
This parameter is ignored if *c* is RGB(A).
plotnonfinite : bool, default: False
Whether to plot points with nonfinite *c* (i.e. ``inf``, ``-inf``
or ``nan``). If ``True`` the points are drawn with the *bad*
colormap color (see `.Colormap.set_bad`).
Returns
-------
`~matplotlib.collections.PathCollection`
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs : `~matplotlib.collections.PathCollection` properties
%(PathCollection:kwdoc)s
See Also
--------
plot : To plot scatter plots when markers are identical in size and
color.
Notes
-----
* The `.plot` function will be faster for scatterplots where markers
don't vary in size or color.
* Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in which
case all masks will be combined and only unmasked points will be
plotted.
* Fundamentally, scatter works with 1D arrays; *x*, *y*, *s*, and *c*
may be input as N-D arrays, but within scatter they will be
flattened. The exception is *c*, which will be flattened only if its
size matches the size of *x* and *y*.
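Examples
--------
A minimal illustrative sketch; the data values below are arbitrary
assumptions, not part of the API::

    import numpy as np
    import matplotlib.pyplot as plt

    rng = np.random.default_rng(0)
    x, y = rng.random((2, 50))
    sizes = 300 * rng.random(50)     # marker areas in points**2
    values = rng.random(50)          # scalars mapped through *cmap*
    fig, ax = plt.subplots()
    ax.scatter(x, y, s=sizes, c=values, cmap='viridis', edgecolors='k')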
"""
# add edgecolors and linewidths to kwargs so they
# can be processed by normalize_kwargs
if edgecolors is not None:
kwargs.update({'edgecolors': edgecolors})
if linewidths is not None:
kwargs.update({'linewidths': linewidths})
kwargs = cbook.normalize_kwargs(kwargs, mcoll.Collection)
# redirect linewidth and edgecolor so they can be
# further processed by the rest of the function
linewidths = kwargs.pop('linewidth', None)
edgecolors = kwargs.pop('edgecolor', None)
# Process **kwargs to handle aliases, conflicts with explicit kwargs:
x, y = self._process_unit_info([("x", x), ("y", y)], kwargs)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
if s is None:
s = (20 if mpl.rcParams['_internal.classic_mode'] else
mpl.rcParams['lines.markersize'] ** 2.0)
s = np.ma.ravel(s)
if (len(s) not in (1, x.size) or
(not np.issubdtype(s.dtype, np.floating) and
not np.issubdtype(s.dtype, np.integer))):
raise ValueError(
"s must be a scalar, "
"or float array-like with the same size as x and y")
# get the original edgecolor the user passed before we normalize
orig_edgecolor = edgecolors
if edgecolors is None:
orig_edgecolor = kwargs.get('edgecolor', None)
c, colors, edgecolors = \
self._parse_scatter_color_args(
c, edgecolors, kwargs, x.size,
get_next_color_func=self._get_patches_for_fill.get_next_color)
if plotnonfinite and colors is None:
c = np.ma.masked_invalid(c)
x, y, s, edgecolors, linewidths = \
cbook._combine_masks(x, y, s, edgecolors, linewidths)
else:
x, y, s, c, colors, edgecolors, linewidths = \
cbook._combine_masks(
x, y, s, c, colors, edgecolors, linewidths)
# Unmask edgecolors if it was actually a single RGB or RGBA.
if (x.size in (3, 4)
and np.ma.is_masked(edgecolors)
and not np.ma.is_masked(orig_edgecolor)):
edgecolors = edgecolors.data
scales = s # Renamed for readability below.
# load default marker from rcParams
marker = mpl._val_or_rc(marker, 'scatter.marker')
if isinstance(marker, mmarkers.MarkerStyle):
marker_obj = marker
else:
marker_obj = mmarkers.MarkerStyle(marker)
if cbook._str_equal(marker_obj.get_marker(), ","):
_api.warn_external(
"The pixel maker ',' is not supported on scatter(); using "
"a finite-sized square instead, which is not necessarily 1 pixel in "
"size. Use the square marker 's' instead to suppress this warning."
)
path = marker_obj.get_path().transformed(
marker_obj.get_transform())
if not marker_obj.is_filled():
if orig_edgecolor is not None:
_api.warn_external(
f"You passed an edgecolor/edgecolors ({orig_edgecolor!r}) "
f"for an unfilled marker ({marker!r}). Matplotlib is "
"ignoring the edgecolor in favor of the facecolor. This "
"behavior may change in the future."
)
# We need to handle markers that cannot be filled (like
# '+' and 'x') differently than markers that can be
# filled, but have their fillstyle set to 'none'. This is
# to get:
#
# - respecting the fillstyle if set
# - maintaining back-compatibility for querying the facecolor of
# the un-fillable markers.
#
# While not an ideal situation, this is better than the
# alternatives.
if marker_obj.get_fillstyle() == 'none':
# promote the facecolor to be the edgecolor
edgecolors = colors
# set the facecolor to 'none' (at the last chance) because
# we cannot fill a path if the facecolor is non-null
# (which is defendable at the renderer level).
colors = 'none'
else:
# if we are not nulling the face color we can do this
# simpler
edgecolors = 'face'
if linewidths is None:
linewidths = mpl.rcParams['lines.linewidth']
elif np.iterable(linewidths):
linewidths = [
lw if lw is not None else mpl.rcParams['lines.linewidth']
for lw in linewidths]
offsets = np.ma.column_stack([x, y])
collection = mcoll.PathCollection(
(path,), scales,
facecolors=colors,
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
offset_transform=kwargs.pop('transform', self.transData),
alpha=alpha,
)
collection.set_transform(mtransforms.IdentityTransform())
if colors is None:
if colorizer:
collection._set_colorizer_check_keywords(colorizer, cmap=cmap,
norm=norm, vmin=vmin,
vmax=vmax)
else:
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_array(c)
collection._scale_norm(norm, vmin, vmax)
else:
extra_kwargs = {
'cmap': cmap, 'norm': norm, 'vmin': vmin, 'vmax': vmax
}
extra_keys = [k for k, v in extra_kwargs.items() if v is not None]
if any(extra_keys):
keys_str = ", ".join(f"'{k}'" for k in extra_keys)
_api.warn_external(
"No data for colormapping provided via 'c'. "
f"Parameters {keys_str} will be ignored")
collection._internal_update(kwargs)
# Classic mode only:
# ensure there are margins to allow for the
# finite size of the symbols. In v2.x, margins
# are present by default, so we disable this
# scatter-specific override.
if mpl.rcParams['_internal.classic_mode']:
if self._xmargin < 0.05 and x.size > 0:
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0:
self.set_ymargin(0.05)
self.add_collection(collection)
return collection
@_api.make_keyword_only("3.10", "gridsize")
@_preprocess_data(replace_names=["x", "y", "C"], label_namer="y")
@_docstring.interpd
def hexbin(self, x, y, C=None, gridsize=100, bins=None,
xscale='linear', yscale='linear', extent=None,
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='face',
reduce_C_function=np.mean, mincnt=None, marginals=False,
colorizer=None, **kwargs):
"""
Make a 2D hexagonal binning plot of points *x*, *y*.
If *C* is *None*, the value of the hexagon is determined by the number
of points in the hexagon. Otherwise, *C* specifies values at the
coordinate (x[i], y[i]). For each hexagon, these values are reduced
using *reduce_C_function*.
Parameters
----------
x, y : array-like
The data positions. *x* and *y* must be of the same length.
C : array-like, optional
If given, these values are accumulated in the bins. Otherwise,
every point has a value of 1. Must be of the same length as *x*
and *y*.
gridsize : int or (int, int), default: 100
If a single int, the number of hexagons in the *x*-direction.
The number of hexagons in the *y*-direction is chosen such that
the hexagons are approximately regular.
Alternatively, if a tuple (*nx*, *ny*), the number of hexagons
in the *x*-direction and the *y*-direction. In the
*y*-direction, counting is done along vertically aligned
hexagons, not along the zig-zag chains of hexagons; see the
following illustration.
.. plot::
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(19680801)
n = 300
x = np.random.standard_normal(n)
y = np.random.standard_normal(n)
fig, ax = plt.subplots(figsize=(4, 4))
h = ax.hexbin(x, y, gridsize=(5, 3))
hx, hy = h.get_offsets().T
ax.plot(hx[24::3], hy[24::3], 'ro-')
ax.plot(hx[-3:], hy[-3:], 'ro-')
ax.set_title('gridsize=(5, 3)')
ax.axis('off')
To get approximately regular hexagons, choose
:math:`n_x = \\sqrt{3}\\,n_y`.
bins : 'log' or int or sequence, default: None
Discretization of the hexagon values.
- If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
- If 'log', use a logarithmic scale for the colormap.
Internally, :math:`log_{10}(i)` is used to determine the
hexagon color. This is equivalent to ``norm=LogNorm()``.
Note that 0 counts are thus marked with the "bad" color.
- If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
- If a sequence of values, the values of the lower bound of
the bins to be used.
xscale : {'linear', 'log'}, default: 'linear'
Use a linear or log10 scale on the horizontal axis.
yscale : {'linear', 'log'}, default: 'linear'
Use a linear or log10 scale on the vertical axis.
mincnt : int >= 0, default: *None*
If not *None*, only display cells with at least *mincnt*
number of points in the cell.
marginals : bool, default: *False*
If marginals is *True*, plot the marginal density as
colormapped rectangles along the bottom of the x-axis and
left of the y-axis.
extent : 4-tuple of float, default: *None*
The limits of the bins (xmin, xmax, ymin, ymax).
The default assigns the limits based on
*gridsize*, *x*, *y*, *xscale* and *yscale*.
If *xscale* or *yscale* is set to 'log', the limits are
expected to be the exponent for a power of 10. E.g. for
x-limits of 1 and 50 in 'linear' scale and y-limits
of 10 and 1000 in 'log' scale, enter (1, 50, 1, 3).
Returns
-------
`~matplotlib.collections.PolyCollection`
A `.PolyCollection` defining the hexagonal bins.
- `.PolyCollection.get_offsets` contains a Mx2 array containing
the x, y positions of the M hexagon centers in data coordinates.
- `.PolyCollection.get_array` contains the values of the M
hexagons.
If *marginals* is *True*, horizontal
bar and vertical bar (both PolyCollections) will be attached
to the return collection as attributes *hbar* and *vbar*.
Other Parameters
----------------
%(cmap_doc)s
%(norm_doc)s
%(vmin_vmax_doc)s
alpha : float between 0 and 1, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
linewidths : float, default: *None*
If *None*, defaults to :rc:`patch.linewidth`.
edgecolors : {'face', 'none', *None*} or color, default: 'face'
The color of the hexagon edges. Possible values are:
- 'face': Draw the edges in the same color as the fill color.
- 'none': No edges are drawn. This can sometimes lead to unsightly
unpainted pixels between the hexagons.
- *None*: Draw outlines in the default color.
- An explicit color.
reduce_C_function : callable, default: `numpy.mean`
The function to aggregate *C* within the bins. It is ignored if
*C* is not given. This must have the signature::
def reduce_C_function(C: array) -> float
Commonly used functions are:
- `numpy.mean`: average of the points
- `numpy.sum`: integral of the point values
- `numpy.amax`: value taken from the largest point
By default will only reduce cells with at least 1 point because some
reduction functions (such as `numpy.amax`) will error/warn with empty
input. Changing *mincnt* will adjust the cutoff, and if set to 0 will
pass empty input to the reduction function.
%(colorizer_doc)s
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs : `~matplotlib.collections.PolyCollection` properties
All other keyword arguments are passed on to `.PolyCollection`:
%(PolyCollection:kwdoc)s
See Also
--------
hist2d : 2D histogram rectangular bins
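Examples
--------
A minimal illustrative sketch; the data values below are arbitrary
assumptions::

    import numpy as np
    import matplotlib.pyplot as plt

    rng = np.random.default_rng(0)
    x = rng.standard_normal(10_000)
    y = rng.standard_normal(10_000)
    fig, ax = plt.subplots()
    ax.hexbin(x, y, gridsize=30, bins='log', cmap='inferno')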
"""
self._process_unit_info([("x", x), ("y", y)], kwargs, convert=False)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if np.iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx / math.sqrt(3))
# Count the number of data in each hexagon
x = np.asarray(x, float)
y = np.asarray(y, float)
# Will be log()'d if necessary, and then rescaled.
tx = x
ty = y
if xscale == 'log':
if np.any(x <= 0.0):
raise ValueError(
"x contains non-positive values, so cannot be log-scaled")
tx = np.log10(tx)
if yscale == 'log':
if np.any(y <= 0.0):
raise ValueError(
"y contains non-positive values, so cannot be log-scaled")
ty = np.log10(ty)
if extent is not None:
xmin, xmax, ymin, ymax = extent
if xmin > xmax:
raise ValueError("In extent, xmax must be greater than xmin")
if ymin > ymax:
raise ValueError("In extent, ymax must be greater than ymin")
else:
xmin, xmax = (tx.min(), tx.max()) if len(x) else (0, 1)
ymin, ymax = (ty.min(), ty.max()) if len(y) else (0, 1)
# to avoid issues with singular data, expand the min/max pairs
xmin, xmax = mtransforms.nonsingular(xmin, xmax, expander=0.1)
ymin, ymax = mtransforms.nonsingular(ymin, ymax, expander=0.1)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1 * ny1 + nx2 * ny2
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax - xmin) / nx
sy = (ymax - ymin) / ny
# Positions in hexagon index coordinates.
ix = (tx - xmin) / sx
iy = (ty - ymin) / sy
ix1 = np.round(ix).astype(int)
iy1 = np.round(iy).astype(int)
ix2 = np.floor(ix).astype(int)
iy2 = np.floor(iy).astype(int)
# flat indices, plus one so that out-of-range points go to position 0.
i1 = np.where((0 <= ix1) & (ix1 < nx1) & (0 <= iy1) & (iy1 < ny1),
ix1 * ny1 + iy1 + 1, 0)
i2 = np.where((0 <= ix2) & (ix2 < nx2) & (0 <= iy2) & (iy2 < ny2),
ix2 * ny2 + iy2 + 1, 0)
d1 = (ix - ix1) ** 2 + 3.0 * (iy - iy1) ** 2
d2 = (ix - ix2 - 0.5) ** 2 + 3.0 * (iy - iy2 - 0.5) ** 2
bdist = (d1 < d2)
if C is None: # [1:] drops out-of-range points.
counts1 = np.bincount(i1[bdist], minlength=1 + nx1 * ny1)[1:]
counts2 = np.bincount(i2[~bdist], minlength=1 + nx2 * ny2)[1:]
accum = np.concatenate([counts1, counts2]).astype(float)
if mincnt is not None:
accum[accum < mincnt] = np.nan
C = np.ones(len(x))
else:
# store the C values in a list per hexagon index
Cs_at_i1 = [[] for _ in range(1 + nx1 * ny1)]
Cs_at_i2 = [[] for _ in range(1 + nx2 * ny2)]
for i in range(len(x)):
if bdist[i]:
Cs_at_i1[i1[i]].append(C[i])
else:
Cs_at_i2[i2[i]].append(C[i])
if mincnt is None:
mincnt = 1
accum = np.array(
[reduce_C_function(acc) if len(acc) >= mincnt else np.nan
for Cs_at_i in [Cs_at_i1, Cs_at_i2]
for acc in Cs_at_i[1:]], # [1:] drops out-of-range points.
float)
good_idxs = ~np.isnan(accum)
offsets = np.zeros((n, 2), float)
offsets[:nx1 * ny1, 0] = np.repeat(np.arange(nx1), ny1)
offsets[:nx1 * ny1, 1] = np.tile(np.arange(ny1), nx1)
offsets[nx1 * ny1:, 0] = np.repeat(np.arange(nx2) + 0.5, ny2)
offsets[nx1 * ny1:, 1] = np.tile(np.arange(ny2), nx2) + 0.5
offsets[:, 0] *= sx
offsets[:, 1] *= sy
offsets[:, 0] += xmin
offsets[:, 1] += ymin
# remove accumulation bins with no data
offsets = offsets[good_idxs, :]
accum = accum[good_idxs]
polygon = [sx, sy / 3] * np.array(
[[.5, -.5], [.5, .5], [0., 1.], [-.5, .5], [-.5, -.5], [0., -1.]])
if linewidths is None:
linewidths = [mpl.rcParams['patch.linewidth']]
if xscale == 'log' or yscale == 'log':
polygons = np.expand_dims(polygon, 0)
if xscale == 'log':
polygons[:, :, 0] = 10.0 ** polygons[:, :, 0]
xmin = 10.0 ** xmin
xmax = 10.0 ** xmax
self.set_xscale(xscale)
if yscale == 'log':
polygons[:, :, 1] = 10.0 ** polygons[:, :, 1]
ymin = 10.0 ** ymin
ymax = 10.0 ** ymax
self.set_yscale(yscale)
else:
polygons = [polygon]
collection = mcoll.PolyCollection(
polygons,
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
offset_transform=mtransforms.AffineDeltaTransform(self.transData)
)
# Set normalizer if bins is 'log'
if cbook._str_equal(bins, 'log'):
if norm is not None:
_api.warn_external("Only one of 'bins' and 'norm' arguments "
f"can be supplied, ignoring {bins=}")
else:
norm = mcolors.LogNorm(vmin=vmin, vmax=vmax)
vmin = vmax = None
bins = None
if bins is not None:
if not np.iterable(bins):
minimum, maximum = min(accum), max(accum)
bins -= 1 # one less edge than bins
bins = minimum + (maximum - minimum) * np.arange(bins) / bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if colorizer:
collection._set_colorizer_check_keywords(colorizer, cmap=cmap,
norm=norm, vmin=vmin,
vmax=vmax)
else:
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_array(accum)
collection.set_alpha(alpha)
collection._internal_update(kwargs)
collection._scale_norm(norm, vmin, vmax)
# autoscale the norm with current accum values if it hasn't been set
if norm is not None:
if collection.norm.vmin is None and collection.norm.vmax is None:
collection.norm.autoscale()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim(corners)
self._request_autoscale_view(tight=True)
# add the collection last
self.add_collection(collection, autolim=False)
if not marginals:
return collection
# Process marginals
bars = []
for zname, z, zmin, zmax, zscale, nbins in [
("x", x, xmin, xmax, xscale, nx),
("y", y, ymin, ymax, yscale, 2 * ny),
]:
if zscale == "log":
bin_edges = np.geomspace(zmin, zmax, nbins + 1)
else:
bin_edges = np.linspace(zmin, zmax, nbins + 1)
verts = np.empty((nbins, 4, 2))
verts[:, 0, 0] = verts[:, 1, 0] = bin_edges[:-1]
verts[:, 2, 0] = verts[:, 3, 0] = bin_edges[1:]
verts[:, 0, 1] = verts[:, 3, 1] = .00
verts[:, 1, 1] = verts[:, 2, 1] = .05
if zname == "y":
verts = verts[:, :, ::-1] # Swap x and y.
# Sort z-values into bins defined by bin_edges.
bin_idxs = np.searchsorted(bin_edges, z) - 1
values = np.empty(nbins)
for i in range(nbins):
# Get C-values for each bin, and compute bin value with
# reduce_C_function.
ci = C[bin_idxs == i]
values[i] = reduce_C_function(ci) if len(ci) > 0 else np.nan
mask = ~np.isnan(values)
verts = verts[mask]
values = values[mask]
trans = getattr(self, f"get_{zname}axis_transform")(which="grid")
bar = mcoll.PolyCollection(
verts, transform=trans, edgecolors="face")
bar.set_array(values)
bar.set_cmap(cmap)
bar.set_norm(norm)
bar.set_alpha(alpha)
bar._internal_update(kwargs)
bars.append(self.add_collection(bar, autolim=False))
collection.hbar, collection.vbar = bars
def on_changed(collection):
collection.hbar.set_cmap(collection.get_cmap())
collection.hbar.set_clim(collection.get_clim())
collection.vbar.set_cmap(collection.get_cmap())
collection.vbar.set_clim(collection.get_clim())
collection.callbacks.connect('changed', on_changed)
return collection
@_docstring.interpd
def arrow(self, x, y, dx, dy, **kwargs):
"""
[*Discouraged*] Add an arrow to the Axes.
This draws an arrow from ``(x, y)`` to ``(x+dx, y+dy)``.
.. admonition:: Discouraged
The use of this method is discouraged because it is not guaranteed
that the arrow renders reasonably. For example, the resulting arrow
is affected by the Axes aspect ratio and limits, which may distort
the arrow.
Consider using `~.Axes.annotate` without a text instead, e.g. ::
ax.annotate("", xytext=(0, 0), xy=(0.5, 0.5),
arrowprops=dict(arrowstyle="->"))
Parameters
----------
%(FancyArrow)s
Returns
-------
`.FancyArrow`
The created `.FancyArrow` object.
"""
# Strip away units for the underlying patch since units
# do not make sense to most patch-like code
x = self.convert_xunits(x)
y = self.convert_yunits(y)
dx = self.convert_xunits(dx)
dy = self.convert_yunits(dy)
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_patch(a)
self._request_autoscale_view()
return a
@_docstring.copy(mquiver.QuiverKey.__init__)
def quiverkey(self, Q, X, Y, U, label, **kwargs):
qk = mquiver.QuiverKey(Q, X, Y, U, label, **kwargs)
self.add_artist(qk)
return qk
# Handle units for x and y, if they've been passed
def _quiver_units(self, args, kwargs):
if len(args) > 3:
x, y = args[0:2]
x, y = self._process_unit_info([("x", x), ("y", y)], kwargs)
return (x, y) + args[2:]
return args
# args can be a combination of X, Y, U, V, C and all should be replaced
@_preprocess_data()
@_docstring.interpd
def quiver(self, *args, **kwargs):
"""%(quiver_doc)s"""
# Make sure units are handled for x and y values
args = self._quiver_units(args, kwargs)
q = mquiver.Quiver(self, *args, **kwargs)
self.add_collection(q)
return q
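# A minimal illustrative sketch of quiver usage; the grid and vector values
# below are arbitrary assumptions, not taken from this module:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#
#     X, Y = np.meshgrid(np.arange(10), np.arange(10))
#     U, V = np.cos(X), np.sin(Y)
#     fig, ax = plt.subplots()
#     ax.quiver(X, Y, U, V)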
# args can be some combination of X, Y, U, V, C and all should be replaced
@_preprocess_data()
@_docstring.interpd
def barbs(self, *args, **kwargs):
"""%(barbs_doc)s"""
# Make sure units are handled for x and y values
args = self._quiver_units(args, kwargs)
b = mquiver.Barbs(self, *args, **kwargs)
self.add_collection(b)
return b
# Uses a custom implementation of data-kwarg handling in
# _process_plot_var_args.
def fill(self, *args, data=None, **kwargs):
"""
Plot filled polygons.
Parameters
----------
*args : sequence of x, y, [color]
Each polygon is defined by the lists of *x* and *y* positions of
its nodes, optionally followed by a *color* specifier. See
:mod:`matplotlib.colors` for supported color specifiers. The
standard color cycle is used for polygons without a color
specifier.
You can plot multiple polygons by providing multiple *x*, *y*,
*[color]* groups.
For example, each of the following is legal::
ax.fill(x, y) # a polygon with default color
ax.fill(x, y, "b") # a blue polygon
ax.fill(x, y, x2, y2) # two polygons
ax.fill(x, y, "b", x2, y2, "r") # a blue and a red polygon
data : indexable object, optional
An object with labelled data. If given, provide the label names to
plot in *x* and *y*, e.g.::
ax.fill("time", "signal",
data={"time": [0, 1, 2], "signal": [0, 1, 0]})
Returns
-------
list of `~matplotlib.patches.Polygon`
Other Parameters
----------------
**kwargs : `~matplotlib.patches.Polygon` properties
Notes
-----
Use :meth:`fill_between` if you would like to fill the region between
two curves.
"""
# For compatibility(!), get aliases from Line2D rather than Patch.
kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
# _get_patches_for_fill returns a generator, convert it to a list.
patches = [*self._get_patches_for_fill(self, *args, data=data, **kwargs)]
for poly in patches:
self.add_patch(poly)
self._request_autoscale_view()
return patches
def _fill_between_x_or_y(
self, ind_dir, ind, dep1, dep2=0, *,
where=None, interpolate=False, step=None, **kwargs):
# Common implementation between fill_between (*ind_dir*="x") and
# fill_betweenx (*ind_dir*="y"). *ind* is the independent variable,
# *dep* the dependent variable. The docstring below is interpolated
# to generate both methods' docstrings.
"""
Fill the area between two {dir} curves.
The curves are defined by the points (*{ind}*, *{dep}1*) and (*{ind}*,
*{dep}2*). This creates one or multiple polygons describing the filled
area.
You may exclude some {dir} sections from filling using *where*.
By default, the edges connect the given points directly. Use *step*
if the filling should be a step function, i.e. constant in between
*{ind}*.
Parameters
----------
{ind} : array-like
The {ind} coordinates of the nodes defining the curves.
{dep}1 : array-like or float
The {dep} coordinates of the nodes defining the first curve.
{dep}2 : array-like or float, default: 0
The {dep} coordinates of the nodes defining the second curve.
where : array-like of bool, optional
Define *where* to exclude some {dir} regions from being filled.
The filled regions are defined by the coordinates ``{ind}[where]``.
More precisely, fill between ``{ind}[i]`` and ``{ind}[i+1]`` if
``where[i] and where[i+1]``. Note that this definition implies
that an isolated *True* value between two *False* values in *where*
will not result in filling. Both sides of the *True* position
remain unfilled due to the adjacent *False* values.
interpolate : bool, default: False
This option is only relevant if *where* is used and the two curves
are crossing each other.
Semantically, *where* is often used for *{dep}1* > *{dep}2* or
similar. By default, the nodes of the polygon defining the filled
region will only be placed at the positions in the *{ind}* array.
Such a polygon cannot describe the above semantics close to the
intersection. The {ind}-sections containing the intersection are
simply clipped.
Setting *interpolate* to *True* will calculate the actual
intersection point and extend the filled region up to this point.
step : {{'pre', 'post', 'mid'}}, optional
Define *step* if the filling should be a step function,
i.e. constant in between *{ind}*. The value determines where the
step will occur:
- 'pre': The {dep} value is continued constantly to the left from
every *{ind}* position, i.e. the interval ``({ind}[i-1], {ind}[i]]``
has the value ``{dep}[i]``.
- 'post': The {dep} value is continued constantly to the right from
every *{ind}* position, i.e. the interval ``[{ind}[i], {ind}[i+1])``
has the value ``{dep}[i]``.
- 'mid': Steps occur half-way between the *{ind}* positions.
Returns
-------
`.FillBetweenPolyCollection`
A `.FillBetweenPolyCollection` containing the plotted polygons.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
All other keyword arguments are passed on to
`.FillBetweenPolyCollection`. They control the `.Polygon` properties:
%(FillBetweenPolyCollection:kwdoc)s
See Also
--------
fill_between : Fill between two sets of y-values.
fill_betweenx : Fill between two sets of x-values.
"""
dep_dir = mcoll.FillBetweenPolyCollection._f_dir_from_t(ind_dir)
if not mpl.rcParams["_internal.classic_mode"]:
kwargs = cbook.normalize_kwargs(kwargs, mcoll.Collection)
if not any(c in kwargs for c in ("color", "facecolor")):
kwargs["facecolor"] = self._get_patches_for_fill.get_next_color()
ind, dep1, dep2 = self._fill_between_process_units(
ind_dir, dep_dir, ind, dep1, dep2, **kwargs)
collection = mcoll.FillBetweenPolyCollection(
ind_dir, ind, dep1, dep2,
where=where, interpolate=interpolate, step=step, **kwargs)
self.add_collection(collection)
return collection
def _fill_between_process_units(self, ind_dir, dep_dir, ind, dep1, dep2, **kwargs):
"""Handle united data, such as dates."""
return map(np.ma.masked_invalid, self._process_unit_info(
[(ind_dir, ind), (dep_dir, dep1), (dep_dir, dep2)], kwargs))
def fill_between(self, x, y1, y2=0, where=None, interpolate=False,
step=None, **kwargs):
return self._fill_between_x_or_y(
"x", x, y1, y2,
where=where, interpolate=interpolate, step=step, **kwargs)
if _fill_between_x_or_y.__doc__:
fill_between.__doc__ = _fill_between_x_or_y.__doc__.format(
dir="horizontal", ind="x", dep="y"
)
fill_between = _preprocess_data(
_docstring.interpd(fill_between),
replace_names=["x", "y1", "y2", "where"])
def fill_betweenx(self, y, x1, x2=0, where=None,
step=None, interpolate=False, **kwargs):
return self._fill_between_x_or_y(
"y", y, x1, x2,
where=where, interpolate=interpolate, step=step, **kwargs)
if _fill_between_x_or_y.__doc__:
fill_betweenx.__doc__ = _fill_between_x_or_y.__doc__.format(
dir="vertical", ind="y", dep="x"
)
fill_betweenx = _preprocess_data(
_docstring.interpd(fill_betweenx),
replace_names=["y", "x1", "x2", "where"])
#### plotting z(x, y): imshow, pcolor and relatives, contour
@_preprocess_data()
@_docstring.interpd
def imshow(self, X, cmap=None, norm=None, *, aspect=None,
interpolation=None, alpha=None,
vmin=None, vmax=None, colorizer=None, origin=None, extent=None,
interpolation_stage=None, filternorm=True, filterrad=4.0,
resample=None, url=None, **kwargs):
"""
Display data as an image, i.e., on a 2D regular raster.
The input may either be actual RGB(A) data, or 2D scalar data, which
will be rendered as a pseudocolor image. For displaying a grayscale
image, set up the colormapping using the parameters
``cmap='gray', vmin=0, vmax=255``.
The number of pixels used to render an image is set by the Axes size
and the figure *dpi*. This can lead to aliasing artifacts when
the image is resampled, because the displayed image size will usually
not match the size of *X* (see
:doc:`/gallery/images_contours_and_fields/image_antialiasing`).
The resampling can be controlled via the *interpolation* parameter
and/or :rc:`image.interpolation`.
Parameters
----------
X : array-like or PIL image
The image data. Supported array shapes are:
- (M, N): an image with scalar data. The values are mapped to
colors using normalization and a colormap. See parameters *norm*,
*cmap*, *vmin*, *vmax*.
- (M, N, 3): an image with RGB values (0-1 float or 0-255 int).
- (M, N, 4): an image with RGBA values (0-1 float or 0-255 int),
i.e. including transparency.
The first two dimensions (M, N) define the rows and columns of
the image.
Out-of-range RGB(A) values are clipped.
%(cmap_doc)s
This parameter is ignored if *X* is RGB(A).
%(norm_doc)s
This parameter is ignored if *X* is RGB(A).
%(vmin_vmax_doc)s
This parameter is ignored if *X* is RGB(A).
%(colorizer_doc)s
This parameter is ignored if *X* is RGB(A).
aspect : {'equal', 'auto'} or float or None, default: None
The aspect ratio of the Axes. This parameter is particularly
relevant for images since it determines whether data pixels are
square.
This parameter is a shortcut for explicitly calling
`.Axes.set_aspect`. See there for further details.
- 'equal': Ensures an aspect ratio of 1. Pixels will be square
(unless pixel sizes are explicitly made non-square in data
coordinates using *extent*).
- 'auto': The Axes is kept fixed and the aspect is adjusted so
that the data fit in the Axes. In general, this will result in
non-square pixels.
Normally, None (the default) means to use :rc:`image.aspect`. However, if
the image uses a transform that does not contain the axes data transform,
then None means to not modify the axes aspect at all (in that case, directly
call `.Axes.set_aspect` if desired).
interpolation : str, default: :rc:`image.interpolation`
The interpolation method used.
Supported values are 'none', 'auto', 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite',
'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell',
'sinc', 'lanczos', 'blackman'.
The data *X* is resampled to the pixel size of the image on the
figure canvas, using the interpolation method to either up- or
downsample the data.
If *interpolation* is 'none', then for the ps, pdf, and svg
backends no down- or upsampling occurs, and the image data is
passed to the backend as a native image. Note that different ps,
pdf, and svg viewers may display these raw pixels differently. On
other backends, 'none' is the same as 'nearest'.
If *interpolation* is the default 'auto', then 'nearest'
interpolation is used if the image is upsampled by more than a
factor of three (i.e. the number of display pixels is at least
three times the size of the data array). If the upsampling rate is
smaller than 3, or the image is downsampled, then 'hanning'
interpolation is used to act as an anti-aliasing filter, unless the
image happens to be upsampled by exactly a factor of two or one.
See
:doc:`/gallery/images_contours_and_fields/interpolation_methods`
for an overview of the supported interpolation methods, and
:doc:`/gallery/images_contours_and_fields/image_antialiasing` for
a discussion of image antialiasing.
Some interpolation methods require an additional radius parameter,
which can be set by *filterrad*. Additionally, the antigrain image
resize filter is controlled by the parameter *filternorm*.
interpolation_stage : {'auto', 'data', 'rgba'}, default: 'auto'
Supported values:
- 'data': Interpolation is carried out on the data provided by the user
This is useful if interpolating between pixels during upsampling.
- 'rgba': The interpolation is carried out in RGBA-space after the
color-mapping has been applied. This is useful if downsampling and
combining pixels visually.
- 'auto': Select a suitable interpolation stage automatically. This uses
'rgba' when downsampling, or upsampling at a rate less than 3, and
'data' when upsampling at a higher rate.
See :doc:`/gallery/images_contours_and_fields/image_antialiasing` for
a discussion of image antialiasing.
alpha : float or array-like, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
If *alpha* is an array, the alpha blending values are applied pixel
by pixel, and *alpha* must have the same shape as *X*.
origin : {'upper', 'lower'}, default: :rc:`image.origin`
Place the [0, 0] index of the array in the upper left or lower
left corner of the Axes. The convention (the default) 'upper' is
typically used for matrices and images.
Note that the vertical axis points upward for 'lower'
but downward for 'upper'.
See the :ref:`imshow_extent` tutorial for
examples and a more detailed description.
extent : floats (left, right, bottom, top), optional
The bounding box in data coordinates that the image will fill.
These values may be unitful and match the units of the Axes.
The image is stretched individually along x and y to fill the box.
The default extent is determined by the following conditions.
Pixels have unit size in data coordinates. Their centers are on
integer coordinates, and their center coordinates range from 0 to
columns-1 horizontally and from 0 to rows-1 vertically.
Note that the direction of the vertical axis and thus the default
values for top and bottom depend on *origin*:
- For ``origin == 'upper'`` the default is
``(-0.5, numcols-0.5, numrows-0.5, -0.5)``.
- For ``origin == 'lower'`` the default is
``(-0.5, numcols-0.5, -0.5, numrows-0.5)``.
See the :ref:`imshow_extent` tutorial for
examples and a more detailed description.
filternorm : bool, default: True
A parameter for the antigrain image resize filter (see the
antigrain documentation). If *filternorm* is set, the filter
normalizes integer values and corrects the rounding errors. It
doesn't do anything with the source floating point values, it
corrects only integers according to the rule of 1.0 which means
that any sum of pixel weights must be equal to 1.0. So, the
filter function must produce a graph of the proper shape.
filterrad : float > 0, default: 4.0
The filter radius for filters that have a radius parameter, i.e.
when interpolation is one of: 'sinc', 'lanczos' or 'blackman'.
resample : bool, default: :rc:`image.resample`
When *True*, use a full resampling method. When *False*, only
resample when the output image is larger than the input image.
url : str, optional
Set the url of the created `.AxesImage`. See `.Artist.set_url`.
Returns
-------
`~matplotlib.image.AxesImage`
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs : `~matplotlib.artist.Artist` properties
These parameters are passed on to the constructor of the
`.AxesImage` artist.
See Also
--------
matshow : Plot a matrix or an array as an image.
Notes
-----
Unless *extent* is used, pixel centers will be located at integer
coordinates. In other words: the origin will coincide with the center
of pixel (0, 0).
There are two common representations for RGB images with an alpha
channel:
- Straight (unassociated) alpha: R, G, and B channels represent the
color of the pixel, disregarding its opacity.
- Premultiplied (associated) alpha: R, G, and B channels represent
the color of the pixel, adjusted for its opacity by multiplication.
`~matplotlib.pyplot.imshow` expects RGB images adopting the straight
(unassociated) alpha representation.
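Examples
--------
A minimal illustrative sketch; the data values below are arbitrary
assumptions::

    import numpy as np
    import matplotlib.pyplot as plt

    rng = np.random.default_rng(0)
    data = rng.random((16, 16))      # scalar data, color-mapped via *cmap*
    fig, ax = plt.subplots()
    ax.imshow(data, cmap='gray', origin='lower', extent=(0, 1, 0, 1))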
"""
im = mimage.AxesImage(self, cmap=cmap, norm=norm, colorizer=colorizer,
interpolation=interpolation, origin=origin,
extent=extent, filternorm=filternorm,
filterrad=filterrad, resample=resample,
interpolation_stage=interpolation_stage,
**kwargs)
if aspect is None and not (
im.is_transform_set()
and not im.get_transform().contains_branch(self.transData)):
aspect = mpl.rcParams['image.aspect']
if aspect is not None:
self.set_aspect(aspect)
im.set_data(X)
im.set_alpha(alpha)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to Axes patch
im.set_clip_path(self.patch)
im._check_exclusionary_keywords(colorizer, vmin=vmin, vmax=vmax)
im._scale_norm(norm, vmin, vmax)
im.set_url(url)
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
self.add_image(im)
return im
def _pcolorargs(self, funcname, *args, shading='auto', **kwargs):
# - create X and Y if not present;
# - reshape X and Y as needed if they are 1-D;
# - check for proper sizes based on `shading` kwarg;
# - reset shading if shading='auto' to flat or nearest
# depending on size;
_valid_shading = ['gouraud', 'nearest', 'flat', 'auto']
try:
_api.check_in_list(_valid_shading, shading=shading)
except ValueError:
_api.warn_external(f"shading value '{shading}' not in list of "
f"valid values {_valid_shading}. Setting "
"shading='auto'.")
shading = 'auto'
if len(args) == 1:
C = np.asanyarray(args[0])
nrows, ncols = C.shape[:2]
if shading in ['gouraud', 'nearest']:
X, Y = np.meshgrid(np.arange(ncols), np.arange(nrows))
else:
X, Y = np.meshgrid(np.arange(ncols + 1), np.arange(nrows + 1))
shading = 'flat'
elif len(args) == 3:
# Check x and y for bad data...
C = np.asanyarray(args[2])
# unit conversion allows e.g. datetime objects as axis values
X, Y = args[:2]
X, Y = self._process_unit_info([("x", X), ("y", Y)], kwargs)
X, Y = (cbook.safe_masked_invalid(a, copy=True) for a in [X, Y])
if funcname == 'pcolormesh':
if np.ma.is_masked(X) or np.ma.is_masked(Y):
raise ValueError(
'x and y arguments to pcolormesh cannot have '
'non-finite values or be of type '
'numpy.ma.MaskedArray with masked values')
nrows, ncols = C.shape[:2]
else:
raise _api.nargs_error(funcname, takes="1 or 3", given=len(args))
Nx = X.shape[-1]
Ny = Y.shape[0]
if X.ndim != 2 or X.shape[0] == 1:
x = X.reshape(1, Nx)
X = x.repeat(Ny, axis=0)
if Y.ndim != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(f'Incompatible X, Y inputs to {funcname}; '
f'see help({funcname})')
if shading == 'auto':
if ncols == Nx and nrows == Ny:
shading = 'nearest'
else:
shading = 'flat'
if shading == 'flat':
if (Nx, Ny) != (ncols + 1, nrows + 1):
raise TypeError(f"Dimensions of C {C.shape} should"
f" be one smaller than X({Nx}) and Y({Ny})"
f" while using shading='flat'"
f" see help({funcname})")
else: # ['nearest', 'gouraud']:
if (Nx, Ny) != (ncols, nrows):
raise TypeError('Dimensions of C %s are incompatible with'
' X (%d) and/or Y (%d); see help(%s)' % (
C.shape, Nx, Ny, funcname))
if shading == 'nearest':
# grid is specified at the center, so define corners
# at the midpoints between the grid centers and then use the
# flat algorithm.
def _interp_grid(X, require_monotonicity=False):
# helper for below. To ensure the cell edges are calculated
# correctly, when expanding columns, the monotonicity of
# X coords needs to be checked. When expanding rows, the
# monotonicity of Y coords needs to be checked.
if np.shape(X)[1] > 1:
dX = np.diff(X, axis=1) * 0.5
if (require_monotonicity and
not (np.all(dX >= 0) or np.all(dX <= 0))):
_api.warn_external(
f"The input coordinates to {funcname} are "
"interpreted as cell centers, but are not "
"monotonically increasing or decreasing. "
"This may lead to incorrectly calculated cell "
"edges, in which case, please supply "
f"explicit cell edges to {funcname}.")
hstack = np.ma.hstack if np.ma.isMA(X) else np.hstack
X = hstack((X[:, [0]] - dX[:, [0]],
X[:, :-1] + dX,
X[:, [-1]] + dX[:, [-1]]))
else:
# This is just degenerate, but we can't reliably guess
# a dX if there is just one value.
X = np.hstack((X, X))
return X
if ncols == Nx:
X = _interp_grid(X, require_monotonicity=True)
Y = _interp_grid(Y)
if nrows == Ny:
X = _interp_grid(X.T).T
Y = _interp_grid(Y.T, require_monotonicity=True).T
shading = 'flat'
C = cbook.safe_masked_invalid(C, copy=True)
return X, Y, C, shading
@_preprocess_data()
@_docstring.interpd
def pcolor(self, *args, shading=None, alpha=None, norm=None, cmap=None,
vmin=None, vmax=None, colorizer=None, **kwargs):
r"""
Create a pseudocolor plot with a non-regular rectangular grid.
Call signature::
pcolor([X, Y,] C, /, **kwargs)
*X* and *Y* can be used to specify the corners of the quadrilaterals.
The arguments *X*, *Y*, *C* are positional-only.
.. hint::
``pcolor()`` can be very slow for large arrays. In most
cases you should use the similar but much faster
`~.Axes.pcolormesh` instead. See
:ref:`Differences between pcolor() and pcolormesh()
<differences-pcolor-pcolormesh>` for a discussion of the
differences.
Parameters
----------
C : 2D array-like
The color-mapped values. Color-mapping is controlled by *cmap*,
*norm*, *vmin*, and *vmax*.
X, Y : array-like, optional
The coordinates of the corners of quadrilaterals of a pcolormesh::
(X[i+1, j], Y[i+1, j]) (X[i+1, j+1], Y[i+1, j+1])
●╶───╴●
│ │
●╶───╴●
(X[i, j], Y[i, j]) (X[i, j+1], Y[i, j+1])
Note that the column index corresponds to the x-coordinate, and
the row index corresponds to y. For details, see the
:ref:`Notes <axes-pcolormesh-grid-orientation>` section below.
If ``shading='flat'`` the dimensions of *X* and *Y* should be one
greater than those of *C*, and the quadrilateral is colored due
to the value at ``C[i, j]``. If *X*, *Y* and *C* have equal
dimensions, a warning will be raised and the last row and column
of *C* will be ignored.
If ``shading='nearest'``, the dimensions of *X* and *Y* should be
the same as those of *C* (if not, a ValueError will be raised). The
color ``C[i, j]`` will be centered on ``(X[i, j], Y[i, j])``.
If *X* and/or *Y* are 1-D arrays or column vectors they will be
expanded as needed into the appropriate 2D arrays, making a
rectangular grid.
shading : {'flat', 'nearest', 'auto'}, default: :rc:`pcolor.shading`
The fill style for the quadrilateral. Possible values:
- 'flat': A solid color is used for each quad. The color of the
quad (i, j), (i+1, j), (i, j+1), (i+1, j+1) is given by
``C[i, j]``. The dimensions of *X* and *Y* should be
one greater than those of *C*; if they are the same as *C*,
then a deprecation warning is raised, and the last row
and column of *C* are dropped.
- 'nearest': Each grid point will have a color centered on it,
extending halfway between the adjacent grid centers. The
dimensions of *X* and *Y* must be the same as *C*.
- 'auto': Choose 'flat' if dimensions of *X* and *Y* are one
larger than *C*. Choose 'nearest' if dimensions are the same.
See :doc:`/gallery/images_contours_and_fields/pcolormesh_grids`
for more description.
%(cmap_doc)s
%(norm_doc)s
%(vmin_vmax_doc)s
%(colorizer_doc)s
edgecolors : {'none', None, 'face', color, color sequence}, optional
The color of the edges. Defaults to 'none'. Possible values:
- 'none' or '': No edge.
- *None*: :rc:`patch.edgecolor` will be used. Note that currently
:rc:`patch.force_edgecolor` has to be True for this to work.
- 'face': Use the adjacent face color.
- A color or sequence of colors will set the edge color.
The singular form *edgecolor* works as an alias.
alpha : float, default: None
The alpha blending value of the face color, between 0 (transparent)
and 1 (opaque). Note: The edgecolor is currently not affected by
this.
snap : bool, default: False
Whether to snap the mesh to pixel boundaries.
Returns
-------
`matplotlib.collections.PolyQuadMesh`
Other Parameters
----------------
antialiaseds : bool, default: False
The default *antialiaseds* is False if the default
*edgecolors*\ ="none" is used. This eliminates artificial lines
at patch boundaries, and works regardless of the value of alpha.
If *edgecolors* is not "none", then the default *antialiaseds*
is taken from :rc:`patch.antialiased`.
Stroking the edges may be preferred if *alpha* is 1, but will
cause artifacts otherwise.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Additionally, the following arguments are allowed. They are passed
along to the `~matplotlib.collections.PolyQuadMesh` constructor:
%(PolyCollection:kwdoc)s
See Also
--------
pcolormesh : for an explanation of the differences between
pcolor and pcolormesh.
imshow : If *X* and *Y* are each equidistant, `~.Axes.imshow` can be a
faster alternative.
Notes
-----
**Masked arrays**
*X*, *Y* and *C* may be masked arrays. If either ``C[i, j]``, or one
of the vertices surrounding ``C[i, j]`` (*X* or *Y* at
``[i, j], [i+1, j], [i, j+1], [i+1, j+1]``) is masked, nothing is
plotted.
.. _axes-pcolor-grid-orientation:
**Grid orientation**
The grid orientation follows the standard matrix convention: An array
*C* with shape (nrows, ncolumns) is plotted with the column number as
*X* and the row number as *Y*.
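Examples
--------
A minimal illustrative sketch with a small non-uniform grid; the values
below are arbitrary assumptions::

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.array([0., 1., 3., 6.])   # cell edges, len(x) == ncols + 1
    y = np.array([0., 2., 3.])       # cell edges, len(y) == nrows + 1
    C = np.arange(6).reshape(2, 3)   # one value per cell
    fig, ax = plt.subplots()
    ax.pcolor(x, y, C, shading='flat', edgecolors='k', linewidths=1)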
"""
if shading is None:
shading = mpl.rcParams['pcolor.shading']
shading = shading.lower()
X, Y, C, shading = self._pcolorargs('pcolor', *args, shading=shading,
kwargs=kwargs)
linewidths = (0.25,)
if 'linewidth' in kwargs:
kwargs['linewidths'] = kwargs.pop('linewidth')
kwargs.setdefault('linewidths', linewidths)
if 'edgecolor' in kwargs:
kwargs['edgecolors'] = kwargs.pop('edgecolor')
ec = kwargs.setdefault('edgecolors', 'none')
# aa setting will default via collections to patch.antialiased
# unless the boundary is not stroked, in which case the
# default will be False; with unstroked boundaries, aa
# makes artifacts that are often disturbing.
if 'antialiaseds' in kwargs:
kwargs['antialiased'] = kwargs.pop('antialiaseds')
if 'antialiased' not in kwargs and cbook._str_lower_equal(ec, "none"):
kwargs['antialiased'] = False
kwargs.setdefault('snap', False)
if np.ma.isMaskedArray(X) or np.ma.isMaskedArray(Y):
stack = np.ma.stack
X = np.ma.asarray(X)
Y = np.ma.asarray(Y)
# For bounds collections later
x = X.compressed()
y = Y.compressed()
else:
stack = np.stack
x = X
y = Y
coords = stack([X, Y], axis=-1)
collection = mcoll.PolyQuadMesh(
coords, array=C, cmap=cmap, norm=norm, colorizer=colorizer,
alpha=alpha, **kwargs)
collection._check_exclusionary_keywords(colorizer, vmin=vmin, vmax=vmax)
collection._scale_norm(norm, vmin, vmax)
coords = coords.reshape(-1, 2) # flatten the grid structure; keep x, y
self._update_pcolor_lims(collection, coords)
return collection
@_preprocess_data()
@_docstring.interpd
def pcolormesh(self, *args, alpha=None, norm=None, cmap=None, vmin=None,
vmax=None, colorizer=None, shading=None, antialiased=False,
**kwargs):
"""
Create a pseudocolor plot with a non-regular rectangular grid.
Call signature::
pcolormesh([X, Y,] C, /, **kwargs)
*X* and *Y* can be used to specify the corners of the quadrilaterals.
The arguments *X*, *Y*, *C* are positional-only.
.. hint::
`~.Axes.pcolormesh` is similar to `~.Axes.pcolor`. It is much faster
and preferred in most cases. For a detailed discussion on the
differences see :ref:`Differences between pcolor() and pcolormesh()
<differences-pcolor-pcolormesh>`.
Parameters
----------
C : array-like
The mesh data. Supported array shapes are:
- (M, N) or M*N: a mesh with scalar data. The values are mapped to
colors using normalization and a colormap. See parameters *norm*,
*cmap*, *vmin*, *vmax*.
- (M, N, 3): an image with RGB values (0-1 float or 0-255 int).
- (M, N, 4): an image with RGBA values (0-1 float or 0-255 int),
i.e. including transparency.
The first two dimensions (M, N) define the rows and columns of
the mesh data.
X, Y : array-like, optional
The coordinates of the corners of quadrilaterals of a pcolormesh::
(X[i+1, j], Y[i+1, j]) (X[i+1, j+1], Y[i+1, j+1])
●╶───╴●
│ │
●╶───╴●
(X[i, j], Y[i, j]) (X[i, j+1], Y[i, j+1])
Note that the column index corresponds to the x-coordinate, and
the row index corresponds to y. For details, see the
:ref:`Notes <axes-pcolormesh-grid-orientation>` section below.
If ``shading='flat'`` the dimensions of *X* and *Y* should be one
greater than those of *C*, and the quadrilateral is colored according
to the value at ``C[i, j]``. If *X*, *Y* and *C* have equal
dimensions, a warning will be raised and the last row and column
of *C* will be ignored.
If ``shading='nearest'`` or ``'gouraud'``, the dimensions of *X*
and *Y* should be the same as those of *C* (if not, a ValueError
will be raised). For ``'nearest'`` the color ``C[i, j]`` is
centered on ``(X[i, j], Y[i, j])``. For ``'gouraud'``, a smooth
interpolation is carried out between the quadrilateral corners.
If *X* and/or *Y* are 1-D arrays or column vectors they will be
expanded as needed into the appropriate 2D arrays, making a
rectangular grid.
%(cmap_doc)s
%(norm_doc)s
%(vmin_vmax_doc)s
%(colorizer_doc)s
edgecolors : {'none', None, 'face', color, color sequence}, optional
The color of the edges. Defaults to 'none'. Possible values:
- 'none' or '': No edge.
- *None*: :rc:`patch.edgecolor` will be used. Note that currently
:rc:`patch.force_edgecolor` has to be True for this to work.
- 'face': Use the adjacent face color.
- A color or sequence of colors will set the edge color.
The singular form *edgecolor* works as an alias.
alpha : float, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque).
shading : {'flat', 'nearest', 'gouraud', 'auto'}, optional
The fill style for the quadrilateral; defaults to
:rc:`pcolor.shading`. Possible values:
- 'flat': A solid color is used for each quad. The color of the
quad (i, j), (i+1, j), (i, j+1), (i+1, j+1) is given by
``C[i, j]``. The dimensions of *X* and *Y* should be
one greater than those of *C*; if they are the same as *C*,
then a deprecation warning is raised, and the last row
and column of *C* are dropped.
- 'nearest': Each grid point will have a color centered on it,
extending halfway between the adjacent grid centers. The
dimensions of *X* and *Y* must be the same as *C*.
- 'gouraud': Each quad will be Gouraud shaded: The color of the
corners (i', j') are given by ``C[i', j']``. The color values of
the area in between is interpolated from the corner values.
The dimensions of *X* and *Y* must be the same as *C*. When
Gouraud shading is used, *edgecolors* is ignored.
- 'auto': Choose 'flat' if dimensions of *X* and *Y* are one
larger than *C*. Choose 'nearest' if dimensions are the same.
See :doc:`/gallery/images_contours_and_fields/pcolormesh_grids`
for more description.
snap : bool, default: False
Whether to snap the mesh to pixel boundaries.
rasterized : bool, optional
Rasterize the pcolormesh when drawing vector graphics. This can
speed up rendering and produce smaller files for large data sets.
See also :doc:`/gallery/misc/rasterization_demo`.
Returns
-------
`matplotlib.collections.QuadMesh`
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Additionally, the following arguments are allowed. They are passed
along to the `~matplotlib.collections.QuadMesh` constructor:
%(QuadMesh:kwdoc)s
See Also
--------
pcolor : An alternative implementation with slightly different
features. For a detailed discussion on the differences see
:ref:`Differences between pcolor() and pcolormesh()
<differences-pcolor-pcolormesh>`.
imshow : If *X* and *Y* are each equidistant, `~.Axes.imshow` can be a
faster alternative.
Notes
-----
**Masked arrays**
*C* may be a masked array. If ``C[i, j]`` is masked, the corresponding
quadrilateral will be transparent. Masking of *X* and *Y* is not
supported. Use `~.Axes.pcolor` if you need this functionality.
.. _axes-pcolormesh-grid-orientation:
**Grid orientation**
The grid orientation follows the standard matrix convention: An array
*C* with shape (nrows, ncolumns) is plotted with the column number as
*X* and the row number as *Y*.
.. _differences-pcolor-pcolormesh:
**Differences between pcolor() and pcolormesh()**
Both methods are used to create a pseudocolor plot of a 2D array
using quadrilaterals.
The main difference lies in the created object and internal data
handling:
While `~.Axes.pcolor` returns a `.PolyQuadMesh`, `~.Axes.pcolormesh`
returns a `.QuadMesh`. The latter is more specialized for the given
purpose and thus is faster. It should almost always be preferred.
There is also a slight difference in the handling of masked arrays.
Both `~.Axes.pcolor` and `~.Axes.pcolormesh` support masked arrays
for *C*. However, only `~.Axes.pcolor` supports masked arrays for *X*
and *Y*. The reason lies in the internal handling of the masked values.
`~.Axes.pcolor` leaves out the respective polygons from the
PolyQuadMesh. `~.Axes.pcolormesh` sets the facecolor of the masked
elements to transparent. You can see the difference when using
edgecolors. While all edges are drawn irrespective of masking in a
QuadMesh, the edge between two adjacent masked quadrilaterals in
`~.Axes.pcolor` is not drawn as the corresponding polygons do not
exist in the PolyQuadMesh. Because PolyQuadMesh draws each individual
polygon, it also supports applying hatches and linestyles to the collection.
Another difference is the support of Gouraud shading in
`~.Axes.pcolormesh`, which is not available with `~.Axes.pcolor`.
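Examples
--------
A minimal illustrative sketch using 'nearest' shading; the data values
below are arbitrary assumptions::

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(0, 1, 11)
    y = np.linspace(0, 1, 6)
    X, Y = np.meshgrid(x, y)                              # shape (6, 11)
    C = np.sin(2 * np.pi * X) * np.cos(2 * np.pi * Y)     # same shape as X, Y
    fig, ax = plt.subplots()
    ax.pcolormesh(X, Y, C, shading='nearest', cmap='RdBu')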
"""
shading = mpl._val_or_rc(shading, 'pcolor.shading').lower()
kwargs.setdefault('edgecolors', 'none')
X, Y, C, shading = self._pcolorargs('pcolormesh', *args,
shading=shading, kwargs=kwargs)
coords = np.stack([X, Y], axis=-1)
kwargs.setdefault('snap', mpl.rcParams['pcolormesh.snap'])
collection = mcoll.QuadMesh(
coords, antialiased=antialiased, shading=shading,
array=C, cmap=cmap, norm=norm, colorizer=colorizer, alpha=alpha, **kwargs)
collection._check_exclusionary_keywords(colorizer, vmin=vmin, vmax=vmax)
collection._scale_norm(norm, vmin, vmax)
coords = coords.reshape(-1, 2) # flatten the grid structure; keep x, y
self._update_pcolor_lims(collection, coords)
return collection
def _update_pcolor_lims(self, collection, coords):
"""
Common code for updating lims in pcolor() and pcolormesh() methods.
"""
# Transform from native to data coordinates?
t = collection._transform
if (not isinstance(t, mtransforms.Transform) and
hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
if t and any(t.contains_branch_separately(self.transData)):
trans_to_data = t - self.transData
coords = trans_to_data.transform(coords)
self.add_collection(collection, autolim=False)
minx, miny = np.min(coords, axis=0)
maxx, maxy = np.max(coords, axis=0)
collection.sticky_edges.x[:] = [minx, maxx]
collection.sticky_edges.y[:] = [miny, maxy]
self.update_datalim(coords)
self._request_autoscale_view()
@_preprocess_data()
@_docstring.interpd
def pcolorfast(self, *args, alpha=None, norm=None, cmap=None, vmin=None,
vmax=None, colorizer=None, **kwargs):
"""
Create a pseudocolor plot with a non-regular rectangular grid.
Call signature::
ax.pcolorfast([X, Y], C, /, **kwargs)
The arguments *X*, *Y*, *C* are positional-only.
This method is similar to `~.Axes.pcolor` and `~.Axes.pcolormesh`.
It's designed to provide the fastest pcolor-type plotting with the
Agg backend. To achieve this, it uses different algorithms internally
depending on the complexity of the input grid (regular rectangular,
non-regular rectangular or arbitrary quadrilateral).
.. warning::
This method is experimental. Compared to `~.Axes.pcolor` or
`~.Axes.pcolormesh` it has some limitations:
- It supports only flat shading (no outlines)
- It lacks support for log scaling of the axes.
- It does not have a pyplot wrapper.
Parameters
----------
C : array-like
The image data. Supported array shapes are:
- (M, N): an image with scalar data. Color-mapping is controlled
by *cmap*, *norm*, *vmin*, and *vmax*.
- (M, N, 3): an image with RGB values (0-1 float or 0-255 int).
- (M, N, 4): an image with RGBA values (0-1 float or 0-255 int),
i.e. including transparency.
The first two dimensions (M, N) define the rows and columns of
the image.
This parameter can only be passed positionally.
X, Y : tuple or array-like, default: ``(0, N)``, ``(0, M)``
*X* and *Y* are used to specify the coordinates of the
quadrilaterals. There are different ways to do this:
- Use tuples ``X=(xmin, xmax)`` and ``Y=(ymin, ymax)`` to define
a *uniform rectangular grid*.
The tuples define the outer edges of the grid. All individual
quadrilaterals will be of the same size. This is the fastest
version.
- Use 1D arrays *X*, *Y* to specify a *non-uniform rectangular
grid*.
In this case *X* and *Y* have to be monotonic 1D arrays of length
*N+1* and *M+1*, specifying the x and y boundaries of the cells.
The speed is intermediate. Note: The grid is checked, and if
found to be uniform the fast version is used.
- Use 2D arrays *X*, *Y* if you need an *arbitrary quadrilateral
grid* (i.e. if the quadrilaterals are not rectangular).
In this case *X* and *Y* are 2D arrays with shape (M + 1, N + 1),
specifying the x and y coordinates of the corners of the colored
quadrilaterals.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
These arguments can only be passed positionally.
%(cmap_doc)s
This parameter is ignored if *C* is RGB(A).
%(norm_doc)s
This parameter is ignored if *C* is RGB(A).
%(vmin_vmax_doc)s
This parameter is ignored if *C* is RGB(A).
%(colorizer_doc)s
This parameter is ignored if *C* is RGB(A).
alpha : float, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque).
snap : bool, default: False
Whether to snap the mesh to pixel boundaries.
Returns
-------
`.AxesImage` or `.PcolorImage` or `.QuadMesh`
The return type depends on the type of grid:
- `.AxesImage` for a regular rectangular grid.
- `.PcolorImage` for a non-regular rectangular grid.
- `.QuadMesh` for a non-rectangular grid.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Supported additional parameters depend on the type of grid.
See return types of *image* for further description.
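Examples
--------
For illustration, a minimal sketch using the uniform-grid (fastest)
path; the data values are arbitrary::

    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    C = np.random.default_rng(0).random((30, 40))
    # Uniform grid spanning x in [0, 4] and y in [0, 3].
    ax.pcolorfast((0, 4), (0, 3), C)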
"""
C = args[-1]
nr, nc = np.shape(C)[:2]
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
if x.size != nc + 1:
raise ValueError(
f"Length of X ({x.size}) must be one larger than the "
f"number of columns in C ({nc})")
if y.size != nr + 1:
raise ValueError(
f"Length of Y ({y.size}) must be one larger than the "
f"number of rows in C ({nr})"
)
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01 * abs(dx.mean()) and
np.ptp(dy) < 0.01 * abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError(
f"When 3 positional parameters are passed to pcolorfast, the first "
f"two (X and Y) must be both 1D or both 2D; the given X was "
f"{x.ndim}D and the given Y was {y.ndim}D")
else:
raise _api.nargs_error('pcolorfast', '1 or 3', len(args))
mcolorizer.ColorizingArtist._check_exclusionary_keywords(colorizer, vmin=vmin,
vmax=vmax)
if style == "quadmesh":
# data point in each cell is value at lower left corner
coords = np.stack([x, y], axis=-1)
if np.ndim(C) not in {2, 3}:
raise ValueError("C must be 2D or 3D")
collection = mcoll.QuadMesh(
coords, array=C,
alpha=alpha, cmap=cmap, norm=norm, colorizer=colorizer,
antialiased=False, edgecolors="none")
self.add_collection(collection, autolim=False)
xl, xr, yb, yt = x.min(), x.max(), y.min(), y.max()
ret = collection
else: # It's one of the two image styles.
extent = xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(
self, cmap=cmap, norm=norm, colorizer=colorizer,
data=C, alpha=alpha, extent=extent,
interpolation='nearest', origin='lower',
**kwargs)
elif style == "pcolorimage":
im = mimage.PcolorImage(
self, x, y, C,
cmap=cmap, norm=norm, colorizer=colorizer, alpha=alpha,
extent=extent, **kwargs)
self.add_image(im)
ret = im
if np.ndim(C) == 2: # C.ndim == 3 is RGB(A) so doesn't need scaling.
ret._scale_norm(norm, vmin, vmax)
if ret.get_clip_path() is None:
# image does not already have clipping set, clip to Axes patch
ret.set_clip_path(self.patch)
ret.sticky_edges.x[:] = [xl, xr]
ret.sticky_edges.y[:] = [yb, yt]
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self._request_autoscale_view(tight=True)
return ret
@_preprocess_data()
@_docstring.interpd
def contour(self, *args, **kwargs):
"""
Plot contour lines.
Call signature::
contour([X, Y,] Z, /, [levels], **kwargs)
The arguments *X*, *Y*, *Z* are positional-only.
%(contour_doc)s
"""
kwargs['filled'] = False
contours = mcontour.QuadContourSet(self, *args, **kwargs)
self._request_autoscale_view()
return contours
@_preprocess_data()
@_docstring.interpd
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
Call signature::
contourf([X, Y,] Z, /, [levels], **kwargs)
The arguments *X*, *Y*, *Z* are positional-only.
%(contour_doc)s
"""
kwargs['filled'] = True
contours = mcontour.QuadContourSet(self, *args, **kwargs)
self._request_autoscale_view()
return contours
def clabel(self, CS, levels=None, **kwargs):
"""
Label a contour plot.
Adds labels to line contours in given `.ContourSet`.
Parameters
----------
CS : `.ContourSet` instance
Line contours to label.
levels : array-like, optional
A list of level values, that should be labeled. The list must be
a subset of ``CS.levels``. If not given, all levels are labeled.
**kwargs
All other parameters are documented in `~.ContourLabeler.clabel`.
"""
return CS.clabel(levels, **kwargs)
#### Data analysis
@_api.make_keyword_only("3.10", "range")
@_preprocess_data(replace_names=["x", 'weights'], label_namer="x")
def hist(self, x, bins=None, range=None, density=False, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None, stacked=False, **kwargs):
"""
Compute and plot a histogram.
This method uses `numpy.histogram` to bin the data in *x* and count the
number of values in each bin, then draws the distribution either as a
`.BarContainer` or `.Polygon`. The *bins*, *range*, *density*, and
*weights* parameters are forwarded to `numpy.histogram`.
If the data has already been binned and counted, use `~.bar` or
`~.stairs` to plot the distribution::
counts, bins = np.histogram(x)
plt.stairs(counts, bins)
Alternatively, plot pre-computed bins and counts using ``hist()`` by
treating each bin as a single point with a weight equal to its count::
plt.hist(bins[:-1], bins, weights=counts)
The data input *x* can be a singular array, a list of datasets of
potentially different lengths ([*x0*, *x1*, ...]), or a 2D ndarray in
which each column is a dataset. Note that the ndarray form is
transposed relative to the list form. If the input is an array, then
the return value is a tuple (*n*, *bins*, *patches*); if the input is a
sequence of arrays, then the return value is a tuple
([*n0*, *n1*, ...], *bins*, [*patches0*, *patches1*, ...]).
Masked arrays are not supported.
Parameters
----------
x : (n,) array or sequence of (n,) arrays
Input values, this takes either a single array or a sequence of
arrays which are not required to be of the same length.
bins : int or sequence or str, default: :rc:`hist.bins`
If *bins* is an integer, it defines the number of equal-width bins
in the range.
If *bins* is a sequence, it defines the bin edges, including the
left edge of the first bin and the right edge of the last bin;
in this case, bins may be unequally spaced. All but the last
(righthand-most) bin is half-open. In other words, if *bins* is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
If *bins* is a string, it is one of the binning strategies
supported by `numpy.histogram_bin_edges`: 'auto', 'fd', 'doane',
'scott', 'stone', 'rice', 'sturges', or 'sqrt'.
range : tuple or None, default: None
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is ``(x.min(), x.max())``.
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling
is based on the specified bin range instead of the
range of x.
density : bool, default: False
If ``True``, draw and return a probability density: each bin
will display the bin's raw count divided by the total number of
counts *and the bin width*
(``density = counts / (sum(counts) * np.diff(bins))``),
so that the area under the histogram integrates to 1
(``np.sum(density * np.diff(bins)) == 1``).
If *stacked* is also ``True``, the sum of the histograms is
normalized to 1.
weights : (n,) array-like or None, default: None
An array of weights, of the same shape as *x*. Each value in
*x* only contributes its associated weight towards the bin count
(instead of 1). If *density* is ``True``, the weights are
normalized, so that the integral of the density over the range
remains 1.
cumulative : bool or -1, default: False
If ``True``, then a histogram is computed where each bin gives the
counts in that bin plus all bins for smaller values. The last bin
gives the total number of datapoints.
If *density* is also ``True`` then the histogram is normalized such
that the last bin equals 1.
If *cumulative* is a number less than 0 (e.g., -1), the direction
of accumulation is reversed. In this case, if *density* is also
``True``, then the histogram is normalized such that the first bin
equals 1.
bottom : array-like or float, default: 0
Location of the bottom of each bin, i.e. bins are drawn from
``bottom`` to ``bottom + hist(x, bins)``. If a scalar, the bottom
of each bin is shifted by the same amount. If an array, each bin
is shifted independently and the length of bottom must match the
number of bins. If None, defaults to 0.
histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, default: 'bar'
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default unfilled.
- 'stepfilled' generates a lineplot that is by default filled.
align : {'left', 'mid', 'right'}, default: 'mid'
The horizontal alignment of the histogram bars.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
orientation : {'vertical', 'horizontal'}, default: 'vertical'
If 'horizontal', `~.Axes.barh` will be used for bar-type histograms
and the *bottom* kwarg will be the left edges.
rwidth : float or None, default: None
The relative width of the bars as a fraction of the bin width. If
``None``, automatically compute the width.
Ignored if *histtype* is 'step' or 'stepfilled'.
log : bool, default: False
If ``True``, the histogram axis will be set to a log scale.
color : :mpltype:`color` or list of :mpltype:`color` or None, default: None
Color or sequence of colors, one per dataset. Default (``None``)
uses the standard line color sequence.
label : str or list of str, optional
String, or sequence of strings to match multiple datasets. Bar
charts yield multiple patches per dataset, but only the first gets
the label, so that `~.Axes.legend` will work as expected.
stacked : bool, default: False
If ``True``, multiple data are stacked on top of each other. If
``False``, multiple data are arranged side by side if *histtype* is
'bar', or on top of each other if *histtype* is 'step'.
Returns
-------
n : array or list of arrays
The values of the histogram bins. See *density* and *weights* for a
description of the possible semantics. If input *x* is an array,
then this is an array of length *nbins*. If input is a sequence of
arrays ``[data1, data2, ...]``, then this is a list of arrays with
the values of the histograms for each of the arrays in the same
order. The dtype of the array *n* (or of its element arrays) will
always be float even if no weighting or normalization is used.
bins : array
The edges of the bins. Length nbins + 1 (nbins left edges and right
edge of last bin). Always a single array even when multiple data
sets are passed in.
patches : `.BarContainer` or list of a single `.Polygon` or list of \
such objects
Container of individual artists used to create the histogram
or list of such containers if there are multiple input datasets.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
`~matplotlib.patches.Patch` properties. The following properties
additionally accept a sequence of values corresponding to the
datasets in *x*:
*edgecolor*, *facecolor*, *linewidth*, *linestyle*, *hatch*.
.. versionadded:: 3.10
Allowing sequences of values in above listed Patch properties.
See Also
--------
hist2d : 2D histogram with rectangular bins
hexbin : 2D histogram with hexagonal bins
stairs : Plot a pre-computed histogram
bar : Plot a pre-computed histogram
Notes
-----
For large numbers of bins (>1000), plotting can be significantly
accelerated by using `~.Axes.stairs` to plot a pre-computed histogram
(``plt.stairs(*np.histogram(data))``), or by setting *histtype* to
'step' or 'stepfilled' rather than 'bar' or 'barstacked'.
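Examples
--------
A minimal sketch with two arbitrary datasets drawn as unfilled steps::

    import numpy as np
    import matplotlib.pyplot as plt

    rng = np.random.default_rng(seed=42)
    fig, ax = plt.subplots()
    # Shared bins are computed over both datasets.
    ax.hist([rng.normal(size=1000), rng.normal(1, 2, size=1000)],
            bins=30, histtype='step', label=['a', 'b'])
    ax.legend()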
"""
# Avoid shadowing the builtin.
bin_range = range
from builtins import range
kwargs = cbook.normalize_kwargs(kwargs, mpatches.Patch)
if np.isscalar(x):
x = [x]
bins = mpl._val_or_rc(bins, 'hist.bins')
# Validate string inputs here to avoid cluttering subsequent code.
_api.check_in_list(['bar', 'barstacked', 'step', 'stepfilled'],
histtype=histtype)
_api.check_in_list(['left', 'mid', 'right'], align=align)
_api.check_in_list(['horizontal', 'vertical'], orientation=orientation)
if histtype == 'barstacked' and not stacked:
stacked = True
# Massage 'x' for processing.
x = cbook._reshape_2D(x, 'x')
nx = len(x) # number of datasets
# Process unit information. _process_unit_info sets the unit and
# converts the first dataset; then we convert each following dataset
# one at a time.
if orientation == "vertical":
convert_units = self.convert_xunits
x = [*self._process_unit_info([("x", x[0])], kwargs),
*map(convert_units, x[1:])]
else: # horizontal
convert_units = self.convert_yunits
x = [*self._process_unit_info([("y", x[0])], kwargs),
*map(convert_units, x[1:])]
if bin_range is not None:
bin_range = convert_units(bin_range)
if not cbook.is_scalar_or_string(bins):
bins = convert_units(bins)
# We need to do to 'weights' what was done to 'x'
if weights is not None:
w = cbook._reshape_2D(weights, 'weights')
else:
w = [None] * nx
if len(w) != nx:
raise ValueError('weights should have the same shape as x')
input_empty = True
for xi, wi in zip(x, w):
len_xi = len(xi)
if wi is not None and len(wi) != len_xi:
raise ValueError('weights should have the same shape as x')
if len_xi:
input_empty = False
if color is None:
colors = [self._get_lines.get_next_color() for i in range(nx)]
else:
colors = mcolors.to_rgba_array(color)
if len(colors) != nx:
raise ValueError(f"The 'color' keyword argument must have one "
f"color per dataset, but {nx} datasets and "
f"{len(colors)} colors were provided")
hist_kwargs = dict()
# if the bin_range is not given, compute without nan numpy
# does not do this for us when guessing the range (but will
# happily ignore nans when computing the histogram).
if bin_range is None:
xmin = np.inf
xmax = -np.inf
for xi in x:
if len(xi):
# python's min/max ignore nan,
# np.nanmin returns nan only for all-nan input
xmin = min(xmin, np.nanmin(xi))
xmax = max(xmax, np.nanmax(xi))
if xmin <= xmax: # Only happens if we have seen a finite value.
bin_range = (xmin, xmax)
# If bins are not specified either explicitly or via range,
# we need to figure out the range required for all datasets,
# and supply that to np.histogram.
if not input_empty and len(x) > 1:
if weights is not None:
_w = np.concatenate(w)
else:
_w = None
bins = np.histogram_bin_edges(
np.concatenate(x), bins, bin_range, _w)
else:
hist_kwargs['range'] = bin_range
density = bool(density)
if density and not stacked:
hist_kwargs['density'] = density
# List to store all the top coordinates of the histograms
tops = [] # Will have shape (n_datasets, n_bins).
# Loop through datasets
for i in range(nx):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, weights=w[i], **hist_kwargs)
tops.append(m)
tops = np.array(tops, float) # causes problems later if it's an int
bins = np.array(bins, float) # causes problems if float16
if stacked:
tops = tops.cumsum(axis=0)
# If a stacked density plot, normalize so the area of all the
# stacked histograms together is 1
if density:
tops = (tops / np.diff(bins)) / tops[-1].sum()
if cumulative:
slc = slice(None)
if isinstance(cumulative, Number) and cumulative < 0:
slc = slice(None, None, -1)
if density:
tops = (tops * np.diff(bins))[:, slc].cumsum(axis=1)[:, slc]
else:
tops = tops[:, slc].cumsum(axis=1)[:, slc]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
if rwidth is not None:
dr = np.clip(rwidth, 0, 1)
elif (len(tops) > 1 and
((not stacked) or mpl.rcParams['_internal.classic_mode'])):
dr = 0.8
else:
dr = 1.0
if histtype == 'bar' and not stacked:
width = dr * totwidth / nx
dw = width
boffset = -0.5 * dr * totwidth * (1 - 1 / nx)
elif histtype == 'barstacked' or stacked:
width = dr * totwidth
boffset, dw = 0.0, 0.0
if align == 'mid':
boffset += 0.5 * totwidth
elif align == 'right':
boffset += totwidth
if orientation == 'horizontal':
_barfunc = self.barh
bottom_kwarg = 'left'
else: # orientation == 'vertical'
_barfunc = self.bar
bottom_kwarg = 'bottom'
for top, color in zip(tops, colors):
if bottom is None:
bottom = np.zeros(len(top))
if stacked:
height = top - bottom
else:
height = top
bars = _barfunc(bins[:-1]+boffset, height, width,
align='center', log=log,
color=color, **{bottom_kwarg: bottom})
patches.append(bars)
if stacked:
bottom = top
boffset += dw
# Remove stickies from all bars but the lowest ones, as otherwise
# margin expansion would be unable to cross the stickies in the
# middle of the bars.
for bars in patches[1:]:
for patch in bars:
patch.sticky_edges.x[:] = patch.sticky_edges.y[:] = []
elif histtype.startswith('step'):
# these define the perimeter of the polygon
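# The first 2*len(bins) - 1 entries pair each bin edge with the previous
# edge to trace the stepped outline; the remaining entries retrace that
# outline in reverse so the polygon can be closed along the baseline.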
x = np.zeros(4 * len(bins) - 3)
y = np.zeros(4 * len(bins) - 3)
x[0:2*len(bins)-1:2], x[1:2*len(bins)-1:2] = bins, bins[:-1]
x[2*len(bins)-1:] = x[1:2*len(bins)-1][::-1]
if bottom is None:
bottom = 0
y[1:2*len(bins)-1:2] = y[2:2*len(bins):2] = bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
if log:
if orientation == 'horizontal':
self.set_xscale('log', nonpositive='clip')
else: # orientation == 'vertical'
self.set_yscale('log', nonpositive='clip')
if align == 'left':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
# If fill kwarg is set, it will be passed to the patch collection,
# overriding this
fill = (histtype == 'stepfilled')
xvals, yvals = [], []
for top in tops:
if stacked:
# top of the previous polygon becomes the bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
# set the top of this polygon
y[1:2*len(bins)-1:2] = y[2:2*len(bins):2] = top + bottom
# The starting point of the polygon has not yet been
# updated. So far only the endpoint was adjusted. This
# assignment closes the polygon. The redundant endpoint is
# later discarded (for step and stepfilled).
y[0] = y[-1]
if orientation == 'horizontal':
xvals.append(y.copy())
yvals.append(x.copy())
else:
xvals.append(x.copy())
yvals.append(y.copy())
# stepfill is closed, step is not
split = -1 if fill else 2 * len(bins)
# add patches in reverse order so that when stacking,
# items lower in the stack are plotted on top of
# items higher in the stack
for x, y, color in reversed(list(zip(xvals, yvals, colors))):
patches.append(self.fill(
x[:split], y[:split],
closed=True if fill else None,
facecolor=color,
edgecolor=None if fill else color,
fill=fill if fill else None,
zorder=None if fill else mlines.Line2D.zorder))
for patch_list in patches:
for patch in patch_list:
if orientation == 'vertical':
patch.sticky_edges.y.append(0)
elif orientation == 'horizontal':
patch.sticky_edges.x.append(0)
# we return patches, so put it back in the expected order
patches.reverse()
# If None, make all labels None (via zip_longest below); otherwise,
# cast each element to str, but keep a single str as it.
labels = [] if label is None else np.atleast_1d(np.asarray(label, str))
if histtype == "step":
ec = kwargs.get('edgecolor', colors)
else:
ec = kwargs.get('edgecolor', None)
if ec is None or cbook._str_lower_equal(ec, 'none'):
edgecolors = itertools.repeat(ec)
else:
edgecolors = itertools.cycle(mcolors.to_rgba_array(ec))
fc = kwargs.get('facecolor', colors)
if cbook._str_lower_equal(fc, 'none'):
facecolors = itertools.repeat(fc)
else:
facecolors = itertools.cycle(mcolors.to_rgba_array(fc))
hatches = itertools.cycle(np.atleast_1d(kwargs.get('hatch', None)))
linewidths = itertools.cycle(np.atleast_1d(kwargs.get('linewidth', None)))
if 'linestyle' in kwargs:
linestyles = itertools.cycle(mlines._get_dash_patterns(kwargs['linestyle']))
else:
linestyles = itertools.repeat(None)
for patch, lbl in itertools.zip_longest(patches, labels):
if not patch:
continue
p = patch[0]
kwargs.update({
'hatch': next(hatches),
'linewidth': next(linewidths),
'linestyle': next(linestyles),
'edgecolor': next(edgecolors),
'facecolor': next(facecolors),
})
p._internal_update(kwargs)
if lbl is not None:
p.set_label(lbl)
for p in patch[1:]:
p._internal_update(kwargs)
p.set_label('_nolegend_')
if nx == 1:
return tops[0], bins, patches[0]
else:
patch_type = ("BarContainer" if histtype.startswith("bar")
else "list[Polygon]")
return tops, bins, cbook.silent_list(patch_type, patches)
@_preprocess_data()
def stairs(self, values, edges=None, *,
orientation='vertical', baseline=0, fill=False, **kwargs):
"""
Draw a stepwise constant function as a line or a filled plot.
*edges* define the x-axis positions of the steps. *values* the function values
between these steps. Depending on *fill*, the function is drawn either as a
continuous line with vertical segments at the edges, or as a filled area.
Parameters
----------
values : array-like
The step heights.
edges : array-like
The step positions, with ``len(edges) == len(values) + 1``,
between which the curve takes on the given *values*.
orientation : {'vertical', 'horizontal'}, default: 'vertical'
The direction of the steps. Vertical means that *values* are along
the y-axis, and edges are along the x-axis.
baseline : float, array-like or None, default: 0
The bottom value of the bounding edges or when
``fill=True``, position of lower edge. If *fill* is
True or an array is passed to *baseline*, a closed
path is drawn.
If None, then drawn as an unclosed Path.
fill : bool, default: False
Whether the area under the step curve should be filled.
Passing both ``fill=True`` and ``baseline=None`` will likely result in
undesired filling: the first and last points will be connected
with a straight line and the fill will be between this line and the stairs.
Returns
-------
StepPatch : `~matplotlib.patches.StepPatch`
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
`~matplotlib.patches.StepPatch` properties
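Examples
--------
A minimal sketch plotting a pre-computed histogram of arbitrary data::

    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    counts, edges = np.histogram(np.random.default_rng(0).normal(size=500))
    # Draw the pre-computed bin counts as a filled step function.
    ax.stairs(counts, edges, fill=True)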
"""
if 'color' in kwargs:
_color = kwargs.pop('color')
else:
_color = self._get_lines.get_next_color()
if fill:
kwargs.setdefault('linewidth', 0)
kwargs.setdefault('facecolor', _color)
else:
kwargs.setdefault('edgecolor', _color)
if edges is None:
edges = np.arange(len(values) + 1)
edges, values, baseline = self._process_unit_info(
[("x", edges), ("y", values), ("y", baseline)], kwargs)
patch = mpatches.StepPatch(values,
edges,
baseline=baseline,
orientation=orientation,
fill=fill,
**kwargs)
self.add_patch(patch)
if baseline is None and fill:
_api.warn_external(
f"Both {baseline=} and {fill=} have been passed. "
"baseline=None is only intended for unfilled stair plots. "
"Because baseline is None, the Path used to draw the stairs will "
"not be closed, thus because fill is True the polygon will be closed "
"by drawing an (unstroked) edge from the first to last point. It is "
"very likely that the resulting fill patterns is not the desired "
"result."
)
if baseline is not None:
if orientation == 'vertical':
patch.sticky_edges.y.append(np.min(baseline))
self.update_datalim([(edges[0], np.min(baseline))])
else:
patch.sticky_edges.x.append(np.min(baseline))
self.update_datalim([(np.min(baseline), edges[0])])
self._request_autoscale_view()
return patch
@_api.make_keyword_only("3.10", "range")
@_preprocess_data(replace_names=["x", "y", "weights"])
@_docstring.interpd
def hist2d(self, x, y, bins=10, range=None, density=False, weights=None,
cmin=None, cmax=None, **kwargs):
"""
Make a 2D histogram plot.
Parameters
----------
x, y : array-like, shape (n, )
Input values
bins : None or int or [int, int] or array-like or [array, array]
The bin specification:
- If int, the number of bins for the two dimensions
(``nx = ny = bins``).
- If ``[int, int]``, the number of bins in each dimension
(``nx, ny = bins``).
- If array-like, the bin edges for the two dimensions
(``x_edges = y_edges = bins``).
- If ``[array, array]``, the bin edges in each dimension
(``x_edges, y_edges = bins``).
The default value is 10.
range : array-like shape(2, 2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the bins parameters): ``[[xmin,
xmax], [ymin, ymax]]``. All values outside of this range will be
considered outliers and not tallied in the histogram.
density : bool, default: False
Normalize histogram. See the documentation for the *density*
parameter of `~.Axes.hist` for more details.
weights : array-like, shape (n, ), optional
An array of values w_i weighing each sample (x_i, y_i).
cmin, cmax : float, default: None
All bins that have a count less than *cmin* or more than *cmax* will
not be displayed (set to NaN before passing to `~.Axes.pcolormesh`),
and these count values will also be set to NaN in the returned
histogram.
Returns
-------
h : 2D array
The bi-dimensional histogram of samples x and y. Values in x are
histogrammed along the first dimension and values in y are
histogrammed along the second dimension.
xedges : 1D array
The bin edges along the x-axis.
yedges : 1D array
The bin edges along the y-axis.
image : `~.matplotlib.collections.QuadMesh`
Other Parameters
----------------
%(cmap_doc)s
%(norm_doc)s
%(vmin_vmax_doc)s
%(colorizer_doc)s
alpha : ``0 <= scalar <= 1`` or ``None``, optional
The alpha blending value.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Additional parameters are passed along to the
`~.Axes.pcolormesh` method and `~matplotlib.collections.QuadMesh`
constructor.
See Also
--------
hist : 1D histogram plotting
hexbin : 2D histogram with hexagonal bins
Notes
-----
Rendering the histogram with a logarithmic color scale is accomplished
by passing a `.colors.LogNorm` instance to the *norm* keyword
argument. Likewise, power-law normalization (similar in effect to gamma
correction) can be accomplished with `.colors.PowerNorm`.
.. versionchanged:: 3.11
Previously, `~.Axes.hist2d` would force the axes limits to match the
extents of the histogram; now, autoscaling also takes other plot
elements into account.
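Examples
--------
A minimal sketch using a logarithmic color scale on arbitrary data::

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm

    rng = np.random.default_rng(1)
    x, y = rng.normal(size=(2, 10000))
    fig, ax = plt.subplots()
    h, xedges, yedges, im = ax.hist2d(x, y, bins=50, norm=LogNorm())
    fig.colorbar(im, ax=ax)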
"""
h, xedges, yedges = np.histogram2d(x, y, bins=bins, range=range,
density=density, weights=weights)
if cmin is not None:
h[h < cmin] = None
if cmax is not None:
h[h > cmax] = None
pc = self.pcolormesh(xedges, yedges, h.T, **kwargs)
return h, xedges, yedges, pc
@_preprocess_data(replace_names=["x", "weights"], label_namer="x")
@_docstring.interpd
def ecdf(self, x, weights=None, *, complementary=False,
orientation="vertical", compress=False, **kwargs):
"""
Compute and plot the empirical cumulative distribution function of *x*.
.. versionadded:: 3.8
Parameters
----------
x : 1d array-like
The input data. Infinite entries are kept (and move the relevant
end of the ecdf from 0/1), but NaNs and masked values are errors.
weights : 1d array-like or None, default: None
The weights of the entries; must have the same shape as *x*.
Weights corresponding to NaN data points are dropped, and then the
remaining weights are normalized to sum to 1. If unset, all
entries have the same weight.
complementary : bool, default: False
Whether to plot a cumulative distribution function, which increases
from 0 to 1 (the default), or a complementary cumulative
distribution function, which decreases from 1 to 0.
orientation : {"vertical", "horizontal"}, default: "vertical"
Whether the entries are plotted along the x-axis ("vertical", the
default) or the y-axis ("horizontal"). This parameter takes the
same values as in `~.Axes.hist`.
compress : bool, default: False
Whether multiple entries with the same values are grouped together
(with a summed weight) before plotting. This is mainly useful if
*x* contains many identical data points, to decrease the rendering
complexity of the plot. If *x* contains no duplicate points, this
has no effect and just uses some time and memory.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Keyword arguments control the `.Line2D` properties:
%(Line2D:kwdoc)s
Returns
-------
`.Line2D`
Notes
-----
The ecdf plot can be thought of as a cumulative histogram with one bin
per data entry; i.e. it reports on the entire dataset without any
arbitrary binning.
If *x* contains NaNs or masked entries, either remove them first from
the array (if they should not be taken into account), or replace them by
-inf or +inf (if they should be sorted at the beginning or the end of
the array).
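Examples
--------
A minimal sketch showing the ECDF and its complement for arbitrary data::

    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    data = np.random.default_rng(2).exponential(size=200)
    ax.ecdf(data, label='CDF')
    ax.ecdf(data, complementary=True, label='CCDF')
    ax.legend()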
"""
_api.check_in_list(["horizontal", "vertical"], orientation=orientation)
if "drawstyle" in kwargs or "ds" in kwargs:
raise TypeError("Cannot pass 'drawstyle' or 'ds' to ecdf()")
if np.ma.getmask(x).any():
raise ValueError("ecdf() does not support masked entries")
x = np.asarray(x)
if np.isnan(x).any():
raise ValueError("ecdf() does not support NaNs")
argsort = np.argsort(x)
x = x[argsort]
if weights is None:
# Ensure that we end at exactly 1, avoiding floating point errors.
cum_weights = (1 + np.arange(len(x))) / len(x)
else:
weights = np.take(weights, argsort) # Reorder weights like we reordered x.
cum_weights = np.cumsum(weights / np.sum(weights))
if compress:
# Get indices of unique x values.
compress_idxs = [0, *(x[:-1] != x[1:]).nonzero()[0] + 1]
x = x[compress_idxs]
cum_weights = cum_weights[compress_idxs]
if orientation == "vertical":
if not complementary:
line, = self.plot([x[0], *x], [0, *cum_weights],
drawstyle="steps-post", **kwargs)
else:
line, = self.plot([*x, x[-1]], [1, *1 - cum_weights],
drawstyle="steps-pre", **kwargs)
line.sticky_edges.y[:] = [0, 1]
else: # orientation == "horizontal":
if not complementary:
line, = self.plot([0, *cum_weights], [x[0], *x],
drawstyle="steps-pre", **kwargs)
else:
line, = self.plot([1, *1 - cum_weights], [*x, x[-1]],
drawstyle="steps-post", **kwargs)
line.sticky_edges.x[:] = [0, 1]
return line
@_api.make_keyword_only("3.10", "NFFT")
@_preprocess_data(replace_names=["x"])
@_docstring.interpd
def psd(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, return_line=None, **kwargs):
r"""
Plot the power spectral density.
The power spectral density :math:`P_{xx}` by Welch's average
periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between segments.
Fc : int, default: 0
The center frequency of *x*, which offsets the x extents of the
plot to reflect the frequency range used when a signal is acquired
and then filtered and downsampled to baseband.
return_line : bool, default: False
Whether to include the line object plotted in the returned values.
Returns
-------
Pxx : 1-D array
The values for the power spectrum :math:`P_{xx}` before scaling
(real valued).
freqs : 1-D array
The frequencies corresponding to the elements in *Pxx*.
line : `~matplotlib.lines.Line2D`
The line created by this function.
Only returned if *return_line* is True.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Keyword arguments control the `.Line2D` properties:
%(Line2D:kwdoc)s
See Also
--------
specgram
Differs in the default overlap; in not returning the mean of the
segment periodograms; in returning the times of the segments; and
in plotting a colormap instead of a line.
magnitude_spectrum
Plots the magnitude spectrum.
csd
Plots the spectral density between two signals.
Notes
-----
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References
----------
Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
John Wiley & Sons (1986)
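Examples
--------
A minimal sketch, assuming an illustrative 100 Hz tone sampled at 1 kHz
and buried in white noise::

    import numpy as np
    import matplotlib.pyplot as plt

    fs = 1000  # sampling frequency in Hz
    t = np.arange(0, 1, 1 / fs)
    x = (np.sin(2 * np.pi * 100 * t)
         + np.random.default_rng(3).normal(size=t.size))
    fig, ax = plt.subplots()
    ax.psd(x, NFFT=256, Fs=fs)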
"""
if Fc is None:
Fc = 0
pxx, freqs = mlab.psd(x=x, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
line = self.plot(freqs, 10 * np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.get_ybound()
step = max(10 * int(np.log10(vmax - vmin)), 1)
ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
self.set_yticks(ticks)
if return_line is None or not return_line:
return pxx, freqs
else:
return pxx, freqs, line
@_api.make_keyword_only("3.10", "NFFT")
@_preprocess_data(replace_names=["x", "y"], label_namer="y")
@_docstring.interpd
def csd(self, x, y, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, return_line=None, **kwargs):
r"""
Plot the cross-spectral density.
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
Parameters
----------
x, y : 1-D arrays or sequences
Arrays or sequences containing the data.
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between segments.
Fc : int, default: 0
The center frequency of *x*, which offsets the x extents of the
plot to reflect the frequency range used when a signal is acquired
and then filtered and downsampled to baseband.
return_line : bool, default: False
Whether to include the line object plotted in the returned values.
Returns
-------
Pxy : 1-D array
The values for the cross spectrum :math:`P_{xy}` before scaling
(complex valued).
freqs : 1-D array
The frequencies corresponding to the elements in *Pxy*.
line : `~matplotlib.lines.Line2D`
The line created by this function.
Only returned if *return_line* is True.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Keyword arguments control the `.Line2D` properties:
%(Line2D:kwdoc)s
See Also
--------
psd : is equivalent to setting ``y = x``.
Notes
-----
For plotting, the power is plotted as
:math:`10 \log_{10}(P_{xy})` for decibels, though :math:`P_{xy}` itself
is returned.
References
----------
Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
John Wiley & Sons (1986)
"""
if Fc is None:
Fc = 0
pxy, freqs = mlab.csd(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
# pxy is complex
freqs += Fc
line = self.plot(freqs, 10 * np.log10(np.abs(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.get_ybound()
step = max(10 * int(np.log10(vmax - vmin)), 1)
ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
self.set_yticks(ticks)
if return_line is None or not return_line:
return pxy, freqs
else:
return pxy, freqs, line
@_api.make_keyword_only("3.10", "Fs")
@_preprocess_data(replace_names=["x"])
@_docstring.interpd
def magnitude_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, scale=None,
**kwargs):
"""
Plot the magnitude spectrum.
Compute the magnitude spectrum of *x*. Data is padded to a
length of *pad_to* and the windowing function *window* is applied to
the signal.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data.
%(Spectral)s
%(Single_Spectrum)s
scale : {'default', 'linear', 'dB'}
The scaling of the values in the *spec*. 'linear' is no scaling.
'dB' returns the values in dB scale, i.e., the dB amplitude
(20 * log10). 'default' is 'linear'.
Fc : int, default: 0
The center frequency of *x*, which offsets the x extents of the
plot to reflect the frequency range used when a signal is acquired
and then filtered and downsampled to baseband.
Returns
-------
spectrum : 1-D array
The values for the magnitude spectrum before scaling (real valued).
freqs : 1-D array
The frequencies corresponding to the elements in *spectrum*.
line : `~matplotlib.lines.Line2D`
The line created by this function.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Keyword arguments control the `.Line2D` properties:
%(Line2D:kwdoc)s
See Also
--------
psd
Plots the power spectral density.
angle_spectrum
Plots the angles of the corresponding frequencies.
phase_spectrum
Plots the phase (unwrapped angle) of the corresponding frequencies.
specgram
Can plot the magnitude spectrum of segments within the signal in a
colormap.
"""
if Fc is None:
Fc = 0
spec, freqs = mlab.magnitude_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
yunits = _api.check_getitem(
{None: 'energy', 'default': 'energy', 'linear': 'energy',
'dB': 'dB'},
scale=scale)
if yunits == 'energy':
Z = spec
else: # yunits == 'dB'
Z = 20. * np.log10(spec)
line, = self.plot(freqs, Z, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Magnitude (%s)' % yunits)
return spec, freqs, line
@_api.make_keyword_only("3.10", "Fs")
@_preprocess_data(replace_names=["x"])
@_docstring.interpd
def angle_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, **kwargs):
"""
Plot the angle spectrum.
Compute the angle spectrum (wrapped phase spectrum) of *x*.
Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data.
%(Spectral)s
%(Single_Spectrum)s
Fc : int, default: 0
The center frequency of *x*, which offsets the x extents of the
plot to reflect the frequency range used when a signal is acquired
and then filtered and downsampled to baseband.
Returns
-------
spectrum : 1-D array
The values for the angle spectrum in radians (real valued).
freqs : 1-D array
The frequencies corresponding to the elements in *spectrum*.
line : `~matplotlib.lines.Line2D`
The line created by this function.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Keyword arguments control the `.Line2D` properties:
%(Line2D:kwdoc)s
See Also
--------
magnitude_spectrum
Plots the magnitudes of the corresponding frequencies.
phase_spectrum
Plots the unwrapped version of this function.
specgram
Can plot the angle spectrum of segments within the signal in a
colormap.
"""
if Fc is None:
Fc = 0
spec, freqs = mlab.angle_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
lines = self.plot(freqs, spec, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Angle (radians)')
return spec, freqs, lines[0]
@_api.make_keyword_only("3.10", "Fs")
@_preprocess_data(replace_names=["x"])
@_docstring.interpd
def phase_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, **kwargs):
"""
Plot the phase spectrum.
Compute the phase spectrum (unwrapped angle spectrum) of *x*.
Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Fc : int, default: 0
The center frequency of *x*, which offsets the x extents of the
plot to reflect the frequency range used when a signal is acquired
and then filtered and downsampled to baseband.
Returns
-------
spectrum : 1-D array
The values for the phase spectrum in radians (real valued).
freqs : 1-D array
The frequencies corresponding to the elements in *spectrum*.
line : `~matplotlib.lines.Line2D`
The line created by this function.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Keyword arguments control the `.Line2D` properties:
%(Line2D:kwdoc)s
See Also
--------
magnitude_spectrum
Plots the magnitudes of the corresponding frequencies.
angle_spectrum
Plots the wrapped version of this function.
specgram
Can plot the phase spectrum of segments within the signal in a
colormap.
"""
if Fc is None:
Fc = 0
spec, freqs = mlab.phase_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
lines = self.plot(freqs, spec, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Phase (radians)')
return spec, freqs, lines[0]
@_api.make_keyword_only("3.10", "NFFT")
@_preprocess_data(replace_names=["x", "y"])
@_docstring.interpd
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
r"""
Plot the coherence between *x* and *y*.
Coherence is the normalized cross spectral density:
.. math::
C_{xy} = \frac{|P_{xy}|^2}{P_{xx}P_{yy}}
Parameters
----------
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between blocks.
Fc : int, default: 0
The center frequency of *x*, which offsets the x extents of the
plot to reflect the frequency range used when a signal is acquired
and then filtered and downsampled to baseband.
Returns
-------
Cxy : 1-D array
The coherence vector.
freqs : 1-D array
The frequencies for the elements in *Cxy*.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
Keyword arguments control the `.Line2D` properties:
%(Line2D:kwdoc)s
References
----------
Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
John Wiley & Sons (1986)
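Examples
--------
A minimal sketch with two noisy signals sharing an illustrative 50 Hz
component; high coherence is expected near that frequency::

    import numpy as np
    import matplotlib.pyplot as plt

    fs = 1000
    t = np.arange(0, 2, 1 / fs)
    rng = np.random.default_rng(7)
    shared = np.sin(2 * np.pi * 50 * t)
    x = shared + rng.normal(size=t.size)
    y = shared + rng.normal(size=t.size)
    fig, ax = plt.subplots()
    ax.cohere(x, y, NFFT=256, Fs=fs)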
"""
cxy, freqs = mlab.cohere(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap,
scale_by_freq=scale_by_freq, sides=sides,
pad_to=pad_to)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
@_api.make_keyword_only("3.10", "NFFT")
@_preprocess_data(replace_names=["x"])
@_docstring.interpd
def specgram(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None,
cmap=None, xextent=None, pad_to=None, sides=None,
scale_by_freq=None, mode=None, scale=None,
vmin=None, vmax=None, **kwargs):
"""
Plot a spectrogram.
Compute and plot a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the spectrum of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*. The spectrogram is plotted as a colormap
(using imshow).
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data.
%(Spectral)s
%(PSD)s
mode : {'default', 'psd', 'magnitude', 'angle', 'phase'}
What sort of spectrum to use. Default is 'psd', which takes the
power spectral density. 'magnitude' returns the magnitude
spectrum. 'angle' returns the phase spectrum without unwrapping.
'phase' returns the phase spectrum with unwrapping.
noverlap : int, default: 128
The number of points of overlap between blocks.
scale : {'default', 'linear', 'dB'}
The scaling of the values in the *spec*. 'linear' is no scaling.
'dB' returns the values in dB scale. When *mode* is 'psd',
this is dB power (10 * log10). Otherwise, this is dB amplitude
(20 * log10). 'default' is 'dB' if *mode* is 'psd' or
'magnitude' and 'linear' otherwise. This must be 'linear'
if *mode* is 'angle' or 'phase'.
Fc : int, default: 0
The center frequency of *x*, which offsets the x extents of the
plot to reflect the frequency range used when a signal is acquired
and then filtered and downsampled to baseband.
cmap : `.Colormap`, default: :rc:`image.cmap`
xextent : *None* or (xmin, xmax)
The image extent along the x-axis. The default sets *xmin* to the
left border of the first bin (*spectrum* column) and *xmax* to the
right border of the last bin. Note that for *noverlap>0* the width
of the bins is smaller than that of the segments.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
vmin, vmax : float, optional
vmin and vmax define the data range that the colormap covers.
By default, the colormap covers the complete value range of the
data.
**kwargs
Additional keyword arguments are passed on to `~.axes.Axes.imshow`
which makes the specgram image. The origin keyword argument
is not supported.
Returns
-------
spectrum : 2D array
Columns are the periodograms of successive segments.
freqs : 1-D array
The frequencies corresponding to the rows in *spectrum*.
t : 1-D array
The times corresponding to midpoints of segments (i.e., the columns
in *spectrum*).
im : `.AxesImage`
The image created by imshow containing the spectrogram.
See Also
--------
psd
Differs in the default overlap; in returning the mean of the
segment periodograms; in not returning times; and in generating a
line plot instead of colormap.
magnitude_spectrum
A single spectrum, similar to having a single segment when *mode*
is 'magnitude'. Plots a line instead of a colormap.
angle_spectrum
A single spectrum, similar to having a single segment when *mode*
is 'angle'. Plots a line instead of a colormap.
phase_spectrum
A single spectrum, similar to having a single segment when *mode*
is 'phase'. Plots a line instead of a colormap.
Notes
-----
The parameters *detrend* and *scale_by_freq* only apply when *mode*
is set to 'psd'.
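Examples
--------
A minimal sketch using an illustrative signal whose frequency rises
with time::

    import numpy as np
    import matplotlib.pyplot as plt

    fs = 10000  # sampling frequency in Hz
    t = np.arange(0, 2, 1 / fs)
    x = np.sin(2 * np.pi * (500 + 200 * t) * t)
    fig, ax = plt.subplots()
    ax.specgram(x, NFFT=1024, Fs=fs, noverlap=512)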
"""
if NFFT is None:
NFFT = 256 # same default as in mlab.specgram()
if Fc is None:
Fc = 0 # same default as in mlab._spectral_helper()
if noverlap is None:
noverlap = 128 # same default as in mlab.specgram()
if Fs is None:
Fs = 2 # same default as in mlab._spectral_helper()
if mode == 'complex':
raise ValueError('Cannot plot a complex specgram')
if scale is None or scale == 'default':
if mode in ['angle', 'phase']:
scale = 'linear'
else:
scale = 'dB'
elif mode in ['angle', 'phase'] and scale == 'dB':
raise ValueError('Cannot use dB scale with angle or phase mode')
spec, freqs, t = mlab.specgram(x=x, NFFT=NFFT, Fs=Fs,
detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if scale == 'linear':
Z = spec
elif scale == 'dB':
if mode is None or mode == 'default' or mode == 'psd':
Z = 10. * np.log10(spec)
else:
Z = 20. * np.log10(spec)
else:
raise ValueError(f'Unknown scale {scale!r}')
Z = np.flipud(Z)
if xextent is None:
# padding is needed for first and last segment:
pad_xextent = (NFFT-noverlap) / Fs / 2
xextent = np.min(t) - pad_xextent, np.max(t) + pad_xextent
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
if 'origin' in kwargs:
raise _api.kwarg_error("specgram", "origin")
im = self.imshow(Z, cmap, extent=extent, vmin=vmin, vmax=vmax,
origin='upper', **kwargs)
self.axis('auto')
return spec, freqs, t, im
@_api.make_keyword_only("3.10", "precision")
@_docstring.interpd
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', origin="upper", **kwargs):
"""
Plot the sparsity pattern of a 2D array.
This visualizes the non-zero values of the array.
Two plotting styles are available: image and marker. Both
are available for full arrays, but only the marker style
works for `scipy.sparse.spmatrix` instances.
**Image style**
If *marker* and *markersize* are *None*, `~.Axes.imshow` is used. Any
extra remaining keyword arguments are passed to this method.
**Marker style**
If *Z* is a `scipy.sparse.spmatrix`, or *marker* or *markersize* are
not *None*, a `.Line2D` object will be returned with the value of marker
determining the marker type, and any remaining keyword arguments
passed to `~.Axes.plot`.
Parameters
----------
Z : (M, N) array-like
The array to be plotted.
precision : float or 'present', default: 0
If *precision* is 0, any non-zero value will be plotted. Otherwise,
values of :math:`|Z| > precision` will be plotted.
For `scipy.sparse.spmatrix` instances, you can also
pass 'present'. In this case any value present in the array
will be plotted, even if it is identically zero.
aspect : {'equal', 'auto', None} or float, default: 'equal'
The aspect ratio of the Axes. This parameter is particularly
relevant for images since it determines whether data pixels are
square.
This parameter is a shortcut for explicitly calling
`.Axes.set_aspect`. See there for further details.
- 'equal': Ensures an aspect ratio of 1. Pixels will be square.
- 'auto': The Axes is kept fixed and the aspect is adjusted so
that the data fit in the Axes. In general, this will result in
non-square pixels.
- *None*: Use :rc:`image.aspect`.
origin : {'upper', 'lower'}, default: :rc:`image.origin`
Place the [0, 0] index of the array in the upper left or lower left
corner of the Axes. The convention 'upper' is typically used for
matrices and images.
Returns
-------
`~matplotlib.image.AxesImage` or `.Line2D`
The return type depends on the plotting style (see above).
Other Parameters
----------------
**kwargs
The supported additional parameters depend on the plotting style.
For the image style, you can pass the following additional
parameters of `~.Axes.imshow`:
- *cmap*
- *alpha*
- *url*
- any `.Artist` properties (passed on to the `.AxesImage`)
For the marker style, you can pass any `.Line2D` property except
for *linestyle*:
%(Line2D:kwdoc)s
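Examples
--------
A minimal sketch of the marker style on an arbitrary, mostly-zero array::

    import numpy as np
    import matplotlib.pyplot as plt

    rng = np.random.default_rng(4)
    Z = rng.random((40, 40))
    Z[Z < 0.9] = 0  # keep roughly one tenth of the entries non-zero
    fig, ax = plt.subplots()
    ax.spy(Z, precision=0.1, marker='.', markersize=3)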
"""
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
_api.check_in_list(["upper", "lower"], origin=origin)
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.abs(Z) > precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
if 'interpolation' in kwargs:
raise _api.kwarg_error("spy", "interpolation")
if 'norm' not in kwargs:
kwargs['norm'] = mcolors.NoNorm()
ret = self.imshow(mask, interpolation='nearest',
aspect=aspect, origin=origin,
**kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.abs(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.abs(Z) > precision
y, x = np.nonzero(nonzero)
if marker is None:
marker = 's'
if markersize is None:
markersize = 10
if 'linestyle' in kwargs:
raise _api.kwarg_error("spy", "linestyle")
ret = mlines.Line2D(
x, y, linestyle='None', marker=marker, markersize=markersize,
**kwargs)
self.add_line(ret)
nr, nc = Z.shape
self.set_xlim(-0.5, nc - 0.5)
if origin == "upper":
self.set_ylim(nr - 0.5, -0.5)
else:
self.set_ylim(-0.5, nr - 0.5)
self.set_aspect(aspect)
self.title.set_y(1.05)
if origin == "upper":
self.xaxis.tick_top()
else: # lower
self.xaxis.tick_bottom()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(
mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True))
self.yaxis.set_major_locator(
mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True))
return ret
def matshow(self, Z, **kwargs):
"""
Plot the values of a 2D matrix or array as color-coded image.
The matrix will be shown the way it would be printed, with the first
row at the top. Row and column numbering is zero-based.
Parameters
----------
Z : (M, N) array-like
The matrix to be displayed.
Returns
-------
`~matplotlib.image.AxesImage`
Other Parameters
----------------
**kwargs : `~matplotlib.axes.Axes.imshow` arguments
See Also
--------
imshow : More general function to plot data on a 2D regular raster.
Notes
-----
This is just a convenience function wrapping `.imshow` to set useful
defaults for displaying a matrix. In particular:
- Set ``origin='upper'``.
- Set ``interpolation='nearest'``.
- Set ``aspect='equal'``.
- Ticks are placed to the left and above.
- Ticks are formatted to show integer indices.
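Examples
--------
A minimal sketch with an arbitrary 5x5 matrix::

    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.matshow(np.random.default_rng(5).random((5, 5)), cmap='viridis')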
"""
Z = np.asanyarray(Z)
kw = {'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal', # (already the imshow default)
**kwargs}
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(
mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True))
self.yaxis.set_major_locator(
mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True))
return im
@_api.make_keyword_only("3.10", "vert")
@_preprocess_data(replace_names=["dataset"])
def violinplot(self, dataset, positions=None, vert=None,
orientation='vertical', widths=0.5, showmeans=False,
showextrema=True, showmedians=False, quantiles=None,
points=100, bw_method=None, side='both',
facecolor=None, linecolor=None):
"""
Make a violin plot.
Make a violin plot for each column of *dataset* or each vector in
sequence *dataset*. Each filled area extends to represent the
entire data range, with optional lines at the mean, the median,
the minimum, the maximum, and user-specified quantiles.
Parameters
----------
dataset : Array or a sequence of vectors.
The input data.
positions : array-like, default: [1, 2, ..., n]
The positions of the violins; i.e. coordinates on the x-axis for
vertical violins (or y-axis for horizontal violins).
vert : bool, optional
.. deprecated:: 3.10
Use *orientation* instead.
If this is given during the deprecation period, it overrides
the *orientation* parameter.
If True, plots the violins vertically.
If False, plots the violins horizontally.
orientation : {'vertical', 'horizontal'}, default: 'vertical'
If 'horizontal', plots the violins horizontally.
Otherwise, plots the violins vertically.
.. versionadded:: 3.10
widths : float or array-like, default: 0.5
The maximum width of each violin in units of the *positions* axis.
The default is 0.5, which is half the available space when using default
*positions*.
showmeans : bool, default: False
Whether to show the mean with a line.
showextrema : bool, default: True
Whether to show extrema with a line.
showmedians : bool, default: False
Whether to show the median with a line.
quantiles : array-like, default: None
If not None, a list of floats in the interval [0, 1] for each violin,
giving the quantiles that will be rendered for that violin.
points : int, default: 100
The number of points to evaluate each of the gaussian kernel density
estimations at.
bw_method : {'scott', 'silverman'} or float or callable, default: 'scott'
The method used to calculate the estimator bandwidth. If a
float, this will be used directly as `!kde.factor`. If a
callable, it should take a `matplotlib.mlab.GaussianKDE` instance as
its only parameter and return a float.
side : {'both', 'low', 'high'}, default: 'both'
'both' plots standard violins. 'low'/'high' only
plots the side below/above the positions value.
facecolor : :mpltype:`color` or list of :mpltype:`color`, optional
If provided, will set the face color(s) of the violins.
.. versionadded:: 3.11
linecolor : :mpltype:`color` or list of :mpltype:`color`, optional
If provided, will set the line color(s) of the violins (the
horizontal and vertical spines and body edges).
.. versionadded:: 3.11
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
Returns
-------
dict
A dictionary mapping each component of the violinplot to a
list of the corresponding collection instances created. The
dictionary has the following keys:
- ``bodies``: A list of the `~.collections.PolyCollection`
instances containing the filled area of each violin.
- ``cmeans``: A `~.collections.LineCollection` instance that marks
the mean values of each of the violin's distribution.
- ``cmins``: A `~.collections.LineCollection` instance that marks
the bottom of each violin's distribution.
- ``cmaxes``: A `~.collections.LineCollection` instance that marks
the top of each violin's distribution.
- ``cbars``: A `~.collections.LineCollection` instance that marks
the centers of each violin's distribution.
- ``cmedians``: A `~.collections.LineCollection` instance that
marks the median values of each of the violin's distribution.
- ``cquantiles``: A `~.collections.LineCollection` instance created
to identify the quantile values of each of the violin's
distribution.
See Also
--------
.Axes.violin : Draw a violin from pre-computed statistics.
boxplot : Draw a box and whisker plot.
"""
vpstats = cbook.violin_stats(dataset, ("GaussianKDE", bw_method),
points=points, quantiles=quantiles)
return self.violin(vpstats, positions=positions, vert=vert,
orientation=orientation, widths=widths,
showmeans=showmeans, showextrema=showextrema,
showmedians=showmedians, side=side,
facecolor=facecolor, linecolor=linecolor)
@_api.make_keyword_only("3.10", "vert")
def violin(self, vpstats, positions=None, vert=None,
orientation='vertical', widths=0.5, showmeans=False,
showextrema=True, showmedians=False, side='both',
facecolor=None, linecolor=None):
"""
Draw a violin plot from pre-computed statistics.
Draw a violin plot for each column of *vpstats*. Each filled area
extends to represent the entire data range, with optional lines at the
mean, the median, the minimum, the maximum, and the quantiles values.
Parameters
----------
vpstats : list of dicts
A list of dictionaries containing stats for each violin plot.
Required keys are:
- ``coords``: A list of scalars containing the coordinates that
the violin's kernel density estimate were evaluated at.
- ``vals``: A list of scalars containing the values of the
kernel density estimate at each of the coordinates given
in *coords*.
- ``mean``: The mean value for this violin's dataset.
- ``median``: The median value for this violin's dataset.
- ``min``: The minimum value for this violin's dataset.
- ``max``: The maximum value for this violin's dataset.
Optional keys are:
- ``quantiles``: A list of scalars containing the quantile values
for this violin's dataset.
positions : array-like, default: [1, 2, ..., n]
The positions of the violins; i.e. coordinates on the x-axis for
vertical violins (or y-axis for horizontal violins).
vert : bool, optional
.. deprecated:: 3.10
Use *orientation* instead.
If this is given during the deprecation period, it overrides
the *orientation* parameter.
If True, plots the violins vertically.
If False, plots the violins horizontally.
orientation : {'vertical', 'horizontal'}, default: 'vertical'
If 'horizontal', plots the violins horizontally.
Otherwise, plots the violins vertically.
.. versionadded:: 3.10
widths : float or array-like, default: 0.5
The maximum width of each violin in units of the *positions* axis.
            The default is 0.5, which is half the available space when using default
*positions*.
showmeans : bool, default: False
Whether to show the mean with a line.
showextrema : bool, default: True
Whether to show extrema with a line.
showmedians : bool, default: False
Whether to show the median with a line.
side : {'both', 'low', 'high'}, default: 'both'
'both' plots standard violins. 'low'/'high' only
plots the side below/above the positions value.
facecolor : :mpltype:`color` or list of :mpltype:`color`, optional
If provided, will set the face color(s) of the violins.
.. versionadded:: 3.11
For backward compatibility, if *facecolor* is not given, the body
will get an Artist-level transparency `alpha <.Artist.set_alpha>`
of 0.3, which will persist if you afterwards change the facecolor,
e.g. via ``result['bodies'][0].set_facecolor('red')``.
If *facecolor* is given, there is no Artist-level transparency.
To set transparency for *facecolor* or *edgecolor* use
``(color, alpha)`` tuples.
linecolor : :mpltype:`color` or list of :mpltype:`color`, optional
If provided, will set the line color(s) of the violins (the
horizontal and vertical spines and body edges).
.. versionadded:: 3.11
Returns
-------
dict
A dictionary mapping each component of the violinplot to a
list of the corresponding collection instances created. The
dictionary has the following keys:
- ``bodies``: A list of the `~.collections.PolyCollection`
instances containing the filled area of each violin.
- ``cmeans``: A `~.collections.LineCollection` instance that marks
the mean values of each of the violin's distribution.
- ``cmins``: A `~.collections.LineCollection` instance that marks
the bottom of each violin's distribution.
- ``cmaxes``: A `~.collections.LineCollection` instance that marks
the top of each violin's distribution.
- ``cbars``: A `~.collections.LineCollection` instance that marks
the centers of each violin's distribution.
- ``cmedians``: A `~.collections.LineCollection` instance that
marks the median values of each of the violin's distribution.
- ``cquantiles``: A `~.collections.LineCollection` instance created
              to identify the quantile values of each of the violin's
distribution.
See Also
--------
violinplot :
Draw a violin plot from data instead of pre-computed statistics.
.cbook.violin_stats:
Calculate a *vpstats* dictionary from data, suitable for passing to violin.
"""
# Statistical quantities to be plotted on the violins
means = []
mins = []
maxes = []
medians = []
quantiles = []
qlens = [] # Number of quantiles in each dataset.
artists = {} # Collections to be returned
N = len(vpstats)
datashape_message = ("List of violinplot statistics and `{0}` "
"values must have the same length")
# vert and orientation parameters are linked until vert's
# deprecation period expires. If both are selected,
# vert takes precedence.
if vert is not None:
_api.warn_deprecated(
"3.11",
name="vert: bool",
alternative="orientation: {'vertical', 'horizontal'}",
)
orientation = 'vertical' if vert else 'horizontal'
_api.check_in_list(['horizontal', 'vertical'], orientation=orientation)
# Validate positions
if positions is None:
positions = range(1, N + 1)
elif len(positions) != N:
raise ValueError(datashape_message.format("positions"))
# Validate widths
if np.isscalar(widths):
widths = [widths] * N
elif len(widths) != N:
raise ValueError(datashape_message.format("widths"))
# Validate side
_api.check_in_list(["both", "low", "high"], side=side)
# Calculate ranges for statistics lines (shape (2, N)).
line_ends = [[-0.25 if side in ['both', 'low'] else 0],
[0.25 if side in ['both', 'high'] else 0]] \
* np.array(widths) + positions
        # Make a cycle of colors to iterate through, using 'none' as fallback
def cycle_color(color, alpha=None):
rgba = mcolors.to_rgba_array(color, alpha=alpha)
color_cycler = itertools.chain(itertools.cycle(rgba),
itertools.repeat('none'))
color_list = []
for _ in range(N):
color_list.append(next(color_cycler))
return color_list
# Convert colors to chain (number of colors can be different from len(vpstats))
if facecolor is None or linecolor is None:
if not mpl.rcParams['_internal.classic_mode']:
next_color = self._get_lines.get_next_color()
if facecolor is not None:
facecolor = cycle_color(facecolor)
body_artist_alpha = None
else:
body_artist_alpha = 0.3
# Use default colors if user doesn't provide them
if mpl.rcParams['_internal.classic_mode']:
facecolor = cycle_color('y')
else:
facecolor = cycle_color(next_color)
if mpl.rcParams['_internal.classic_mode']:
# Classic mode uses patch.force_edgecolor=True, so we need to
# set the edgecolor to make sure it has an alpha.
body_edgecolor = ("k", 0.3)
else:
body_edgecolor = None
if linecolor is not None:
linecolor = cycle_color(linecolor)
else:
if mpl.rcParams['_internal.classic_mode']:
linecolor = cycle_color('r')
else:
linecolor = cycle_color(next_color)
# Check whether we are rendering vertically or horizontally
if orientation == 'vertical':
fill = self.fill_betweenx
if side in ['low', 'high']:
perp_lines = functools.partial(self.hlines, colors=linecolor,
capstyle='projecting')
par_lines = functools.partial(self.vlines, colors=linecolor,
capstyle='projecting')
else:
perp_lines = functools.partial(self.hlines, colors=linecolor)
par_lines = functools.partial(self.vlines, colors=linecolor)
else:
fill = self.fill_between
if side in ['low', 'high']:
perp_lines = functools.partial(self.vlines, colors=linecolor,
capstyle='projecting')
par_lines = functools.partial(self.hlines, colors=linecolor,
capstyle='projecting')
else:
perp_lines = functools.partial(self.vlines, colors=linecolor)
par_lines = functools.partial(self.hlines, colors=linecolor)
# Render violins
bodies = []
bodies_zip = zip(vpstats, positions, widths, facecolor)
for stats, pos, width, facecolor in bodies_zip:
# The 0.5 factor reflects the fact that we plot from v-p to v+p.
vals = np.array(stats['vals'])
vals = 0.5 * width * vals / vals.max()
bodies += [fill(stats['coords'],
-vals + pos if side in ['both', 'low'] else pos,
vals + pos if side in ['both', 'high'] else pos,
facecolor=facecolor, edgecolor=body_edgecolor,
alpha=body_artist_alpha)]
means.append(stats['mean'])
mins.append(stats['min'])
maxes.append(stats['max'])
medians.append(stats['median'])
q = stats.get('quantiles') # a list of floats, or None
if q is None:
q = []
quantiles.extend(q)
qlens.append(len(q))
artists['bodies'] = bodies
if showmeans: # Render means
artists['cmeans'] = perp_lines(means, *line_ends)
if showextrema: # Render extrema
artists['cmaxes'] = perp_lines(maxes, *line_ends)
artists['cmins'] = perp_lines(mins, *line_ends)
artists['cbars'] = par_lines(positions, mins, maxes)
if showmedians: # Render medians
artists['cmedians'] = perp_lines(medians, *line_ends)
if quantiles: # Render quantiles: each width is repeated qlen times.
artists['cquantiles'] = perp_lines(
quantiles, *np.repeat(line_ends, qlens, axis=1))
return artists
# Methods that are entirely implemented in other modules.
table = _make_axes_method(mtable.table)
# args can be either Y or y1, y2, ... and all should be replaced
stackplot = _preprocess_data()(_make_axes_method(mstack.stackplot))
streamplot = _preprocess_data(
replace_names=["x", "y", "u", "v", "start_points"])(
_make_axes_method(mstream.streamplot))
tricontour = _make_axes_method(mtri.tricontour)
tricontourf = _make_axes_method(mtri.tricontourf)
tripcolor = _make_axes_method(mtri.tripcolor)
triplot = _make_axes_method(mtri.triplot)
def _get_aspect_ratio(self):
"""
Convenience method to calculate the aspect ratio of the Axes in
the display coordinate system.
"""
figure_size = self.get_figure().get_size_inches()
ll, ur = self.get_position() * figure_size
width, height = ur - ll
return height / (width * self.get_data_ratio())
|
Axes
|
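The two docstrings above cover the `violinplot`/`violin` pair: `violinplot` runs the Gaussian KDE itself via `cbook.violin_stats`, while `violin` only draws from pre-computed `vpstats`. A minimal usage sketch of the high-level entry point (standard Matplotlib/NumPy; the sample data is made up):

```python
import matplotlib.pyplot as plt
import numpy as np

rng = np.random.default_rng(0)
data = [rng.normal(loc, 1.0, size=200) for loc in (0, 2, 4)]  # three made-up samples

fig, ax = plt.subplots()
# One violin per sequence in `data`, with medians and quartiles marked.
parts = ax.violinplot(data, positions=[1, 2, 3], showmedians=True,
                      quantiles=[[0.25, 0.75]] * 3)
parts["bodies"][0].set_facecolor("tab:orange")  # the returned dict maps parts to collections
plt.show()
```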
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 954349,
"end": 954761
}
|
class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("ReviewDismissalAllowance", graphql_name="node")
"""The item at the end of the edge."""
|
ReviewDismissalAllowanceEdge
|
python
|
pytorch__pytorch
|
torch/distributions/binomial.py
|
{
"start": 488,
"end": 6484
}
|
class ____(Distribution):
r"""
Creates a Binomial distribution parameterized by :attr:`total_count` and
either :attr:`probs` or :attr:`logits` (but not both). :attr:`total_count` must be
broadcastable with :attr:`probs`/:attr:`logits`.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Binomial(100, torch.tensor([0, 0.2, 0.8, 1]))
>>> x = m.sample()
tensor([ 0., 22., 71., 100.])
>>> m = Binomial(torch.tensor([[5.], [10.]]), torch.tensor([0.5, 0.8]))
>>> x = m.sample()
tensor([[ 4., 5.],
[ 7., 6.]])
Args:
total_count (int or Tensor): number of Bernoulli trials
probs (Tensor): Event probabilities
logits (Tensor): Event log-odds
"""
# pyrefly: ignore [bad-override]
arg_constraints = {
"total_count": constraints.nonnegative_integer,
"probs": constraints.unit_interval,
"logits": constraints.real,
}
has_enumerate_support = True
def __init__(
self,
total_count: Union[Tensor, int] = 1,
probs: Optional[Tensor] = None,
logits: Optional[Tensor] = None,
validate_args: Optional[bool] = None,
) -> None:
if (probs is None) == (logits is None):
raise ValueError(
"Either `probs` or `logits` must be specified, but not both."
)
if probs is not None:
(
self.total_count,
# pyrefly: ignore [read-only]
self.probs,
) = broadcast_all(total_count, probs)
self.total_count = self.total_count.type_as(self.probs)
else:
assert logits is not None # helps mypy
(
self.total_count,
# pyrefly: ignore [read-only]
self.logits,
) = broadcast_all(total_count, logits)
self.total_count = self.total_count.type_as(self.logits)
self._param = self.probs if probs is not None else self.logits
batch_shape = self._param.size()
super().__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Binomial, _instance)
batch_shape = torch.Size(batch_shape)
new.total_count = self.total_count.expand(batch_shape)
if "probs" in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if "logits" in self.__dict__:
new.logits = self.logits.expand(batch_shape)
new._param = new.logits
super(Binomial, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._param.new(*args, **kwargs)
@constraints.dependent_property(is_discrete=True, event_dim=0)
# pyrefly: ignore [bad-override]
def support(self):
return constraints.integer_interval(0, self.total_count)
@property
def mean(self) -> Tensor:
return self.total_count * self.probs
@property
def mode(self) -> Tensor:
return ((self.total_count + 1) * self.probs).floor().clamp(max=self.total_count)
@property
def variance(self) -> Tensor:
return self.total_count * self.probs * (1 - self.probs)
@lazy_property
def logits(self) -> Tensor:
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self) -> Tensor:
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self) -> torch.Size:
return self._param.size()
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.binomial(
self.total_count.expand(shape), self.probs.expand(shape)
)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
log_factorial_n = torch.lgamma(self.total_count + 1)
log_factorial_k = torch.lgamma(value + 1)
log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
# k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
# (case logit < 0) = k * logit - n * log1p(e^logit)
# (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
# = k * logit - n * logit - n * log1p(e^-logit)
# (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
normalize_term = (
self.total_count * _clamp_by_zero(self.logits)
+ self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits)))
- log_factorial_n
)
return (
value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
)
def entropy(self):
total_count = int(self.total_count.max())
if not self.total_count.min() == total_count:
raise NotImplementedError(
"Inhomogeneous total count not supported by `entropy`."
)
log_prob = self.log_prob(self.enumerate_support(False))
return -(torch.exp(log_prob) * log_prob).sum(0)
def enumerate_support(self, expand=True):
total_count = int(self.total_count.max())
if not self.total_count.min() == total_count:
raise NotImplementedError(
"Inhomogeneous total count not supported by `enumerate_support`."
)
values = torch.arange(
1 + total_count, dtype=self._param.dtype, device=self._param.device
)
values = values.view((-1,) + (1,) * len(self._batch_shape))
if expand:
values = values.expand((-1,) + self._batch_shape)
return values
|
Binomial
|
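The comment block inside `log_prob` rewrites the binomial log-likelihood in terms of the logits so it stays numerically stable for large counts. A short usage sketch (sampled values are random; the comments only indicate what each call represents):

```python
import torch
from torch.distributions import Binomial

m = Binomial(total_count=10, probs=torch.tensor(0.3))
print(m.sample((3,)))                # three draws in {0, ..., 10}, e.g. tensor([2., 4., 3.])
print(m.log_prob(torch.tensor(3.)))  # log C(10, 3) + 3*log(0.3) + 7*log(0.7)
print(m.enumerate_support())         # tensor([0., 1., ..., 10.]) -- 11 support points
print(m.entropy())                   # -sum(p * log p) over that support
```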
python
|
langchain-ai__langchain
|
libs/langchain_v1/tests/unit_tests/agents/test_responses_spec.py
|
{
"start": 498,
"end": 712
}
|
class ____(BaseSchema):
prompt: str
tools_with_expected_calls: ToolCalls
expected_last_message: str
expected_structured_response: dict[str, Any] | None
llm_request_count: int
|
AssertionByInvocation
|
python
|
doocs__leetcode
|
solution/1200-1299/1295.Find Numbers with Even Number of Digits/Solution.py
|
{
"start": 0,
"end": 122
}
|
class ____:
def findNumbers(self, nums: List[int]) -> int:
return sum(len(str(x)) % 2 == 0 for x in nums)
|
Solution
|
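A quick check of the even-digit-count one-liner above, using what I take to be the standard example input (treat it as an illustrative sketch):

```python
# 12 and 7896 have an even number of digits, so the expected count is 2.
assert Solution().findNumbers([12, 345, 2, 6, 7896]) == 2
```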
python
|
cookiecutter__cookiecutter
|
cookiecutter/exceptions.py
|
{
"start": 3730,
"end": 3899
}
|
class ____(CookiecutterException):
"""
Exception for missing repo.
Raised when the specified cookiecutter repository doesn't exist.
"""
|
RepositoryNotFound
|
python
|
ApeWorX__ape
|
tests/functional/utils/test_basemodel.py
|
{
"start": 291,
"end": 1683
}
|
class ____(ManagerAccessMixin):
pass
@pytest.mark.parametrize("accessor", (CustomClass, CustomClass()))
def test_provider(accessor, eth_tester_provider):
assert accessor.provider == eth_tester_provider
@pytest.mark.parametrize("accessor", (CustomClass, CustomClass()))
def test_provider_not_active(networks, accessor):
initial = networks.active_provider
networks.active_provider = None
try:
with pytest.raises(ProviderNotConnectedError):
_ = accessor.provider
finally:
networks.active_provider = initial
def test_only_raise_attribute_error(mocker, ape_caplog):
spy = mocker.spy(logger, "log_debug_stack_trace")
@only_raise_attribute_error
def fn():
raise ValueError("foo bar error")
with pytest.raises(AttributeError, match="foo bar error"):
fn()
assert spy.call_count
def test_only_raise_attribute_error_when_already_raises(mocker, ape_caplog):
spy = mocker.spy(logger, "log_debug_stack_trace")
@only_raise_attribute_error
def fn():
raise AttributeError("foo bar error")
with pytest.raises(AttributeError, match="foo bar error"):
fn()
# Does not log because is already an attr err
assert not spy.call_count
def test_dependency_manager():
actual = ManagerAccessMixin.dependency_manager
assert isinstance(actual, DependencyManager)
|
CustomClass
|
python
|
huggingface__transformers
|
src/transformers/models/cvt/configuration_cvt.py
|
{
"start": 780,
"end": 6684
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CvtModel`]. It is used to instantiate a CvT model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the CvT
[microsoft/cvt-13](https://huggingface.co/microsoft/cvt-13) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3]`):
The kernel size of each encoder's patch embedding.
patch_stride (`list[int]`, *optional*, defaults to `[4, 2, 2]`):
The stride size of each encoder's patch embedding.
patch_padding (`list[int]`, *optional*, defaults to `[2, 1, 1]`):
The padding size of each encoder's patch embedding.
embed_dim (`list[int]`, *optional*, defaults to `[64, 192, 384]`):
Dimension of each of the encoder blocks.
num_heads (`list[int]`, *optional*, defaults to `[1, 3, 6]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
depth (`list[int]`, *optional*, defaults to `[1, 2, 10]`):
The number of layers in each encoder block.
        mlp_ratio (`list[float]`, *optional*, defaults to `[4.0, 4.0, 4.0]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
attention_drop_rate (`list[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`):
The dropout ratio for the attention probabilities.
drop_rate (`list[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`):
The dropout ratio for the patch embeddings probabilities.
drop_path_rate (`list[float]`, *optional*, defaults to `[0.0, 0.0, 0.1]`):
The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
qkv_bias (`list[bool]`, *optional*, defaults to `[True, True, True]`):
The bias bool for query, key and value in attentions
cls_token (`list[bool]`, *optional*, defaults to `[False, False, True]`):
Whether or not to add a classification token to the output of each of the last 3 stages.
        qkv_projection_method (`list[string]`, *optional*, defaults to `["dw_bn", "dw_bn", "dw_bn"]`):
            The projection method for query, key and value. Default is depth-wise convolutions with batch norm. For
            linear projection use "avg".
kernel_qkv (`list[int]`, *optional*, defaults to `[3, 3, 3]`):
The kernel size for query, key and value in attention layer
padding_kv (`list[int]`, *optional*, defaults to `[1, 1, 1]`):
The padding size for key and value in attention layer
stride_kv (`list[int]`, *optional*, defaults to `[2, 2, 2]`):
The stride size for key and value in attention layer
padding_q (`list[int]`, *optional*, defaults to `[1, 1, 1]`):
The padding size for query in attention layer
stride_q (`list[int]`, *optional*, defaults to `[1, 1, 1]`):
The stride size for query in attention layer
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
Example:
```python
>>> from transformers import CvtConfig, CvtModel
>>> # Initializing a Cvt msft/cvt style configuration
>>> configuration = CvtConfig()
>>> # Initializing a model (with random weights) from the msft/cvt style configuration
>>> model = CvtModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "cvt"
def __init__(
self,
num_channels=3,
patch_sizes=[7, 3, 3],
patch_stride=[4, 2, 2],
patch_padding=[2, 1, 1],
embed_dim=[64, 192, 384],
num_heads=[1, 3, 6],
depth=[1, 2, 10],
mlp_ratio=[4.0, 4.0, 4.0],
attention_drop_rate=[0.0, 0.0, 0.0],
drop_rate=[0.0, 0.0, 0.0],
drop_path_rate=[0.0, 0.0, 0.1],
qkv_bias=[True, True, True],
cls_token=[False, False, True],
qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
kernel_qkv=[3, 3, 3],
padding_kv=[1, 1, 1],
stride_kv=[2, 2, 2],
padding_q=[1, 1, 1],
stride_q=[1, 1, 1],
initializer_range=0.02,
layer_norm_eps=1e-12,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.patch_sizes = patch_sizes
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.embed_dim = embed_dim
self.num_heads = num_heads
self.depth = depth
self.mlp_ratio = mlp_ratio
self.attention_drop_rate = attention_drop_rate
self.drop_rate = drop_rate
self.drop_path_rate = drop_path_rate
self.qkv_bias = qkv_bias
self.cls_token = cls_token
self.qkv_projection_method = qkv_projection_method
self.kernel_qkv = kernel_qkv
self.padding_kv = padding_kv
self.stride_kv = stride_kv
self.padding_q = padding_q
self.stride_q = stride_q
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
__all__ = ["CvtConfig"]
|
CvtConfig
|
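Most `CvtConfig` arguments are per-stage lists (one entry per encoder stage), so changing the architecture means editing those lists in lockstep. A hedged sketch on top of the `transformers` API shown in the docstring (the specific depths are illustrative, roughly CvT-21-like):

```python
from transformers import CvtConfig, CvtModel

config = CvtConfig(
    embed_dim=[64, 192, 384],
    num_heads=[1, 3, 6],
    depth=[1, 4, 16],   # per-stage depths; every per-stage list must keep length 3
)
model = CvtModel(config)  # randomly initialized weights
```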
python
|
ray-project__ray
|
python/ray/serve/_private/replica.py
|
{
"start": 47962,
"end": 56242
}
|
class ____:
"""Actor definition for replicas of Ray Serve deployments.
This class defines the interface that the controller and deployment handles
(i.e., from proxies and other replicas) use to interact with a replica.
All interaction with the user-provided callable is done via the
`UserCallableWrapper` class.
"""
async def __init__(
self,
replica_id: ReplicaID,
serialized_deployment_def: bytes,
serialized_init_args: bytes,
serialized_init_kwargs: bytes,
deployment_config_proto_bytes: bytes,
version: DeploymentVersion,
ingress: bool,
route_prefix: str,
):
deployment_config = DeploymentConfig.from_proto_bytes(
deployment_config_proto_bytes
)
deployment_def = cloudpickle.loads(serialized_deployment_def)
if isinstance(deployment_def, str):
deployment_def = _load_deployment_def_from_import_path(deployment_def)
self._replica_impl: ReplicaBase = create_replica_impl(
replica_id=replica_id,
deployment_def=deployment_def,
init_args=cloudpickle.loads(serialized_init_args),
init_kwargs=cloudpickle.loads(serialized_init_kwargs),
deployment_config=deployment_config,
version=version,
ingress=ingress,
route_prefix=route_prefix,
)
def push_proxy_handle(self, handle: ActorHandle):
# NOTE(edoakes): it's important to call a method on the proxy handle to
# initialize its state in the C++ core worker.
handle.pong.remote()
def get_num_ongoing_requests(self) -> int:
"""Fetch the number of ongoing requests at this replica (queue length).
This runs on a separate thread (using a Ray concurrency group) so it will
not be blocked by user code.
"""
return self._replica_impl.get_num_ongoing_requests()
async def is_allocated(self) -> str:
"""poke the replica to check whether it's alive.
When calling this method on an ActorHandle, it will complete as
soon as the actor has started running. We use this mechanism to
detect when a replica has been allocated a worker slot.
At this time, the replica can transition from PENDING_ALLOCATION
to PENDING_INITIALIZATION startup state.
Returns:
            The PID, actor ID, worker ID, node ID, node IP, node instance ID, and log filepath of the replica.
"""
return (
os.getpid(),
ray.get_runtime_context().get_actor_id(),
ray.get_runtime_context().get_worker_id(),
ray.get_runtime_context().get_node_id(),
ray.util.get_node_ip_address(),
ray.util.get_node_instance_id(),
get_component_logger_file_path(),
)
def list_outbound_deployments(self) -> Optional[List[DeploymentID]]:
return self._replica_impl.list_outbound_deployments()
async def initialize_and_get_metadata(
self, deployment_config: DeploymentConfig = None, rank: ReplicaRank = None
) -> ReplicaMetadata:
"""Handles initializing the replica.
Returns: 5-tuple containing
1. DeploymentConfig of the replica
2. DeploymentVersion of the replica
3. Initialization duration in seconds
4. Port
5. FastAPI `docs_path`, if relevant (i.e. this is an ingress deployment integrated with FastAPI).
"""
# Unused `_after` argument is for scheduling: passing an ObjectRef
# allows delaying this call until after the `_after` call has returned.
await self._replica_impl.initialize(deployment_config, rank)
return self._replica_impl.get_metadata()
async def check_health(self):
await self._replica_impl.check_health()
async def record_routing_stats(self) -> Dict[str, Any]:
return await self._replica_impl.record_routing_stats()
async def reconfigure(
self, deployment_config, rank: ReplicaRank, route_prefix: Optional[str] = None
) -> ReplicaMetadata:
await self._replica_impl.reconfigure(deployment_config, rank, route_prefix)
return self._replica_impl.get_metadata()
def _preprocess_request_args(
self,
pickled_request_metadata: bytes,
request_args: Tuple[Any],
) -> Tuple[RequestMetadata, Tuple[Any]]:
request_metadata = pickle.loads(pickled_request_metadata)
if request_metadata.is_http_request or request_metadata.is_grpc_request:
request_args = (pickle.loads(request_args[0]),)
return request_metadata, request_args
async def handle_request(
self,
pickled_request_metadata: bytes,
*request_args,
**request_kwargs,
) -> Tuple[bytes, Any]:
"""Entrypoint for `stream=False` calls."""
request_metadata, request_args = self._preprocess_request_args(
pickled_request_metadata, request_args
)
result = await self._replica_impl.handle_request(
request_metadata, *request_args, **request_kwargs
)
if request_metadata.is_grpc_request:
result = (request_metadata.grpc_context, result.SerializeToString())
return result
async def handle_request_streaming(
self,
pickled_request_metadata: bytes,
*request_args,
**request_kwargs,
) -> AsyncGenerator[Any, None]:
"""Generator that is the entrypoint for all `stream=True` handle calls."""
request_metadata, request_args = self._preprocess_request_args(
pickled_request_metadata, request_args
)
async for result in self._replica_impl.handle_request_streaming(
request_metadata, *request_args, **request_kwargs
):
if request_metadata.is_grpc_request:
result = (request_metadata.grpc_context, result.SerializeToString())
yield result
async def handle_request_with_rejection(
self,
pickled_request_metadata: bytes,
*request_args,
**request_kwargs,
) -> AsyncGenerator[Any, None]:
"""Entrypoint for all requests with strict max_ongoing_requests enforcement.
The first response from this generator is always a system message indicating
if the request was accepted (the replica has capacity for the request) or
rejected (the replica is already at max_ongoing_requests).
For non-streaming requests, there will only be one more message, the unary
result of the user request handler.
For streaming requests, the subsequent messages will be the results of the
user request handler (which must be a generator).
"""
request_metadata, request_args = self._preprocess_request_args(
pickled_request_metadata, request_args
)
async for result in self._replica_impl.handle_request_with_rejection(
request_metadata, *request_args, **request_kwargs
):
if isinstance(result, ReplicaQueueLengthInfo):
yield pickle.dumps(result)
else:
if request_metadata.is_grpc_request:
result = (request_metadata.grpc_context, result.SerializeToString())
yield result
async def handle_request_from_java(
self,
proto_request_metadata: bytes,
*request_args,
**request_kwargs,
) -> Any:
from ray.serve.generated.serve_pb2 import (
RequestMetadata as RequestMetadataProto,
)
proto = RequestMetadataProto.FromString(proto_request_metadata)
request_metadata: RequestMetadata = RequestMetadata(
request_id=proto.request_id,
internal_request_id=proto.internal_request_id,
call_method=proto.call_method,
multiplexed_model_id=proto.multiplexed_model_id,
route=proto.route,
)
return await self._replica_impl.handle_request(
request_metadata, *request_args, **request_kwargs
)
async def perform_graceful_shutdown(self):
await self._replica_impl.perform_graceful_shutdown()
@dataclass
|
ReplicaActor
|
python
|
cython__cython
|
tests/run/test_patma.py
|
{
"start": 81967,
"end": 90262
}
|
class ____(unittest.TestCase):
def assert_syntax_error(self, code: str):
with self.assertRaises(SyntaxError):
compile(inspect.cleandoc(code), "<test>", "exec")
def test_alternative_patterns_bind_different_names_0(self):
self.assert_syntax_error("""
match ...:
case "a" | a:
pass
""")
def test_alternative_patterns_bind_different_names_1(self):
self.assert_syntax_error("""
match ...:
case [a, [b] | [c] | [d]]:
pass
""")
@disable # validation will be added when class patterns are added
def test_attribute_name_repeated_in_class_pattern(self):
self.assert_syntax_error("""
match ...:
case Class(a=_, a=_):
pass
""")
def test_imaginary_number_required_in_complex_literal_0(self):
self.assert_syntax_error("""
match ...:
case 0+0:
pass
""")
def test_imaginary_number_required_in_complex_literal_1(self):
self.assert_syntax_error("""
match ...:
case {0+0: _}:
pass
""")
def test_invalid_syntax_0(self):
self.assert_syntax_error("""
match ...:
case {**rest, "key": value}:
pass
""")
def test_invalid_syntax_1(self):
self.assert_syntax_error("""
match ...:
case {"first": first, **rest, "last": last}:
pass
""")
def test_invalid_syntax_2(self):
self.assert_syntax_error("""
match ...:
case {**_}:
pass
""")
def test_invalid_syntax_3(self):
self.assert_syntax_error("""
match ...:
case 42 as _:
pass
""")
def test_mapping_pattern_keys_may_only_match_literals_and_attribute_lookups(self):
self.assert_syntax_error("""
match ...:
case {f"": _}:
pass
""")
def test_multiple_assignments_to_name_in_pattern_0(self):
self.assert_syntax_error("""
match ...:
case a, a:
pass
""")
def test_multiple_assignments_to_name_in_pattern_1(self):
self.assert_syntax_error("""
match ...:
case {"k": a, "l": a}:
pass
""")
def test_multiple_assignments_to_name_in_pattern_2(self):
self.assert_syntax_error("""
match ...:
case MyClass(x, x):
pass
""")
def test_multiple_assignments_to_name_in_pattern_3(self):
self.assert_syntax_error("""
match ...:
case MyClass(x=x, y=x):
pass
""")
def test_multiple_assignments_to_name_in_pattern_4(self):
self.assert_syntax_error("""
match ...:
case MyClass(x, y=x):
pass
""")
def test_multiple_assignments_to_name_in_pattern_5(self):
self.assert_syntax_error("""
match ...:
case a as a:
pass
""")
@disable # will be implemented as part of sequence patterns
def test_multiple_starred_names_in_sequence_pattern_0(self):
self.assert_syntax_error("""
match ...:
case *a, b, *c, d, *e:
pass
""")
@disable # will be implemented as part of sequence patterns
def test_multiple_starred_names_in_sequence_pattern_1(self):
self.assert_syntax_error("""
match ...:
case a, *b, c, *d, e:
pass
""")
def test_name_capture_makes_remaining_patterns_unreachable_0(self):
self.assert_syntax_error("""
match ...:
case a | "a":
pass
""")
def test_name_capture_makes_remaining_patterns_unreachable_1(self):
self.assert_syntax_error("""
match 42:
case x:
pass
case y:
pass
""")
def test_name_capture_makes_remaining_patterns_unreachable_2(self):
self.assert_syntax_error("""
match ...:
case x | [_ as x] if x:
pass
""")
def test_name_capture_makes_remaining_patterns_unreachable_3(self):
self.assert_syntax_error("""
match ...:
case x:
pass
case [x] if x:
pass
""")
def test_name_capture_makes_remaining_patterns_unreachable_4(self):
self.assert_syntax_error("""
match ...:
case x:
pass
case _:
pass
""")
def test_patterns_may_only_match_literals_and_attribute_lookups_0(self):
self.assert_syntax_error("""
match ...:
case f"":
pass
""")
def test_patterns_may_only_match_literals_and_attribute_lookups_1(self):
self.assert_syntax_error("""
match ...:
case f"{x}":
pass
""")
def test_real_number_required_in_complex_literal_0(self):
self.assert_syntax_error("""
match ...:
case 0j+0:
pass
""")
def test_real_number_required_in_complex_literal_1(self):
self.assert_syntax_error("""
match ...:
case 0j+0j:
pass
""")
def test_real_number_required_in_complex_literal_2(self):
self.assert_syntax_error("""
match ...:
case {0j+0: _}:
pass
""")
def test_real_number_required_in_complex_literal_3(self):
self.assert_syntax_error("""
match ...:
case {0j+0j: _}:
pass
""")
def test_wildcard_makes_remaining_patterns_unreachable_0(self):
self.assert_syntax_error("""
match ...:
case _ | _:
pass
""")
def test_wildcard_makes_remaining_patterns_unreachable_1(self):
self.assert_syntax_error("""
match ...:
case (_ as x) | [x]:
pass
""")
def test_wildcard_makes_remaining_patterns_unreachable_2(self):
self.assert_syntax_error("""
match ...:
case _ | _ if condition():
pass
""")
def test_wildcard_makes_remaining_patterns_unreachable_3(self):
self.assert_syntax_error("""
match ...:
case _:
pass
case None:
pass
""")
def test_wildcard_makes_remaining_patterns_unreachable_4(self):
self.assert_syntax_error("""
match ...:
case (None | _) | _:
pass
""")
def test_wildcard_makes_remaining_patterns_unreachable_5(self):
self.assert_syntax_error("""
match ...:
case _ | (True | False):
pass
""")
@disable # validation will be added when class patterns are added
def test_mapping_pattern_duplicate_key(self):
self.assert_syntax_error("""
match ...:
case {"a": _, "a": _}:
pass
""")
@disable # validation will be added when class patterns are added
def test_mapping_pattern_duplicate_key_edge_case0(self):
self.assert_syntax_error("""
match ...:
case {0: _, False: _}:
pass
""")
@disable # validation will be added when class patterns are added
def test_mapping_pattern_duplicate_key_edge_case1(self):
self.assert_syntax_error("""
match ...:
case {0: _, 0.0: _}:
pass
""")
@disable # validation will be added when class patterns are added
def test_mapping_pattern_duplicate_key_edge_case2(self):
self.assert_syntax_error("""
match ...:
case {0: _, -0: _}:
pass
""")
@disable # validation will be added when class patterns are added
def test_mapping_pattern_duplicate_key_edge_case3(self):
self.assert_syntax_error("""
match ...:
case {0: _, 0j: _}:
pass
""")
|
TestSyntaxErrors
|
python
|
ipython__ipython
|
IPython/utils/capture.py
|
{
"start": 1716,
"end": 3447
}
|
class ____:
"""Simple object for containing captured stdout/err and rich display StringIO objects
Each instance `c` has three attributes:
- ``c.stdout`` : standard output as a string
- ``c.stderr`` : standard error as a string
- ``c.outputs``: a list of rich display outputs
Additionally, there's a ``c.show()`` method which will print all of the
above in the same order, and can be invoked simply via ``c()``.
"""
def __init__(self, stdout, stderr, outputs=None):
self._stdout = stdout
self._stderr = stderr
if outputs is None:
outputs = []
self._outputs = outputs
def __str__(self):
return self.stdout
@property
def stdout(self):
"Captured standard output"
if not self._stdout:
return ''
return self._stdout.getvalue()
@property
def stderr(self):
"Captured standard error"
if not self._stderr:
return ''
return self._stderr.getvalue()
@property
def outputs(self):
"""A list of the captured rich display outputs, if any.
If you have a CapturedIO object ``c``, these can be displayed in IPython
using::
from IPython.display import display
for o in c.outputs:
display(o)
"""
return [ RichOutput(**kargs) for kargs in self._outputs ]
def show(self):
"""write my output to sys.stdout/err as appropriate"""
sys.stdout.write(self.stdout)
sys.stderr.write(self.stderr)
sys.stdout.flush()
sys.stderr.flush()
for kargs in self._outputs:
RichOutput(**kargs).display()
__call__ = show
|
CapturedIO
|
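`CapturedIO` is normally obtained from the `capture_output` context manager defined in the same module. A minimal sketch:

```python
from IPython.utils.capture import capture_output

with capture_output() as c:      # c is a CapturedIO instance
    print("hello from stdout")

print(repr(c.stdout))            # "'hello from stdout\n'"
c.show()                         # replays stdout/stderr and any rich display outputs
```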
python
|
kamyu104__LeetCode-Solutions
|
Python/design-search-autocomplete-system.py
|
{
"start": 860,
"end": 2083
}
|
class ____(object):
def __init__(self, sentences, times):
"""
:type sentences: List[str]
:type times: List[int]
"""
self.__trie = TrieNode()
self.__cur_node = self.__trie
self.__search = []
self.__sentence_to_count = collections.defaultdict(int)
for sentence, count in zip(sentences, times):
self.__sentence_to_count[sentence] = count
self.__trie.insert(sentence, count)
def input(self, c):
"""
:type c: str
:rtype: List[str]
"""
result = []
if c == '#':
self.__sentence_to_count["".join(self.__search)] += 1
self.__trie.insert("".join(self.__search), self.__sentence_to_count["".join(self.__search)])
self.__cur_node = self.__trie
self.__search = []
else:
self.__search.append(c)
if self.__cur_node:
if c not in self.__cur_node.leaves:
self.__cur_node = None
return []
self.__cur_node = self.__cur_node.leaves[c]
result = [p[1] for p in self.__cur_node.infos]
return result
|
AutocompleteSystem
|
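`input` returns the current top suggestions until `'#'` finalizes the typed sentence. A usage sketch based on the usual autocomplete example; it relies on the `TrieNode` helper defined elsewhere in the same file (not shown here), and the suggestion lists in the comments are indicative:

```python
system = AutocompleteSystem(
    ["i love you", "island", "iroman", "i love leetcode"], [5, 3, 2, 2]
)
print(system.input("i"))   # e.g. ["i love you", "island", "i love leetcode"]
print(system.input(" "))   # e.g. ["i love you", "i love leetcode"]
print(system.input("a"))   # []  -- nothing starts with "i a"
print(system.input("#"))   # []  -- "i a" is stored as a new sentence
```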
python
|
psf__black
|
tests/data/cases/preview_long_dict_values.py
|
{
"start": 3503,
"end": 7642
}
|
class ____:
def func():
random_service.status.active_states.inactive = (
make_new_top_level_state_from_dict(
{
"topLevelBase": {
"secondaryBase": {
"timestamp": 1234,
"latitude": 1,
"longitude": 2,
"actionTimestamp": Timestamp(
seconds=1530584000, nanos=0
).ToJsonString(),
}
},
}
)
)
# output
x = {
"xx_xxxxx_xxxxxxxxxx_xxxxxxxxx_xx": (
"xx:xxxxxxxxxxxxxxxxx_xxxxx_xxxxxxx_xxxxxxxxxxx{xx}xxx_xxxxx_xxxxxxxxx_xxxxxxxxxxxx_xxxx"
)
}
x = {
"xx_xxxxx_xxxxxxxxxx_xxxxxxxxx_xx": (
"xx:xxxxxxxxxxxxxxxxx_xxxxx_xxxxxxx_xxxxxxxxxxx{xx}xxx_xxxxx_xxxxxxxxx_xxxxxxxxxxxx_xxxx"
),
}
x = {
"foo": bar,
"foo": bar,
"foo": (
xx_xxxxxxxxxxxxxxxxx_xxxxx_xxxxxxx_xxxxxxxxxxxxxx_xxxxx_xxxxxxxxx_xxxxxxxxxxxx_xxxx
),
}
x = {
"xx_xxxxx_xxxxxxxxxx_xxxxxxxxx_xx": "xx:xxxxxxxxxxxxxxxxx_xxxxx_xxxxxxx_xxxxxxxxxx"
}
my_dict = {
"something_something": (
r"Lorem ipsum dolor sit amet, an sed convenire eloquentiam \t"
r"signiferumque, duo ea vocibus consetetur scriptorem. Facer \t"
r"signiferumque, duo ea vocibus consetetur scriptorem. Facer \t"
),
}
# Function calls as keys
tasks = {
get_key_name(
foo,
bar,
baz,
): src,
loop.run_in_executor(): src,
loop.run_in_executor(xx_xxxxxxxxxxxxxxxxx_xxxxx_xxxxxxx_xxxxxxxxxxxxxx): src,
loop.run_in_executor(
xx_xxxxxxxxxxxxxxxxx_xxxxx_xxxxxxx_xxxxxxxxxxxxxx_xxxxx_xxxxx
): src,
loop.run_in_executor(): (
xx_xxxxxxxxxxxxxxxxx_xxxxx_xxxxxxx_xxxxxxxxxxxxxx_xxxxx_xxxxxxxxx_xxxxxxxxxxxx_xxxx
),
}
# Dictionary comprehensions
tasks = {
key_name: (
xx_xxxxxxxxxxxxxxxxx_xxxxx_xxxxxxx_xxxxxxxxxxxxxx_xxxxx_xxxxxxxxx_xxxxxxxxxxxx_xxxx
)
for src in sources
}
tasks = {key_name: foobar for src in sources}
tasks = {
get_key_name(
src,
): "foo"
for src in sources
}
tasks = {
get_key_name(
foo,
bar,
baz,
): src
for src in sources
}
tasks = {
get_key_name(): (
xx_xxxxxxxxxxxxxxxxx_xxxxx_xxxxxxx_xxxxxxxxxxxxxx_xxxxx_xxxxxxxxx_xxxxxxxxxxxx_xxxx
)
for src in sources
}
tasks = {get_key_name(): foobar for src in sources}
# Delimiters inside the value
def foo():
def bar():
x = {
common.models.DateTimeField: (
datetime(2020, 1, 31, tzinfo=utc) + timedelta(days=i)
),
}
x = {
common.models.DateTimeField: (
datetime(2020, 1, 31, tzinfo=utc) + timedelta(days=i)
),
}
x = {
"foobar": 123 + 456,
}
x = {
"foobar": (123) + 456,
}
my_dict = {
"a key in my dict": (
a_very_long_variable * and_a_very_long_function_call() / 100000.0
)
}
my_dict = {
"a key in my dict": (
a_very_long_variable
* and_a_very_long_function_call()
* and_another_long_func()
/ 100000.0
)
}
my_dict = {
"a key in my dict": (
MyClass.some_attribute.first_call()
.second_call()
.third_call(some_args="some value")
)
}
{
"xxxxxx": xxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxx(
xxxxxxxxxxxxxx={
"x": xxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=(
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
xxxxxxxxxxxxx=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx={
"x": x.xx,
"x": x.x,
}
)
)
)
)
}
),
}
|
Random
|
python
|
scipy__scipy
|
scipy/stats/_continuous_distns.py
|
{
"start": 223105,
"end": 232261
}
|
class ____(rv_continuous):
r"""Kappa 4 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for kappa4 is:
.. math::
f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1}
if :math:`h` and :math:`k` are not equal to 0.
If :math:`h` or :math:`k` are zero then the pdf can be simplified:
:math:`h = 0` and :math:`k \neq 0`::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
exp(-(1.0 - k*x)**(1.0/k))
:math:`h \neq 0` and :math:`k = 0`::
kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
:math:`h = 0` and :math:`k = 0`::
kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
kappa4 takes :math:`h` and :math:`k` as shape parameters.
The kappa4 distribution returns other distributions when certain
:math:`h` and :math:`k` values are used.
+------+-------------+----------------+------------------+
| h | k=0.0 | k=1.0 | -inf<=k<=inf |
+======+=============+================+==================+
| -1.0 | Logistic | | Generalized |
| | | | Logistic(1) |
| | | | |
| | logistic(x) | | |
+------+-------------+----------------+------------------+
| 0.0 | Gumbel | Reverse | Generalized |
| | | Exponential(2) | Extreme Value |
| | | | |
| | gumbel_r(x) | | genextreme(x, k) |
+------+-------------+----------------+------------------+
| 1.0 | Exponential | Uniform | Generalized |
| | | | Pareto |
| | | | |
| | expon(x) | uniform(x) | genpareto(x, -k) |
+------+-------------+----------------+------------------+
(1) There are at least five generalized logistic distributions.
Four are described here:
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
The "fifth" one is the one kappa4 should match which currently
isn't implemented in scipy:
https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
https://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
(2) This distribution is currently not in scipy.
References
----------
J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
Faculty of the Louisiana State University and Agricultural and Mechanical
College, (August, 2004),
https://digitalcommons.lsu.edu/gradschool_dissertations/3672
J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
    Develop. 38 (3), 251-258 (1994).
B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
Site in the Chi River Basin, Thailand", Journal of Water Resource and
Protection, vol. 4, 866-869, (2012).
:doi:`10.4236/jwarp.2012.410101`
C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
2000).
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, h, k):
shape = np.broadcast_arrays(h, k)[0].shape
return np.full(shape, fill_value=True)
def _shape_info(self):
ih = _ShapeInfo("h", False, (-np.inf, np.inf), (False, False))
ik = _ShapeInfo("k", False, (-np.inf, np.inf), (False, False))
return [ih, ik]
def _get_support(self, h, k):
condlist = [np.logical_and(h > 0, k > 0),
np.logical_and(h > 0, k == 0),
np.logical_and(h > 0, k < 0),
np.logical_and(h <= 0, k > 0),
np.logical_and(h <= 0, k == 0),
np.logical_and(h <= 0, k < 0)]
def f0(h, k):
return (1.0 - np.float_power(h, -k))/k
def f1(h, k):
return np.log(h)
def f3(h, k):
a = np.empty(np.shape(h))
a[:] = -np.inf
return a
def f5(h, k):
return 1.0/k
_a = _lazyselect(condlist,
[f0, f1, f0, f3, f3, f5],
[h, k],
default=np.nan)
def f0(h, k):
return 1.0/k
def f1(h, k):
a = np.empty(np.shape(h))
a[:] = np.inf
return a
_b = _lazyselect(condlist,
[f0, f1, f1, f0, f1, f1],
[h, k],
default=np.nan)
return _a, _b
def _pdf(self, x, h, k):
# kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
# (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
return np.exp(self._logpdf(x, h, k))
def _logpdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
logpdf = ...
'''
return (sc.xlog1py(1.0/k - 1.0, -k*x) +
sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
def f1(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
1.0 - k*x)**(1.0/k))
logpdf = ...
'''
return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
logpdf = ...
'''
return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
def f3(x, h, k):
'''pdf = np.exp(-x-np.exp(-x))
logpdf = ...
'''
return -x - np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _cdf(self, x, h, k):
return np.exp(self._logcdf(x, h, k))
def _logcdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
def f1(x, h, k):
'''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
logcdf = ...
'''
return -(1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*np.exp(-x))
def f3(x, h, k):
'''cdf = np.exp(-np.exp(-x))
logcdf = ...
'''
return -np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _ppf(self, q, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(q, h, k):
return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
def f1(q, h, k):
return 1.0/k*(1.0 - (-np.log(q))**k)
def f2(q, h, k):
'''ppf = -np.log((1.0 - (q**h))/h)
'''
return -sc.log1p(-(q**h)) + np.log(h)
def f3(q, h, k):
return -np.log(-np.log(q))
return _lazyselect(condlist,
[f0, f1, f2, f3],
[q, h, k],
default=np.nan)
def _get_stats_info(self, h, k):
condlist = [
np.logical_and(h < 0, k >= 0),
k < 0,
]
def f0(h, k):
return (-1.0/h*k).astype(int)
def f1(h, k):
return (-1.0/k).astype(int)
return _lazyselect(condlist, [f0, f1], [h, k], default=5)
def _stats(self, h, k):
maxr = self._get_stats_info(h, k)
outputs = [None if np.any(r < maxr) else np.nan for r in range(1, 5)]
return outputs[:]
def _mom1_sc(self, m, *args):
maxr = self._get_stats_info(args[0], args[1])
if m >= maxr:
return np.nan
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
kappa4 = kappa4_gen(name='kappa4')
|
kappa4_gen
|
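The table in the kappa4 docstring lists special cases; the `h = 0` row (generalized extreme value) is easy to verify numerically. A sketch assuming standard SciPy (shape order is `(h, k)`, matching `_shape_info` above):

```python
import numpy as np
from scipy.stats import genextreme, kappa4

x = np.linspace(-2, 2, 9)
k = 0.3
# With h = 0, kappa4 should reduce to genextreme with shape k.
print(np.allclose(kappa4.pdf(x, 0.0, k), genextreme.pdf(x, k)))  # expected: True
```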
python
|
PrefectHQ__prefect
|
src/integrations/prefect-aws/prefect_aws/credentials.py
|
{
"start": 5785,
"end": 9553
}
|
class ____(CredentialsBlock):
"""
Block used to manage authentication with MinIO. Refer to the
[MinIO docs](https://docs.min.io/docs/minio-server-configuration-guide.html)
for more info about the possible credential configurations.
Attributes:
minio_root_user: Admin or root user.
minio_root_password: Admin or root password.
region_name: Location of server, e.g. "us-east-1".
Example:
Load stored MinIO credentials:
```python
from prefect_aws import MinIOCredentials
minio_credentials_block = MinIOCredentials.load("BLOCK_NAME")
```
""" # noqa E501
model_config = ConfigDict(arbitrary_types_allowed=True)
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/676cb17bcbdff601f97e0a02ff8bcb480e91ff40-250x250.png" # noqa
_block_type_name = "MinIO Credentials"
_description = (
"Block used to manage authentication with MinIO. Refer to the MinIO "
"docs: https://docs.min.io/docs/minio-server-configuration-guide.html "
"for more info about the possible credential configurations."
)
_documentation_url = "https://docs.prefect.io/integrations/prefect-aws" # noqa
minio_root_user: str = Field(default=..., description="Admin or root user.")
minio_root_password: SecretStr = Field(
default=..., description="Admin or root password."
)
region_name: Optional[str] = Field(
default=None,
description="The AWS Region where you want to create new connections.",
)
aws_client_parameters: AwsClientParameters = Field(
default_factory=AwsClientParameters,
description="Extra parameters to initialize the Client.",
)
def __hash__(self):
return hash(
(
hash(self.minio_root_user),
hash(self.minio_root_password),
hash(self.region_name),
hash(self.aws_client_parameters),
)
)
def get_boto3_session(self) -> boto3.Session:
"""
Returns an authenticated boto3 session that can be used to create clients
and perform object operations on MinIO server.
Example:
Create an S3 client from an authorized boto3 session
```python
minio_credentials = MinIOCredentials(
minio_root_user = "minio_root_user",
minio_root_password = "minio_root_password"
)
s3_client = minio_credentials.get_boto3_session().client(
service_name="s3",
endpoint_url="http://localhost:9000"
)
```
"""
minio_root_password = (
self.minio_root_password.get_secret_value()
if self.minio_root_password
else None
)
return boto3.Session(
aws_access_key_id=self.minio_root_user,
aws_secret_access_key=minio_root_password,
region_name=self.region_name,
)
def get_client(self, client_type: Union[str, ClientType]):
"""
Helper method to dynamically get a client type.
Args:
client_type: The client's service name.
Returns:
An authenticated client.
Raises:
ValueError: if the client is not supported.
"""
if isinstance(client_type, ClientType):
client_type = client_type.value
return _get_client_cached(ctx=self, client_type=client_type)
def get_s3_client(self) -> "S3Client":
"""
Gets an authenticated S3 client.
Returns:
An authenticated S3 client.
"""
return self.get_client(client_type=ClientType.S3)
|
MinIOCredentials
|
python
|
PyCQA__pylint
|
tests/functional/ext/docstyle/docstyle_first_line_empty.py
|
{
"start": 266,
"end": 486
}
|
class ____: # [docstring-first-line-empty]
"""
Test Docstring First Line Empty
"""
def method1(self): # [docstring-first-line-empty]
'''
Test Triple Single Quotes docstring
'''
|
FFFF
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/api/aliases_api.py
|
{
"start": 3043,
"end": 3934
}
|
class ____(_AliasesApi):
async def get_collection_aliases(
self,
collection_name: str,
) -> m.InlineResponse2008:
"""
Get list of all aliases for a collection
"""
return await self._build_for_get_collection_aliases(
collection_name=collection_name,
)
async def get_collections_aliases(
self,
) -> m.InlineResponse2008:
"""
Get list of all existing collections aliases
"""
return await self._build_for_get_collections_aliases()
async def update_aliases(
self,
timeout: int = None,
change_aliases_operation: m.ChangeAliasesOperation = None,
) -> m.InlineResponse200:
return await self._build_for_update_aliases(
timeout=timeout,
change_aliases_operation=change_aliases_operation,
)
|
AsyncAliasesApi
|
python
|
openai__openai-python
|
src/openai/types/eval_list_params.py
|
{
"start": 204,
"end": 754
}
|
class ____(TypedDict, total=False):
after: str
"""Identifier for the last eval from the previous pagination request."""
limit: int
"""Number of evals to retrieve."""
order: Literal["asc", "desc"]
"""Sort order for evals by timestamp.
Use `asc` for ascending order or `desc` for descending order.
"""
order_by: Literal["created_at", "updated_at"]
"""Evals can be ordered by creation time or last updated time.
Use `created_at` for creation time or `updated_at` for last updated time.
"""
|
EvalListParams
|
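These fields mirror the query parameters of the evals list endpoint; in the SDK they are passed as keyword arguments. A hedged sketch (assumes `OPENAI_API_KEY` is set and that the evals API is available on the client):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
# after / limit / order / order_by map 1:1 onto EvalListParams above.
for ev in client.evals.list(limit=10, order="desc", order_by="created_at"):
    print(ev.id)
```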
python
|
kamyu104__LeetCode-Solutions
|
Python/create-maximum-number.py
|
{
"start": 76,
"end": 1539
}
|
class ____(object):
def maxNumber(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[int]
"""
def get_max_digits(nums, start, end, max_digits):
max_digits[end] = max_digit(nums, end)
for i in reversed(xrange(start, end)):
max_digits[i] = delete_digit(max_digits[i + 1])
def max_digit(nums, k):
drop = len(nums) - k
res = []
for num in nums:
while drop and res and res[-1] < num:
res.pop()
drop -= 1
res.append(num)
return res[:k]
def delete_digit(nums):
res = list(nums)
for i in xrange(len(res)):
if i == len(res) - 1 or res[i] < res[i + 1]:
res = res[:i] + res[i+1:]
break
return res
def merge(a, b):
return [max(a, b).pop(0) for _ in xrange(len(a)+len(b))]
m, n = len(nums1), len(nums2)
max_digits1, max_digits2 = [[] for _ in xrange(k + 1)], [[] for _ in xrange(k + 1)]
get_max_digits(nums1, max(0, k - n), min(k, m), max_digits1)
get_max_digits(nums2, max(0, k - m), min(k, n), max_digits2)
return max(merge(max_digits1[i], max_digits2[k-i]) \
for i in xrange(max(0, k - n), min(k, m) + 1))
|
Solution
|
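The solution above is Python 2 (`xrange`): `max_digit` greedily keeps the largest subsequence of a fixed length with a monotonic stack, and `merge` interleaves two picks by repeatedly taking from the lexicographically larger remainder. A usage sketch with the classic example input:

```python
# Expected result for the classic example: [9, 8, 6, 5, 3]
print(Solution().maxNumber([3, 4, 6, 5], [9, 1, 2, 5, 8, 3], 5))
```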
python
|
kamyu104__LeetCode-Solutions
|
Python/online-election.py
|
{
"start": 110,
"end": 760
}
|
class ____(object):
def __init__(self, persons, times):
"""
:type persons: List[int]
:type times: List[int]
"""
lead = -1
self.__lookup, count = [], collections.defaultdict(int)
for t, p in itertools.izip(times, persons):
count[p] += 1
if count[p] >= count[lead]:
lead = p
self.__lookup.append((t, lead))
def q(self, t):
"""
:type t: int
:rtype: int
"""
return self.__lookup[bisect.bisect(self.__lookup,
(t, float("inf")))-1][1]
|
TopVotedCandidate
|
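`q(t)` binary-searches the prefix of `(time, leader)` pairs built in `__init__` (note the Python 2 `itertools.izip`). A usage sketch with the standard example:

```python
tvc = TopVotedCandidate([0, 1, 1, 0, 0, 1, 0], [0, 5, 10, 15, 20, 25, 30])
print(tvc.q(3))    # 0 -- only person 0 has voted so far
print(tvc.q(12))   # 1 -- person 1 leads 2-1
print(tvc.q(25))   # 1 -- on a 3-3 tie the most recent leader (person 1) wins
```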
python
|
keras-team__keras
|
keras/src/ops/numpy.py
|
{
"start": 129342,
"end": 132743
}
|
class ____(Operation):
def __init__(
self,
num=50,
endpoint=True,
retstep=False,
dtype=None,
axis=0,
*,
name=None,
):
super().__init__(name=name)
self.num = num
self.endpoint = endpoint
self.retstep = retstep
self.dtype = dtype
self.axis = axis
def call(self, start, stop):
return backend.numpy.linspace(
start,
stop,
num=self.num,
endpoint=self.endpoint,
retstep=self.retstep,
dtype=self.dtype,
axis=self.axis,
)
def compute_output_spec(self, start, stop):
start_shape = getattr(start, "shape", [])
stop_shape = getattr(stop, "shape", [])
output_shape = broadcast_shapes(start_shape, stop_shape)
if self.axis == -1:
output_shape = output_shape + [self.num]
elif self.axis >= 0:
output_shape = (
output_shape[: self.axis]
+ [self.num]
+ output_shape[self.axis :]
)
else:
output_shape = (
output_shape[: self.axis + 1]
+ [self.num]
+ output_shape[self.axis + 1 :]
)
dtype = (
self.dtype
if self.dtype is not None
else backend.standardize_dtype(getattr(start, "dtype", type(start)))
)
dtype = backend.result_type(dtype, float)
if self.retstep:
return (KerasTensor(output_shape, dtype=dtype), None)
return KerasTensor(output_shape, dtype=dtype)
@keras_export(["keras.ops.linspace", "keras.ops.numpy.linspace"])
def linspace(
start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
"""Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the interval
`[start, stop]`.
The endpoint of the interval can optionally be excluded.
Args:
start: The starting value of the sequence.
stop: The end value of the sequence, unless `endpoint` is set to
`False`. In that case, the sequence consists of all but the last
of `num + 1` evenly spaced samples, so that `stop` is excluded.
Note that the step size changes when `endpoint` is `False`.
num: Number of samples to generate. Defaults to `50`. Must be
non-negative.
endpoint: If `True`, `stop` is the last sample. Otherwise, it is
not included. Defaults to `True`.
retstep: If `True`, return `(samples, step)`, where `step` is the
spacing between samples.
dtype: The type of the output tensor.
axis: The axis in the result to store the samples. Relevant only if
start or stop are array-like. Defaults to `0`.
Note:
Torch backend does not support `axis` argument.
Returns:
A tensor of evenly spaced numbers.
If `retstep` is `True`, returns `(samples, step)`
"""
if any_symbolic_tensors((start, stop)):
return Linspace(num, endpoint, retstep, dtype, axis)(start, stop)
return backend.numpy.linspace(
start,
stop,
num=num,
endpoint=endpoint,
retstep=retstep,
dtype=dtype,
axis=axis,
)
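# A minimal usage sketch (illustrative only; assumes a configured Keras backend):
#   from keras import ops
#   ops.linspace(0.0, 1.0, num=5)                # -> [0.0, 0.25, 0.5, 0.75, 1.0]
#   ops.linspace(0.0, 1.0, num=5, retstep=True)  # -> (samples, step) with step == 0.25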
|
Linspace
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0112_alter_project_help_text.py
|
{
"start": 184,
"end": 1574
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0111_add_multiple_versions_without_translations"),
]
operations = [
migrations.AlterField(
model_name="historicalproject",
name="external_builds_privacy_level",
field=models.CharField(
choices=[("public", "Public"), ("private", "Private")],
default=readthedocs.projects.models.default_privacy_level,
help_text="Should builds from pull requests be public? <strong>If your repository is public, don't set this to private</strong>.",
max_length=20,
null=True,
verbose_name="Privacy level of Pull Requests",
),
),
migrations.AlterField(
model_name="project",
name="external_builds_privacy_level",
field=models.CharField(
choices=[("public", "Public"), ("private", "Private")],
default=readthedocs.projects.models.default_privacy_level,
help_text="Should builds from pull requests be public? <strong>If your repository is public, don't set this to private</strong>.",
max_length=20,
null=True,
verbose_name="Privacy level of Pull Requests",
),
),
]
|
Migration
|
python
|
doocs__leetcode
|
solution/2800-2899/2857.Count Pairs of Points With Distance k/Solution.py
|
{
"start": 0,
"end": 350
}
|
class ____:
def countPairs(self, coordinates: List[List[int]], k: int) -> int:
cnt = Counter()
ans = 0
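        # The distance is (x1 ^ x2) + (y1 ^ y2) == k. Split k into a + b; the only
        # partner of (x2, y2) contributing a to the x-part and b to the y-part is
        # (a ^ x2, b ^ y2), which is looked up among the points seen so far.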
for x2, y2 in coordinates:
for a in range(k + 1):
b = k - a
x1, y1 = a ^ x2, b ^ y2
ans += cnt[(x1, y1)]
cnt[(x2, y2)] += 1
return ans
|
Solution
|
python
|
catalyst-team__catalyst
|
tests/catalyst/callbacks/test_profiler.py
|
{
"start": 482,
"end": 1105
}
|
class ____(Dataset):
"""Dummy dataset."""
features_dim: int = 4
out_dim: int = 2
def __init__(self, num_records: int):
self.num_records = num_records
def __len__(self):
"""
Returns:
dataset's length.
"""
return self.num_records
def __getitem__(self, idx: int):
"""
Args:
idx: index of sample
Returns:
dummy features and targets vector
"""
x = torch.ones(self.features_dim, dtype=torch.float)
y = torch.ones(self.out_dim, dtype=torch.float)
return x, y
|
DummyDataset
|
python
|
networkx__networkx
|
networkx/algorithms/tests/test_euler.py
|
{
"start": 6429,
"end": 9603
}
|
class ____:
def test_eulerian_path(self):
x = [(4, 0), (0, 1), (1, 2), (2, 0)]
for e1, e2 in zip(x, nx.eulerian_path(nx.DiGraph(x))):
assert e1 == e2
def test_eulerian_path_straight_link(self):
G = nx.DiGraph()
result = [(1, 2), (2, 3), (3, 4), (4, 5)]
G.add_edges_from(result)
assert result == list(nx.eulerian_path(G))
assert result == list(nx.eulerian_path(G, source=1))
with pytest.raises(nx.NetworkXError):
list(nx.eulerian_path(G, source=3))
with pytest.raises(nx.NetworkXError):
list(nx.eulerian_path(G, source=4))
with pytest.raises(nx.NetworkXError):
list(nx.eulerian_path(G, source=5))
def test_eulerian_path_multigraph(self):
G = nx.MultiDiGraph()
result = [(2, 1), (1, 2), (2, 1), (1, 2), (2, 3), (3, 4), (4, 3)]
G.add_edges_from(result)
assert result == list(nx.eulerian_path(G))
assert result == list(nx.eulerian_path(G, source=2))
with pytest.raises(nx.NetworkXError):
list(nx.eulerian_path(G, source=3))
with pytest.raises(nx.NetworkXError):
list(nx.eulerian_path(G, source=4))
def test_eulerian_path_eulerian_circuit(self):
G = nx.DiGraph()
result = [(1, 2), (2, 3), (3, 4), (4, 1)]
result2 = [(2, 3), (3, 4), (4, 1), (1, 2)]
result3 = [(3, 4), (4, 1), (1, 2), (2, 3)]
G.add_edges_from(result)
assert result == list(nx.eulerian_path(G))
assert result == list(nx.eulerian_path(G, source=1))
assert result2 == list(nx.eulerian_path(G, source=2))
assert result3 == list(nx.eulerian_path(G, source=3))
def test_eulerian_path_undirected(self):
G = nx.Graph()
result = [(1, 2), (2, 3), (3, 4), (4, 5)]
result2 = [(5, 4), (4, 3), (3, 2), (2, 1)]
G.add_edges_from(result)
assert list(nx.eulerian_path(G)) in (result, result2)
assert result == list(nx.eulerian_path(G, source=1))
assert result2 == list(nx.eulerian_path(G, source=5))
with pytest.raises(nx.NetworkXError):
list(nx.eulerian_path(G, source=3))
with pytest.raises(nx.NetworkXError):
list(nx.eulerian_path(G, source=2))
def test_eulerian_path_multigraph_undirected(self):
G = nx.MultiGraph()
result = [(2, 1), (1, 2), (2, 1), (1, 2), (2, 3), (3, 4)]
G.add_edges_from(result)
assert result == list(nx.eulerian_path(G))
assert result == list(nx.eulerian_path(G, source=2))
with pytest.raises(nx.NetworkXError):
list(nx.eulerian_path(G, source=3))
with pytest.raises(nx.NetworkXError):
list(nx.eulerian_path(G, source=1))
@pytest.mark.parametrize(
("graph_type", "result"),
(
(nx.MultiGraph, [(0, 1, 0), (1, 0, 1)]),
(nx.MultiDiGraph, [(0, 1, 0), (1, 0, 0)]),
),
)
def test_eulerian_with_keys(self, graph_type, result):
G = graph_type([(0, 1), (1, 0)])
answer = nx.eulerian_path(G, keys=True)
assert list(answer) == result
|
TestEulerianPath
|
python
|
mlflow__mlflow
|
mlflow/evaluation/assessment.py
|
{
"start": 7333,
"end": 13374
}
|
class ____(_MlflowObject):
"""
Assessment data associated with an evaluation result.
Assessment is an enriched output from the evaluation that provides more context,
such as the rationale, source, and metadata for the evaluation result.
Example:
.. code-block:: python
from mlflow.evaluation import Assessment
assessment = Assessment(
name="answer_correctness",
value=0.5,
rationale="The answer is partially correct.",
)
"""
def __init__(
self,
name: str,
source: AssessmentSource | None = None,
value: bool | float | str | None = None,
rationale: str | None = None,
metadata: dict[str, Any] | None = None,
error_code: str | None = None,
error_message: str | None = None,
):
"""Construct a new Assessment instance.
Args:
name: The name of the piece of assessment.
source: The source of the assessment (AssessmentSource instance).
value: The value of the assessment. This can be a boolean, numeric, or string value.
rationale: The rationale / justification for the value.
metadata: Additional metadata for the assessment, e.g. the index of the chunk in the
retrieved documents that the assessment applies to.
error_code: An error code representing any issues encountered during the assessment.
error_message: A descriptive error message representing any issues encountered during
the assessment.
"""
if (value is None) == (error_code is None):
raise MlflowException(
"Exactly one of value or error_code must be specified for an assessment.",
INVALID_PARAMETER_VALUE,
)
if value is not None and error_message is not None:
raise MlflowException(
"error_message cannot be specified when value is specified.",
INVALID_PARAMETER_VALUE,
)
self._name = name
self._source = source or AssessmentSource(
source_type=AssessmentSourceType.SOURCE_TYPE_UNSPECIFIED,
source_id="unknown",
)
self._value = value
self._rationale = rationale
self._metadata = metadata or {}
self._error_code = error_code
self._error_message = error_message
self._boolean_value = None
self._numeric_value = None
self._string_value = None
if isinstance(value, bool):
self._boolean_value = value
elif isinstance(value, numbers.Number):
self._numeric_value = float(value)
elif value is not None:
self._string_value = str(value)
@property
def name(self) -> str:
"""The name of the assessment."""
return self._name
@property
def value(self) -> bool | float | str:
"""The assessment value."""
return self._value
@property
def rationale(self) -> str | None:
"""The rationale / justification for the assessment."""
return self._rationale
@property
def source(self) -> AssessmentSource:
"""The source of the assessment."""
return self._source
@property
def metadata(self) -> dict[str, Any]:
"""The metadata associated with the assessment."""
return self._metadata
@property
def error_code(self) -> str | None:
"""The error code."""
return self._error_code
@property
def error_message(self) -> str | None:
"""The error message."""
return self._error_message
def __eq__(self, __o):
if isinstance(__o, self.__class__):
return self.to_dictionary() == __o.to_dictionary()
return False
def to_dictionary(self) -> dict[str, Any]:
return {
"name": self.name,
"source": self.source.to_dictionary() if self.source is not None else None,
"value": self.value,
"rationale": self.rationale,
"metadata": self.metadata,
"error_code": self.error_code,
"error_message": self.error_message,
}
@classmethod
def from_dictionary(cls, assessment_dict: dict[str, Any]) -> "Assessment":
"""
Create an Assessment object from a dictionary.
Args:
assessment_dict (dict): Dictionary containing assessment information.
Returns:
Assessment: The Assessment object created from the dictionary.
"""
return cls(
name=assessment_dict["name"],
source=AssessmentSource.from_dictionary(assessment_dict["source"]),
value=assessment_dict.get("value"),
rationale=assessment_dict.get("rationale"),
metadata=assessment_dict.get("metadata"),
error_code=assessment_dict.get("error_code"),
error_message=assessment_dict.get("error_message"),
)
def _to_entity(self, evaluation_id: str) -> AssessmentEntity:
# We require that the source be specified for an assessment before sending it to the backend
if self._source is None:
raise MlflowException(
message=(
f"Assessment source must be specified."
f"Got empty source for assessment with name {self._name}"
),
error_code=INVALID_PARAMETER_VALUE,
)
return AssessmentEntity(
evaluation_id=evaluation_id,
name=self._name,
source=self._source,
timestamp=int(time.time() * 1000),
boolean_value=self._boolean_value,
numeric_value=self._numeric_value,
string_value=self._string_value,
rationale=self._rationale,
metadata=self._metadata,
error_code=self._error_code,
error_message=self._error_message,
)
|
Assessment
|
python
|
ray-project__ray
|
rllib/env/multi_agent_env.py
|
{
"start": 26795,
"end": 30904
}
|
class ____:
def __init__(self, env: MultiAgentEnv, return_error_as_obs: bool = False):
assert isinstance(env, MultiAgentEnv)
self.env = env
self.return_error_as_obs = return_error_as_obs
self.initialized = False
self.last_obs = {}
self.last_rewards = {}
self.last_terminateds = {"__all__": False}
self.last_truncateds = {"__all__": False}
self.last_infos = {}
def poll(
self,
) -> Tuple[
MultiAgentDict,
MultiAgentDict,
MultiAgentDict,
MultiAgentDict,
MultiAgentDict,
]:
if not self.initialized:
# TODO(sven): Should we make it possible to pass in a seed here?
self.reset()
self.initialized = True
observations = self.last_obs
rewards = {}
terminateds = {"__all__": self.last_terminateds["__all__"]}
truncateds = {"__all__": self.last_truncateds["__all__"]}
infos = self.last_infos
# If episode is done or we have an error, release everything we have.
if (
terminateds["__all__"]
or truncateds["__all__"]
or isinstance(observations, Exception)
):
rewards = self.last_rewards
self.last_rewards = {}
terminateds = self.last_terminateds
if isinstance(observations, Exception):
terminateds["__all__"] = True
truncateds["__all__"] = False
self.last_terminateds = {}
truncateds = self.last_truncateds
self.last_truncateds = {}
self.last_obs = {}
infos = self.last_infos
self.last_infos = {}
# Only release those agents' rewards/terminateds/truncateds/infos, whose
# observations we have.
else:
for ag in observations.keys():
if ag in self.last_rewards:
rewards[ag] = self.last_rewards[ag]
del self.last_rewards[ag]
if ag in self.last_terminateds:
terminateds[ag] = self.last_terminateds[ag]
del self.last_terminateds[ag]
if ag in self.last_truncateds:
truncateds[ag] = self.last_truncateds[ag]
del self.last_truncateds[ag]
self.last_terminateds["__all__"] = False
self.last_truncateds["__all__"] = False
return observations, rewards, terminateds, truncateds, infos
def observe(
self,
obs: MultiAgentDict,
rewards: MultiAgentDict,
terminateds: MultiAgentDict,
truncateds: MultiAgentDict,
infos: MultiAgentDict,
):
self.last_obs = obs
for ag, r in rewards.items():
if ag in self.last_rewards:
self.last_rewards[ag] += r
else:
self.last_rewards[ag] = r
for ag, d in terminateds.items():
if ag in self.last_terminateds:
self.last_terminateds[ag] = self.last_terminateds[ag] or d
else:
self.last_terminateds[ag] = d
for ag, t in truncateds.items():
if ag in self.last_truncateds:
self.last_truncateds[ag] = self.last_truncateds[ag] or t
else:
self.last_truncateds[ag] = t
self.last_infos = infos
def reset(
self,
*,
seed: Optional[int] = None,
options: Optional[dict] = None,
) -> Tuple[MultiAgentDict, MultiAgentDict]:
try:
obs_and_infos = self.env.reset(seed=seed, options=options)
except Exception as e:
if self.return_error_as_obs:
logger.exception(e.args[0])
obs_and_infos = e, e
else:
raise e
self.last_obs, self.last_infos = obs_and_infos
self.last_rewards = {}
self.last_terminateds = {"__all__": False}
self.last_truncateds = {"__all__": False}
return self.last_obs, self.last_infos
|
_MultiAgentEnvState
|
python
|
kamyu104__LeetCode-Solutions
|
Python/unique-length-3-palindromic-subsequences.py
|
{
"start": 29,
"end": 467
}
|
class ____(object):
def countPalindromicSubsequence(self, s):
"""
:type s: str
:rtype: int
"""
first, last = [len(s)]*26, [-1]*26
for i, c in enumerate(s):
first[ord(c)-ord('a')] = min(first[ord(c)-ord('a')], i)
last[ord(c)-ord('a')] = max(last[ord(c)-ord('a')], i)
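        # For every outer character c, the distinct palindromes "c?c" correspond to
        # the distinct characters strictly between its first and last occurrence.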
return sum(len(set(s[i] for i in xrange(first[c]+1, last[c]))) for c in xrange(26))
|
Solution
|
python
|
huggingface__transformers
|
tests/models/siglip/test_tokenization_siglip.py
|
{
"start": 1009,
"end": 11596
}
|
class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "google/siglip-base-patch16-224"
tokenizer_class = SiglipTokenizer
test_rust_tokenizer = False
test_sentencepiece = True
test_sentencepiece_ignore_case = True
@classmethod
def setUpClass(cls):
super().setUpClass()
# We have a SentencePiece fixture for testing
tokenizer = SiglipTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(cls.tmpdirname)
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "<s>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<unk>")
self.assertEqual(vocab_keys[1], "<s>")
def test_full_tokenizer(self):
tokenizer = SiglipTokenizer(SAMPLE_VOCAB)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [66, 46, 10, 170, 382])
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE,
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(ids, [7, 23, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 12, 66, 46, 72, 80, 6, 0])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE,
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
],
)
@cached_property
def siglip_tokenizer(self):
return SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224")
@classmethod
def get_tokenizer(cls, pretrained_name=None, **kwargs) -> SiglipTokenizer:
pretrained_name = pretrained_name or cls.tmpdirname
return cls.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
def test_eos_treatment(self):
tokenizer = self.siglip_tokenizer
batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
def test_prepare_batch(self):
tokenizer = self.siglip_tokenizer
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
expected_src_tokens = [262, 266, 476, 8532, 270, 4460, 3949, 1682, tokenizer.eos_token_id]
batch = tokenizer(src_text, padding=True, return_tensors="pt")
self.assertIsInstance(batch, BatchEncoding)
result = list(batch.input_ids.numpy()[0])
self.assertListEqual(expected_src_tokens, result)
self.assertEqual((2, 9), batch.input_ids.shape)
def test_empty_target_text(self):
tokenizer = self.siglip_tokenizer
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
batch = tokenizer(src_text, padding=True, return_tensors="pt")
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", batch)
self.assertNotIn("decoder_input_ids", batch)
self.assertNotIn("decoder_attention_mask", batch)
def test_max_length(self):
tokenizer = self.siglip_tokenizer
tgt_text = ["Summary of the text.", "Another summary."]
targets = tokenizer(
text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
)
self.assertEqual(32, targets["input_ids"].shape[1])
def test_eos_in_input(self):
tokenizer = self.siglip_tokenizer
src_text = ["A long paragraph for summarization. </s>"]
tgt_text = ["Summary of the text. </s>"]
expected_src_tokens = [262, 266, 476, 8532, 270, 4460, 3949, 1682, 1]
expected_tgt_tokens = [6254, 267, 260, 1443, 1]
batch = tokenizer(src_text, text_target=tgt_text)
self.assertEqual(expected_src_tokens, batch["input_ids"][0])
self.assertEqual(expected_tgt_tokens, batch["labels"][0])
@unittest.skip(reason="SiglipTokenizer strips the punctuation")
def test_subword_regularization_tokenizer(self):
pass
def test_special_tokens_initialization(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
added_tokens = [f"<extra_id_{i}>" for i in range(100)] + [AddedToken("<special>", lstrip=True)]
tokenizer_r = self.get_tokenizer(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
r_output = tokenizer_r.encode("Hey this is a <special> token")
special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
self.assertTrue(special_token_id in r_output)
def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
"""Test ``_tokenize`` and ``convert_tokens_to_string``."""
if not self.test_sentencepiece:
self.skipTest(reason="test_sentencepiece is set to False")
tokenizer = self.get_tokenizer()
text = "This is text to test the tokenizer."
if self.test_sentencepiece_ignore_case:
text = text.lower()
tokens = tokenizer.tokenize(text)
self.assertTrue(len(tokens) > 0)
# check if converting back to original text works
reverse_text = tokenizer.convert_tokens_to_string(tokens)
if self.test_sentencepiece_ignore_case:
reverse_text = reverse_text.lower()
expected_text = "this is text to test the tokenizer"
self.assertEqual(reverse_text, expected_text)
special_tokens = tokenizer.all_special_tokens
special_tokens_string = tokenizer.convert_tokens_to_string(special_tokens)
for special_token in special_tokens:
self.assertIn(special_token, special_tokens_string)
if self.test_rust_tokenizer:
rust_tokenizer = self.get_rust_tokenizer()
special_tokens_string_rust = rust_tokenizer.convert_tokens_to_string(special_tokens)
self.assertEqual(special_tokens_string, special_tokens_string_rust)
@slow
def test_tokenizer_integration(self):
tokenizer = SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224")
# fmt: off
texts = [
'the real mountain view',
'Zürich',
'San Francisco',
'a picture of a laptop with the lockscreen on, a cup of cappucino, salt and pepper grinders. The view through the window reveals lake Zürich and the Alps in the background of the city.',
]
expected_input_ids = [
[260, 638, 3293, 870, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[262, 761, 5879, 5345, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[262, 264, 452, 20563, 15949, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[262, 266, 1357, 267, 262, 266, 4429, 275, 260, 3940, 6360, 277, 262, 266, 3064, 267, 3549, 388, 16538, 296, 298, 2617, 263, 4869, 14998, 264, 260, 870, 393, 260, 1710, 7958, 4324, 262, 761, 5879, 5345, 263, 260, 1518, 388, 264, 268, 260, 1970, 267, 260, 741, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
# fmt: on
for text, expected in zip(texts, expected_input_ids):
input_ids = tokenizer(text, padding="max_length").input_ids
self.assertListEqual(input_ids, expected)
def test_some_edge_cases(self):
tokenizer = SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224", legacy=False)
sp_tokens = tokenizer.sp_model.encode("</s>>", out_type=str)
self.assertEqual(sp_tokens, ["</", "s", ">", ">"])
tokens = tokenizer.tokenize("</s>>")
self.assertNotEqual(sp_tokens, tokens)
self.assertEqual(tokens, ["</s>"])
tokens = tokenizer.tokenize("")
self.assertEqual(tokens, [])
self.assertEqual(tokens, tokenizer.sp_model.encode("", out_type=str))
tokens = tokenizer.tokenize(" ")
self.assertEqual(tokens, [])
self.assertEqual(tokens, tokenizer.sp_model.encode(" ", out_type=str))
tokens = tokenizer.tokenize("▁")
self.assertEqual(tokens, [])
self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str))
tokens = tokenizer.tokenize(" ▁")
self.assertEqual(tokens, [])
self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str))
@require_sentencepiece
@require_tokenizers
|
SiglipTokenizationTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/debug/cli/analyzer_cli_test.py
|
{
"start": 21359,
"end": 62755
}
|
class ____(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
cls._dump_root_for_unique = tempfile.mkdtemp()
cls._is_gpu_available = test.is_gpu_available()
if cls._is_gpu_available:
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
cls._curr_file_path = os.path.abspath(
tf_inspect.getfile(tf_inspect.currentframe()))
cls._sess = session.Session(config=no_rewrite_session_config())
with cls._sess as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
u_name = "simple_mul_add/u"
v_name = "simple_mul_add/v"
u_init = constant_op.constant(u_init_val, shape=[2, 2], name="u_init")
u = variable_v1.VariableV1(u_init, name=u_name)
cls._u_line_number = line_number_above()
v_init = constant_op.constant(v_init_val, shape=[2, 1], name="v_init")
v = variable_v1.VariableV1(v_init, name=v_name)
cls._v_line_number = line_number_above()
w = math_ops.matmul(u, v, name="simple_mul_add/matmul")
cls._w_line_number = line_number_above()
x = math_ops.add(w, w, name="simple_mul_add/add")
cls._x_line_number = line_number_above()
a = variable_v1.VariableV1([1, 3, 3, 7], name="a")
u.initializer.run()
v.initializer.run()
a.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % cls._dump_root)
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run([x], options=run_options, run_metadata=run_metadata)
cls._debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
file_io.delete_recursively(cls._dump_root)
file_io.delete_recursively(cls._dump_root_for_unique)
def testMeasureTensorListColumnWidthsGivesRightAnswerForEmptyData(self):
timestamp_col_width, dump_size_col_width, op_type_col_width = (
self._analyzer._measure_tensor_list_column_widths([]))
self.assertEqual(len("t (ms)") + 1, timestamp_col_width)
self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
self.assertEqual(len("Op type") + 1, op_type_col_width)
def testMeasureTensorListColumnWidthsGivesRightAnswerForData(self):
dump = self._debug_dump.dumped_tensor_data[0]
self.assertLess(dump.dump_size_bytes, 1000)
self.assertEqual(
"VariableV2", self._debug_dump.node_op_type(dump.node_name))
_, dump_size_col_width, op_type_col_width = (
self._analyzer._measure_tensor_list_column_widths([dump]))
# The length of str(dump.dump_size_bytes) is less than the length of
# "Size (B)" (8). So the column width should be determined by the length of
# "Size (B)".
self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
    # The length of "VariableV2" is greater than the length of "Op type", so the
    # column width should be determined by the length of "VariableV2".
self.assertEqual(len("VariableV2") + 1, op_type_col_width)
def testListTensors(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", [])
assert_listed_tensors(self, out, [
"simple_mul_add/u:0", "simple_mul_add/v:0", "simple_mul_add/u/read:0",
"simple_mul_add/v/read:0", "simple_mul_add/matmul:0",
"simple_mul_add/add:0"
], [
"VariableV2", "VariableV2", "Identity", "Identity",
_matmul_op_name(), "AddV2"
])
# Check the main menu.
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseTimeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "timestamp", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
], [
"VariableV2", "VariableV2", "Identity", "Identity",
_matmul_op_name(), "AddV2"
],
sort_by="timestamp",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInDumpSizeOrderWorks(self):
out = self._registry.dispatch_command("lt", ["-s", "dump_size"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
], [
"VariableV2", "VariableV2", "Identity", "Identity",
_matmul_op_name(), "AddV2"
],
sort_by="dump_size")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseDumpSizeOrderWorks(self):
out = self._registry.dispatch_command("lt", ["-s", "dump_size", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
], [
"VariableV2", "VariableV2", "Identity", "Identity",
_matmul_op_name(), "AddV2"
],
sort_by="dump_size",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsWithInvalidSortByFieldGivesError(self):
out = self._registry.dispatch_command("lt", ["-s", "foobar"])
self.assertIn("ValueError: Unsupported key to sort tensors by: foobar",
out.lines)
def testListTensorsInOpTypeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "op_type"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
], [
"VariableV2", "VariableV2", "Identity", "Identity",
_matmul_op_name(), "AddV2"
],
sort_by="op_type",
reverse=False)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseOpTypeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "op_type", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
], [
"VariableV2", "VariableV2", "Identity", "Identity",
_matmul_op_name(), "AddV2"
],
sort_by="op_type",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInTensorNameOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "tensor_name"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
], [
"VariableV2", "VariableV2", "Identity", "Identity",
_matmul_op_name(), "AddV2"
],
sort_by="tensor_name",
reverse=False)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseTensorNameOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "tensor_name", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
], [
"VariableV2", "VariableV2", "Identity", "Identity",
_matmul_op_name(), "AddV2"
],
sort_by="tensor_name",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsFilterByNodeNameRegex(self):
out = self._registry.dispatch_command("list_tensors",
["--node_name_filter", ".*read.*"])
assert_listed_tensors(
self,
out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
["Identity", "Identity"],
node_name_regex=".*read.*")
out = self._registry.dispatch_command("list_tensors", ["-n", "^read"])
assert_listed_tensors(self, out, [], [], node_name_regex="^read")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorFilterByOpTypeRegex(self):
out = self._registry.dispatch_command("list_tensors",
["--op_type_filter", "Identity"])
assert_listed_tensors(
self,
out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
["Identity", "Identity"],
op_type_regex="Identity")
out = self._registry.dispatch_command(
"list_tensors", ["-t", "(Add|" + _matmul_op_name() + ")"])
assert_listed_tensors(
self,
out, ["simple_mul_add/add:0", "simple_mul_add/matmul:0"],
["AddV2", _matmul_op_name()],
op_type_regex=("(Add|" + _matmul_op_name() + ")"))
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorFilterByNodeNameRegexAndOpTypeRegex(self):
out = self._registry.dispatch_command(
"list_tensors", ["-t", "(Add|MatMul)", "-n", ".*add$"])
assert_listed_tensors(
self,
out, ["simple_mul_add/add:0"], ["AddV2"],
node_name_regex=".*add$",
op_type_regex="(Add|MatMul)")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorWithFilterAndNodeNameExclusionWorks(self):
# First, create and register the filter.
def is_2x1_vector(datum, tensor):
del datum # Unused.
return list(tensor.shape) == [2, 1]
self._analyzer.add_tensor_filter("is_2x1_vector", is_2x1_vector)
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command(
"lt", ["-f", "is_2x1_vector", "--filter_exclude_node_names", ".*v.*"])
# If the --filter_exclude_node_names were not used, then the matching
# tensors would be:
# - simple_mul_add/v:0
# - simple_mul_add/v/read:0
# - simple_mul_add/matmul:0
# - simple_mul_add/add:0
#
# With the --filter_exclude_node_names option, only the last two should
# show up in the result.
assert_listed_tensors(
self,
out, ["simple_mul_add/matmul:0", "simple_mul_add/add:0"],
[_matmul_op_name(), "AddV2"],
tensor_filter_name="is_2x1_vector")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsFilterNanOrInf(self):
"""Test register and invoke a tensor filter."""
# First, register the filter.
self._analyzer.add_tensor_filter("has_inf_or_nan",
debug_data.has_inf_or_nan)
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-f", "has_inf_or_nan"])
# This TF graph run did not generate any bad numerical values.
assert_listed_tensors(
self, out, [], [], tensor_filter_name="has_inf_or_nan")
# TODO(cais): A test with some actual bad numerical values.
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorNonexistentFilter(self):
"""Test attempt to use a nonexistent tensor filter."""
out = self._registry.dispatch_command("lt", ["-f", "foo_filter"])
self.assertEqual(["ERROR: There is no tensor filter named \"foo_filter\"."],
out.lines)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInvalidOptions(self):
out = self._registry.dispatch_command("list_tensors", ["--bar"])
check_syntax_error_output(self, out, "list_tensors")
def testNodeInfoByNodeName(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", [node_name])
recipients = [("AddV2", "simple_mul_add/add"),
("AddV2", "simple_mul_add/add")]
assert_node_attribute_lines(self, out, node_name, _matmul_op_name(),
self._main_device,
[("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
recipients, [])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
# Verify that the node name is bold in the first line.
self.assertEqual(
[(len(out.lines[0]) - len(node_name), len(out.lines[0]), "bold")],
out.font_attr_segs[0])
def testNodeInfoShowAttributes(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-a", node_name])
test_attr_key_val_pairs = [("transpose_a", "b: false"),
("transpose_b", "b: false"),
("T", "type: DT_DOUBLE")]
if test_util.IsMklEnabled():
test_attr_key_val_pairs.append(("_kernel", 's: "MklNameChangeOp"'))
assert_node_attribute_lines(
self,
out,
node_name,
_matmul_op_name(),
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("AddV2", "simple_mul_add/add"), ("AddV2", "simple_mul_add/add")], [],
attr_key_val_pairs=test_attr_key_val_pairs)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoShowDumps(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-d", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
_matmul_op_name(),
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("AddV2", "simple_mul_add/add"), ("AddV2", "simple_mul_add/add")], [],
num_dumped_tensors=1)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 16,
len(out.lines[16]) - len(out.lines[16].strip()),
len(out.lines[16]), "pt %s:0 -n 0" % node_name)
def testNodeInfoShowStackTraceUnavailableIsIndicated(self):
self._debug_dump.set_python_graph(None)
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-t", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
_matmul_op_name(),
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("AddV2", "simple_mul_add/add"), ("AddV2", "simple_mul_add/add")], [],
show_stack_trace=True,
stack_trace_available=False)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoShowStackTraceAvailableWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-t", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
_matmul_op_name(),
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("AddV2", "simple_mul_add/add"), ("AddV2", "simple_mul_add/add")], [],
show_stack_trace=True,
stack_trace_available=True)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoByTensorName(self):
node_name = "simple_mul_add/u/read"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command("node_info", [tensor_name])
assert_node_attribute_lines(self, out, node_name, "Identity",
self._main_device,
[("VariableV2", "simple_mul_add/u")], [],
[(_matmul_op_name(), "simple_mul_add/matmul")],
[])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoNonexistentNodeName(self):
out = self._registry.dispatch_command("node_info", ["bar"])
self.assertEqual(
["ERROR: There is no node named \"bar\" in the partition graphs"],
out.lines)
# Check color indicating error.
self.assertEqual({0: [(0, 59, cli_shared.COLOR_RED)]}, out.font_attr_segs)
check_main_menu(self, out, list_tensors_enabled=True)
def testPrintTensor(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\":" % tensor_name,
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorAndWriteToNpyFile(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
npy_path = os.path.join(self._dump_root, "matmul.npy")
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "-w", npy_path],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\":" % tensor_name,
" dtype: float64",
" shape: (2, 1)",
"",
], out.lines[:4])
self.assertTrue(out.lines[4].startswith("Saved value to: %s (" % npy_path))
# Load the numpy file and verify its contents.
self.assertAllClose([[7.0], [-2.0]], np.load(npy_path))
def testPrintTensorHighlightingRanges(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
self.assertEqual([(8, 11, "bold")], out.font_attr_segs[5])
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[[-inf, -5.5], [5.5, inf]]"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([[-inf, -5.5], [5.5, inf]]): "
"1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
self.assertEqual([(9, 11, "bold")], out.font_attr_segs[4])
self.assertNotIn(5, out.font_attr_segs)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorHighlightingRangesAndIncludingNumericSummary(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]", "-s"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"Numeric summary:",
"| - + | total |",
"| 1 1 | 2 |",
"| min max mean std |",
"| -2.0 7.0 2.5 4.5 |",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(10, out.annotations)
self.assertIn(11, out.annotations)
self.assertEqual([(8, 11, "bold")], out.font_attr_segs[11])
def testPrintTensorWithSlicing(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name + "[1, :]"], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity[1, :]\":" % tensor_name, " dtype: float64",
" shape: (1,)", "", "array([-2.])"
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorInvalidSlicingString(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name + "[1, foo()]"], screen_info={"cols": 80})
self.assertEqual("Error occurred during handling of command: print_tensor "
+ tensor_name + "[1, foo()]:", out.lines[0])
self.assertEqual("ValueError: Invalid tensor-slicing string.",
out.lines[-2])
def testPrintTensorValidExplicitNumber(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "-n", "0"], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\":" % tensor_name,
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorInvalidExplicitNumber(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "-n", "1"], screen_info={"cols": 80})
self.assertEqual([
"ERROR: Invalid number (1) for tensor simple_mul_add/matmul:0, "
"which generated one dump."
], out.lines)
self.assertNotIn("tensor_metadata", out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorMissingOutputSlotLeadsToOnlyDumpedTensorPrinted(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("print_tensor", [node_name])
self.assertEqual([
"Tensor \"%s:0:DebugIdentity\":" % node_name, " dtype: float64",
" shape: (2, 1)", "", "array([[ 7.],", " [-2.]])"
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorNonexistentNodeName(self):
out = self._registry.dispatch_command(
"print_tensor", ["simple_mul_add/matmul/foo:0"])
self.assertEqual([
"ERROR: Node \"simple_mul_add/matmul/foo\" does not exist in partition "
"graphs"
], out.lines)
check_main_menu(self, out, list_tensors_enabled=True)
def testEvalExpression(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"eval", ["np.matmul(`%s`, `%s`.T)" % (tensor_name, tensor_name)],
screen_info={"cols": 80})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["Tensor \"from eval of expression "
"'np.matmul(`simple_mul_add/matmul:0`, "
"`simple_mul_add/matmul:0`.T)'\":",
" dtype: float64",
" shape: (2, 2)",
"",
"Numeric summary:",
"| - + | total |",
"| 2 2 | 4 |",
"| min max mean std |"],
out.lines[:8])
cli_test_utils.assert_array_lines_close(
self, [-14.0, 49.0, 6.25, 25.7524270701], out.lines[8:9])
cli_test_utils.assert_array_lines_close(
self, [[49.0, -14.0], [-14.0, 4.0]], out.lines[10:])
def testEvalExpressionAndWriteToNpyFile(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
npy_path = os.path.join(self._dump_root, "matmul_eval.npy")
out = self._registry.dispatch_command(
"eval",
["np.matmul(`%s`, `%s`.T)" % (tensor_name, tensor_name), "-w",
npy_path], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"from eval of expression "
"'np.matmul(`simple_mul_add/matmul:0`, "
"`simple_mul_add/matmul:0`.T)'\":",
" dtype: float64",
" shape: (2, 2)",
""], out.lines[:4])
self.assertTrue(out.lines[4].startswith("Saved value to: %s (" % npy_path))
# Load the numpy file and verify its contents.
self.assertAllClose([[49.0, -14.0], [-14.0, 4.0]], np.load(npy_path))
def testAddGetTensorFilterLambda(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
analyzer.add_tensor_filter("foo_filter", lambda x, y: True)
self.assertTrue(analyzer.get_tensor_filter("foo_filter")(None, None))
def testAddGetTensorFilterNestedFunction(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
def foo_filter(unused_arg_0, unused_arg_1):
return True
analyzer.add_tensor_filter("foo_filter", foo_filter)
self.assertTrue(analyzer.get_tensor_filter("foo_filter")(None, None))
def testAddTensorFilterEmptyName(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
with self.assertRaisesRegex(ValueError,
"Input argument filter_name cannot be empty."):
analyzer.add_tensor_filter("", lambda datum, tensor: True)
def testAddTensorFilterNonStrName(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
with self.assertRaisesRegex(
TypeError, "Input argument filter_name is expected to be str, "
"but is not"):
analyzer.add_tensor_filter(1, lambda datum, tensor: True)
def testAddGetTensorFilterNonCallable(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
with self.assertRaisesRegex(
TypeError, "Input argument filter_callable is expected to be callable, "
"but is not."):
analyzer.add_tensor_filter("foo_filter", "bar")
def testGetNonexistentTensorFilter(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
analyzer.add_tensor_filter("foo_filter", lambda datum, tensor: True)
with self.assertRaisesRegex(ValueError,
"There is no tensor filter named \"bar\""):
analyzer.get_tensor_filter("bar")
def _findSourceLine(self, annotated_source, line_number):
"""Find line of given line number in annotated source.
Args:
annotated_source: (debugger_cli_common.RichTextLines) the annotated source
line_number: (int) 1-based line number
Returns:
(int) If line_number is found, 0-based line index in
annotated_source.lines. Otherwise, None.
"""
index = None
for i, line in enumerate(annotated_source.lines):
if line.startswith("L%d " % line_number):
index = i
break
return index
def testPrintSourceForOpNamesWholeFileWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source", [self._curr_file_path], screen_info={"cols": 80})
# Verify the annotation of the line that creates u.
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variable_v1.VariableV1(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" simple_mul_add/u/Assign",
" simple_mul_add/u/read"],
out.lines[index : index + 4])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
# simple_mul_add/u/Assign is not used in this run because the Variable has
# already been initialized.
self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])
self.assertEqual("pt simple_mul_add/u/read",
out.font_attr_segs[index + 3][0][2].content)
# Verify the annotation of the line that creates v.
index = self._findSourceLine(out, self._v_line_number)
self.assertEqual(
["L%d v = variable_v1.VariableV1(v_init, name=v_name)" %
self._v_line_number,
" simple_mul_add/v"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/v",
out.font_attr_segs[index + 1][0][2].content)
# Verify the annotation of the line that creates w.
index = self._findSourceLine(out, self._w_line_number)
self.assertEqual(
["L%d " % self._w_line_number +
"w = math_ops.matmul(u, v, name=\"simple_mul_add/matmul\")",
" simple_mul_add/matmul"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/matmul",
out.font_attr_segs[index + 1][0][2].content)
# Verify the annotation of the line that creates x.
index = self._findSourceLine(out, self._x_line_number)
self.assertEqual(
["L%d " % self._x_line_number +
"x = math_ops.add(w, w, name=\"simple_mul_add/add\")",
" simple_mul_add/add"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/add",
out.font_attr_segs[index + 1][0][2].content)
def testPrintSourceForTensorNamesWholeFileWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "--tensors"],
screen_info={"cols": 80})
# Verify the annotation of the line that creates u.
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variable_v1.VariableV1(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u/read:0",
" simple_mul_add/u:0"],
out.lines[index : index + 3])
self.assertEqual("pt simple_mul_add/u/read:0",
out.font_attr_segs[index + 1][0][2].content)
self.assertEqual("pt simple_mul_add/u:0",
out.font_attr_segs[index + 2][0][2].content)
def testPrintSourceForOpNamesStartingAtSpecifiedLineWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "-b", "3"],
screen_info={"cols": 80})
self.assertEqual(
2, out.annotations[debugger_cli_common.INIT_SCROLL_POS_KEY])
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variable_v1.VariableV1(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" simple_mul_add/u/Assign",
" simple_mul_add/u/read"],
out.lines[index : index + 4])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
# simple_mul_add/u/Assign is not used in this run because the Variable has
# already been initialized.
self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])
self.assertEqual("pt simple_mul_add/u/read",
out.font_attr_segs[index + 3][0][2].content)
def testPrintSourceForOpNameSettingMaximumElementCountWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "-m", "1"],
screen_info={"cols": 80})
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variable_v1.VariableV1(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" (... Omitted 2 of 3 op(s) ...) +5"],
out.lines[index : index + 3])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
more_elements_command = out.font_attr_segs[index + 2][-1][2].content
self.assertStartsWith(more_elements_command,
"ps %s " % self._curr_file_path)
self.assertIn(" -m 6", more_elements_command)
def testListSourceWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", [])
non_tf_lib_files_start = [
i for i in range(len(out.lines))
if out.lines[i].startswith("Source file path")
][0] + 1
non_tf_lib_files_end = [
i for i in range(len(out.lines))
if out.lines[i].startswith("TensorFlow Python library file(s):")
][0] - 1
non_tf_lib_files = [
line.split(" ")[0] for line
in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]
self.assertIn(self._curr_file_path, non_tf_lib_files)
# Check that the TF library files are marked with special color attribute.
for i in range(non_tf_lib_files_end + 1, len(out.lines)):
if not out.lines[i]:
continue
for attr_seg in out.font_attr_segs[i]:
self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
attr_seg[2] == cli_shared.COLOR_GRAY)
def testListSourceWithNodeNameFilterWithMatchesWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", ["-n", ".*/read"])
self.assertStartsWith(out.lines[1], "Node name regex filter: \".*/read\"")
non_tf_lib_files_start = [
i for i in range(len(out.lines))
if out.lines[i].startswith("Source file path")
][0] + 1
non_tf_lib_files_end = [
i for i in range(len(out.lines))
if out.lines[i].startswith("TensorFlow Python library file(s):")
][0] - 1
non_tf_lib_files = [
line.split(" ")[0] for line
in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]
self.assertIn(self._curr_file_path, non_tf_lib_files)
# Check that the TF library files are marked with special color attribute.
for i in range(non_tf_lib_files_end + 1, len(out.lines)):
if not out.lines[i]:
continue
for attr_seg in out.font_attr_segs[i]:
self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
attr_seg[2] == cli_shared.COLOR_GRAY)
def testListSourceWithNodeNameFilterWithNoMatchesWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", ["-n", "^$"])
self.assertEqual([
"List of source files that created nodes in this run",
"Node name regex filter: \"^$\"", "",
"[No source file information.]"], out.lines)
def testListSourceWithPathAndNodeNameFiltersWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"list_source", ["-p", self._curr_file_path, "-n", ".*read"])
self.assertEqual([
"List of source files that created nodes in this run",
"File path regex filter: \"%s\"" % self._curr_file_path,
"Node name regex filter: \".*read\"", ""], out.lines[:4])
def testListSourceWithCompiledPythonSourceWorks(self):
def fake_list_source_files_against_dump(dump,
path_regex_allowlist=None,
node_name_regex_allowlist=None):
del dump, path_regex_allowlist, node_name_regex_allowlist
return [("compiled_1.pyc", False, 10, 20, 30, 4),
("compiled_2.pyo", False, 10, 20, 30, 5),
("uncompiled.py", False, 10, 20, 30, 6)]
with test.mock.patch.object(
source_utils, "list_source_files_against_dump",
side_effect=fake_list_source_files_against_dump):
out = self._registry.dispatch_command("list_source", [])
self.assertStartsWith(out.lines[4], "compiled_1.pyc")
self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),
out.font_attr_segs[4][0])
self.assertStartsWith(out.lines[5], "compiled_2.pyo")
self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),
out.font_attr_segs[5][0])
self.assertStartsWith(out.lines[6], "uncompiled.py")
self.assertEqual(0, out.font_attr_segs[6][0][0])
self.assertEqual(13, out.font_attr_segs[6][0][1])
self.assertEqual(cli_shared.COLOR_WHITE, out.font_attr_segs[6][0][2][0])
self.assertEqual("ps uncompiled.py -b 6",
out.font_attr_segs[6][0][2][1].content)
def testListInputInvolvingNodesWithMultipleOutputs(self):
"""List an input tree containing tensors from non-:0 output slot."""
with session.Session(config=no_rewrite_session_config()) as sess:
with ops.device("CPU:0"):
x = variable_v1.VariableV1([1, 3, 3, 7], name="x")
_, idx = array_ops.unique(x, name="x_unique")
idx_times_two = math_ops.multiply(idx, 2, name="idx_times_two")
self.evaluate(x.initializer)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % self._dump_root_for_unique)
run_metadata = config_pb2.RunMetadata()
self.assertAllEqual([0, 2, 2, 4],
sess.run(
idx_times_two,
options=run_options,
run_metadata=run_metadata))
debug_dump = debug_data.DebugDumpDir(
self._dump_root_for_unique,
partition_graphs=run_metadata.partition_graphs)
_, registry = create_analyzer_cli(debug_dump)
out = registry.dispatch_command("li", ["idx_times_two"])
self.assertEqual([
"Inputs to node \"idx_times_two\" (Depth limit = 1):",
"|- (1) x_unique:1"
], out.lines[:2])
|
AnalyzerCLISimpleMulAddTest
|
python
|
pytorch__pytorch
|
torch/_inductor/fx_passes/group_batch_fusion.py
|
{
"start": 48640,
"end": 48816
}
|
class ____(BatchMathOpsPreGradFusion):
def __init__(self, **kwargs):
super().__init__(torch.clamp, **kwargs)
@register_fusion("batch_dropout")
|
BatchClampPreGradFusion
|
python
|
pytorch__pytorch
|
test/distributed/tensor/debug/test_comm_mode.py
|
{
"start": 762,
"end": 7692
}
|
class ____(TestCase):
def tearDown(self):
super().tearDown()
dist.destroy_process_group()
def setUp(self):
super().setUp()
self.world_size = 2
store = FakeStore()
dist.init_process_group(
backend="fake", rank=1, world_size=self.world_size, store=store
)
self.device_type = device_type
self.world_pg = dist.distributed_c10d._get_default_group()
def checksAssert(self, comm_mode, key, expected_value, expected_total_value):
comm_counts = comm_mode.get_comm_counts()
self.assertEqual(comm_mode.get_total_counts(), expected_total_value)
self.assertEqual(comm_counts[key], expected_value)
return
def test_comm_mode(self):
world_pg = self.world_pg
class WrapperModel(nn.Module):
def __init__(self, device):
super().__init__()
self.model = MLPModule(device=device)
def forward(self, x):
x = funcol.all_gather_tensor(x, 0, world_pg)
x = funcol.reduce_scatter_tensor(x, "sum", 0, world_pg)
out = self.model(x)
return funcol.all_reduce(out, "sum", world_pg)
model = WrapperModel(self.device_type)
comm_mode = CommDebugMode()
with comm_mode:
model(torch.randn(20, 10, device=self.device_type))
comm_counts = comm_mode.get_comm_counts()
self.assertEqual(comm_mode.get_total_counts(), 3)
self.assertEqual(comm_counts[c10d_functional.all_reduce], 1)
self.assertEqual(comm_counts[c10d_functional.all_gather_into_tensor], 1)
self.assertEqual(comm_counts[c10d_functional.reduce_scatter_tensor], 1)
def test_comm_mode_coalesced(self):
world_pg = self.world_pg
class WrapperModelCoalesced(nn.Module):
def __init__(self, device):
super().__init__()
self.model = MLPModule(device=device)
def forward(self, x):
x = funcol.all_gather_tensor(x, 0, world_pg)
x = funcol.reduce_scatter_tensor(x, "sum", 0, world_pg)
out = self.model(x)
return funcol.all_reduce_coalesced([out], "sum", world_pg)
model = WrapperModelCoalesced(self.device_type)
comm_mode = CommDebugMode()
with comm_mode:
model(torch.randn(20, 10, device=self.device_type))
comm_counts = comm_mode.get_comm_counts()
self.assertEqual(comm_mode.get_total_counts(), 3)
self.assertEqual(comm_counts[c10d_functional.all_reduce_coalesced], 1)
self.assertEqual(comm_counts[c10d_functional.all_gather_into_tensor], 1)
self.assertEqual(comm_counts[c10d_functional.reduce_scatter_tensor], 1)
def test_comm_mode_with_dtensor(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
def f(x, y):
return torch.mm(x, y)
comm_mode = CommDebugMode()
x = torch.randn(4, 8, requires_grad=True)
y = torch.randn(4, 32, requires_grad=True)
x_dtensor = DTensor.from_local(x, mesh, [Shard(0)], run_check=False)
y_dtensor = DTensor.from_local(y, mesh, [Shard(0)], run_check=False)
with comm_mode:
f(x_dtensor, y_dtensor)
comm_counts = comm_mode.get_comm_counts()
self.assertEqual(comm_mode.get_total_counts(), 1)
self.assertEqual(comm_counts[c10d_functional.all_reduce], 0)
self.assertEqual(comm_counts[c10d_functional.all_gather_into_tensor], 1)
self.assertEqual(comm_counts[c10d_functional.reduce_scatter_tensor], 0)
@requires_accelerator_dist_backend(["nccl", "xccl"])
def test_comm_mode_with_c10d(self):
if not torch.accelerator.is_available():
return
inp = torch.rand(2, 8, 16).to(device_type)
all_gather_out = inp.new_empty(self.world_size * 2, 8, 16)
comm_mode = CommDebugMode()
# tests c10d all_reduce tracing
with comm_mode:
dist.all_reduce(inp)
self.checksAssert(comm_mode, c10d_ops.allreduce_, 1, 1)
# tests c10d all_gather_into_tensor tracing
with comm_mode:
dist.all_gather_into_tensor(all_gather_out, inp)
self.checksAssert(comm_mode, c10d_ops._allgather_base_, 1, 1)
# tests c10d reduce_scatter tracing
with comm_mode:
dist.reduce_scatter_tensor(inp, all_gather_out)
self.checksAssert(comm_mode, c10d_ops._reduce_scatter_base_, 1, 1)
# tests c10d broadcast tracing
with comm_mode:
dist.broadcast(inp, 0)
self.checksAssert(comm_mode, c10d_ops.broadcast_, 1, 1)
# tests c10d gather tracing
with comm_mode:
dist.gather(inp, None, 0)
self.checksAssert(comm_mode, c10d_ops.gather_, 1, 1)
# tests c10d reduce tracing
with comm_mode:
dist.reduce(inp, 0)
self.checksAssert(comm_mode, c10d_ops.reduce_, 1, 1)
# tests c10d scatter tracing
with comm_mode:
dist.scatter(inp, None, 0)
self.checksAssert(comm_mode, c10d_ops.scatter_, 1, 1)
# tests c10d all_gather tracing
output_list = []
with comm_mode:
dist.all_gather(output_list, inp, None)
self.checksAssert(comm_mode, c10d_ops.allgather_, 1, 1)
# tests c10d allgather_coalesced_ tracing
output_list = []
with comm_mode:
dist.all_gather_coalesced(output_list, [inp], None)
self.checksAssert(comm_mode, c10d_ops.allgather_coalesced_, 1, 1)
# tests c10d allgather_into_tensor_coalesced_ tracing
with comm_mode, dist._coalescing_manager():
dist.all_gather_into_tensor(all_gather_out, inp)
self.checksAssert(comm_mode, c10d_ops.allgather_into_tensor_coalesced_, 1, 1)
# tests c10d allreduce_coalesced
with comm_mode:
dist.all_reduce_coalesced(inp)
self.checksAssert(comm_mode, c10d_ops.allreduce_coalesced_, 1, 1)
# tests c10d reduce_scatter_
with comm_mode:
dist.reduce_scatter(all_gather_out, [inp])
self.checksAssert(comm_mode, c10d_ops.reduce_scatter_, 1, 1)
# tests c10d reduce_scatter_tensor_coalesced
with comm_mode, dist._coalescing_manager():
dist.reduce_scatter_tensor(all_gather_out, inp)
self.checksAssert(comm_mode, c10d_ops.reduce_scatter_tensor_coalesced_, 1, 1)
# tests c10d alltoall_
with comm_mode:
dist.all_to_all([inp], [inp])
self.checksAssert(comm_mode, c10d_ops.alltoall_, 1, 1)
# tests c10d alltoall_base_
with comm_mode:
dist.all_to_all_single(inp, inp)
self.checksAssert(comm_mode, c10d_ops.alltoall_base_, 1, 1)
if __name__ == "__main__":
run_tests()
|
TestCommMode
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 67395,
"end": 67747
}
|
class ____(sgqlc.types.Enum):
"""The possible states of a pull request review comment.
Enumeration Choices:
* `PENDING`: A comment that is part of a pending review
* `SUBMITTED`: A comment that is part of a submitted review
"""
__schema__ = github_schema
__choices__ = ("PENDING", "SUBMITTED")
|
PullRequestReviewCommentState
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/steps/docker.py
|
{
"start": 463,
"end": 5110
}
|
class ____(Step):
def __init__(
self,
title: str,
context: PipelineContext,
paths_to_mount: Optional[List[MountPath]] = None,
internal_tools: Optional[List[MountPath]] = None,
secret_env_variables: Optional[Dict[str, Secret]] = None,
env_variables: dict[str, str] = {},
working_directory: str = "/",
command: Optional[List[str]] = None,
) -> None:
"""A simple step that runs a given command in a container.
Args:
title (str): name of the step
context (PipelineContext): context of the step
paths_to_mount (List[MountPath], optional): directory paths to mount. Defaults to [].
internal_tools (List[MountPath], optional): internal tools to install. Defaults to [].
            secret_env_variables (Dict[str, Secret], optional): secrets to add to the container as environment variables, mapping env var name to Secret object. Defaults to {}.
env_variables (dict[str, str], optional): env variables to set in container. Defaults to {}.
working_directory (str, optional): working directory to run the command in. Defaults to "/".
command (Optional[List[str]], optional): The default command to run. Defaults to None.
"""
self._title = title
super().__init__(context)
self.paths_to_mount = paths_to_mount if paths_to_mount else []
self.working_directory = working_directory
self.internal_tools = internal_tools if internal_tools else []
self.secret_env_variables = secret_env_variables if secret_env_variables else {}
self.env_variables = env_variables
self.command = command
@property
def title(self) -> str:
return self._title
def _mount_paths(self, container: dagger.Container) -> dagger.Container:
for path_to_mount in self.paths_to_mount:
if path_to_mount.optional and not path_to_mount.get_path().exists():
continue
if path_to_mount.get_path().is_symlink():
container = self._mount_path(container, path_to_mount.get_path().readlink())
container = self._mount_path(container, path_to_mount.get_path())
return container
def _mount_path(self, container: dagger.Container, path: Path) -> dagger.Container:
path_string = str(path)
destination_path = f"/{path_string}"
if path.is_file():
file_to_load = self.context.get_repo_file(path_string)
container = container.with_mounted_file(destination_path, file_to_load)
else:
dir_to_load = self.context.get_repo_dir(path_string)
container = container.with_mounted_directory(destination_path, dir_to_load)
return container
async def _install_internal_tools(self, container: dagger.Container) -> dagger.Container:
for internal_tool in self.internal_tools:
container = await with_installed_pipx_package(self.context, container, str(internal_tool))
return container
def _set_workdir(self, container: dagger.Container) -> dagger.Container:
return container.with_workdir(self.working_directory)
def _set_env_variables(self, container: dagger.Container) -> dagger.Container:
for key, value in self.env_variables.items():
container = container.with_env_variable(key, value)
return container
def _set_secret_env_variables(self, container: dagger.Container) -> dagger.Container:
for env_var_name, secret in self.secret_env_variables.items():
container = container.with_secret_variable(env_var_name, secret.as_dagger_secret(self.context.dagger_client))
return container
async def init_container(self) -> dagger.Container:
# TODO (ben): Replace with python base container when available
container = with_python_base(self.context)
container = self._mount_paths(container)
container = self._set_env_variables(container)
container = self._set_secret_env_variables(container)
container = await self._install_internal_tools(container)
container = self._set_workdir(container)
return container
async def _run(self, command: Optional[List[str]] = None) -> StepResult:
command_to_run = command or self.command
if not command_to_run:
raise ValueError(f"No command given to the {self.title} step")
container_to_run = await self.init_container()
return await self.get_step_result(container_to_run.with_exec(command_to_run))
|
SimpleDockerStep
|
python
|
weaviate__weaviate-python-client
|
weaviate/exceptions.py
|
{
"start": 7510,
"end": 7764
}
|
class ____(WeaviateQueryError):
"""Is raised if a gRPC batch send request to Weaviate fails in any way."""
def __init__(self, message: str):
super().__init__(message, "GRPC batch send")
self.message = message
|
WeaviateBatchSendError
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-vectorx/tests/test_vector_stores_vectorx.py
|
{
"start": 5400,
"end": 7052
}
|
class ____(VectorXTestSetup):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.embed_model = HuggingFaceEmbedding(
"sentence-transformers/all-MiniLM-L6-v2", device="cpu"
)
cls.vector_store = VectorXVectorStore.from_params(
api_token=cls.vecx_api_token,
index_name=cls.test_index_name,
encryption_key=cls.encryption_key,
dimension=cls.dimension,
space_type=cls.space_type,
)
cls.storage_context = StorageContext.from_defaults(
vector_store=cls.vector_store
)
Settings.llm = None
cls.index = VectorStoreIndex.from_documents(
cls.test_documents,
storage_context=cls.storage_context,
embed_model=cls.embed_model,
)
def test_custom_retriever(self):
ai_filter = MetadataFilter(
key="category", value="ai", operator=FilterOperator.EQ
)
retriever = VectorIndexRetriever(
index=self.index,
similarity_top_k=3,
filters=MetadataFilters(filters=[ai_filter]),
)
nodes = retriever.retrieve("What is deep learning?")
self.assertGreater(len(nodes), 0)
def test_query_engine(self):
retriever = VectorIndexRetriever(index=self.index, similarity_top_k=3)
query_engine = RetrieverQueryEngine.from_args(retriever=retriever)
response = query_engine.query("Explain machine learning vs deep learning")
self.assertTrue(len(str(response)) > 0)
# ------------------ Query & Filter Tests ------------------
|
TestCustomRetrieval
|
python
|
huggingface__transformers
|
src/transformers/models/metaclip_2/modeling_metaclip_2.py
|
{
"start": 10603,
"end": 11786
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: Union[MetaClip2VisionConfig, MetaClip2TextConfig]):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = MetaClip2Attention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = MetaClip2MLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
|
MetaClip2EncoderLayer
|
python
|
getsentry__sentry
|
src/sentry/apidocs/utils.py
|
{
"start": 284,
"end": 1345
}
|
class ____:
"""
Basic class that simply stores a type that is parsed into Open API Schema.
Used by `utils.inline_sentry_response_serializer`
"""
def __init__(self, t: type) -> None:
self.typeSchema = t
def inline_sentry_response_serializer(name: str, t: type) -> type:
"""
Function for documenting an API response with python types.
You may use existing types, and likely serializer response types.
Be sure to pass the type, and not the serializer itself.
.. code-block::
@extend_schema(
response=inline_sentry_response_serializer('ListMemberResponse',List[SCIMAPIMemberSerializerResponse])
)
:param name: the name of the component, used in the OpenAPIJson
:param t: the type of the response
"""
if isinstance(t, Serializer):
raise TypeError(
"Please use the type of the `serialize` function instead of the serializer itself."
)
serializer_class = type(name, (_RawSchema,), {"typeSchema": t})
return serializer_class
|
_RawSchema
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/cloud_run.py
|
{
"start": 6094,
"end": 7758
}
|
class ____(GoogleBaseAsyncHook):
"""
Async hook for the Google Cloud Run service.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with the first
        account from the list granting this role to the originating account.
"""
sync_hook_class = CloudRunHook
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
self._client: JobsAsyncClient | None = None
super().__init__(gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain, **kwargs)
async def get_conn(self):
if self._client is None:
sync_hook = await self.get_sync_hook()
self._client = JobsAsyncClient(credentials=sync_hook.get_credentials(), client_info=CLIENT_INFO)
return self._client
async def get_operation(self, operation_name: str) -> operations_pb2.Operation:
conn = await self.get_conn()
return await conn.get_operation(operations_pb2.GetOperationRequest(name=operation_name), timeout=120)
|
CloudRunAsyncHook
|
python
|
run-llama__llama_index
|
llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/events.py
|
{
"start": 432,
"end": 520
}
|
class ____(BaseVoiceAgentEvent):
tool_name: str
tool_result: Any
|
ToolCallResultEvent
|
python
|
google__jax
|
jax/_src/linear_util.py
|
{
"start": 3359,
"end": 3427
}
|
class ____: pass
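# Singleton sentinel used to mark a store whose value has not been set yet.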
_EMPTY_STORE_VALUE = EmptyStoreValue()
|
EmptyStoreValue
|
python
|
FactoryBoy__factory_boy
|
tests/test_typing.py
|
{
"start": 167,
"end": 594
}
|
class ____(unittest.TestCase):
def test_simple_factory(self) -> None:
class UserFactory(factory.Factory[User]):
name = "John Doe"
email = "john.doe@example.org"
id = 42
class Meta:
model = User
result: User
result = UserFactory.build()
result = UserFactory.create()
self.assertEqual(result.name, "John Doe")
|
TypingTests
|
python
|
pytorch__pytorch
|
test/quantization/pt2e/test_metadata_porting.py
|
{
"start": 2268,
"end": 21478
}
|
class ____(QuantizationTestCase):
def _test_quant_tag_preservation_through_decomp(
self, model, example_inputs, from_node_to_tags
):
ep = torch.export.export(model, example_inputs, strict=True)
found_tags = True
not_found_nodes = ""
for from_node, tag in from_node_to_tags.items():
for n in ep.graph_module.graph.nodes:
from_node_meta = n.meta.get("from_node", None)
if from_node_meta is None:
continue
if not isinstance(from_node_meta, list):
raise ValueError(
f"from_node metadata is of type {type(from_node_meta)}, but expected list"
)
for meta in from_node_meta:
node_target = meta.target
if node_target == str(from_node):
node_tag = n.meta.get("quantization_tag", None)
if node_tag is None or tag != node_tag:
not_found_nodes += str(n.target) + ", "
found_tags = False
break
if not found_tags:
break
self.assertTrue(
found_tags,
f"Decomposition did not preserve quantization tag for {not_found_nodes}",
)
def _test_metadata_porting(
self,
model,
example_inputs,
quantizer,
node_tags=None,
) -> torch.fx.GraphModule:
m_eager = model.eval()
# program capture
m = copy.deepcopy(m_eager)
m = torch.export.export(m, example_inputs, strict=True).module()
m = prepare_pt2e(m, quantizer)
# Calibrate
m(*example_inputs)
m = convert_pt2e(m)
m(*example_inputs)
recorded_node_tags = {}
for n in m.graph.nodes:
if "quantization_tag" not in n.meta:
continue
if n.op == "call_function" and n.target in _QUANT_OPS:
key = n.target
elif n.op == "get_attr":
key = "get_attr"
else:
continue
if key not in recorded_node_tags:
recorded_node_tags[key] = set()
if (
n.op == "call_function"
and n.meta["quantization_tag"] in recorded_node_tags[key]
):
raise ValueError(
f"{key} {n.format_node()} has tag {n.meta['quantization_tag']} that "
"is associated with another node of the same type"
)
recorded_node_tags[key].add(n.meta["quantization_tag"])
self.assertEqual(set(recorded_node_tags.keys()), set(node_tags.keys()))
for k, v in recorded_node_tags.items():
self.assertEqual(v, node_tags[k])
return m
@skipIfCrossRef # mlazos: retracing FX graph with torch function mode doesn't propagate metadata, because the stack
# trace of the mode torch function impl doesn't match the traced graph stored lineno.
def test_simple_metadata_porting(self):
"""
Model under test
conv2d -> avgpool -> hardtanh -> linear
Check quantization tags on conv2d, avgpool and linear are correctly set
"""
class BackendAQuantizer(Quantizer):
def annotate(self, gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
backend_string = "BackendA"
quantization_config = get_symmetric_quantization_config(
is_per_channel=True
)
annotated_partitions = OP_TO_ANNOTATOR["linear"](
gm, quantization_config
)
_tag_partitions(backend_string, "linear", annotated_partitions)
annotated_partitions = OP_TO_ANNOTATOR["conv"](gm, quantization_config)
_tag_partitions(backend_string, "conv2d", annotated_partitions)
annotated_partitions = OP_TO_ANNOTATOR["adaptive_avg_pool2d"](
gm, quantization_config
)
_tag_partitions(
backend_string, "adaptive_avg_pool2d", annotated_partitions
)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
example_inputs = (torch.randn(1, 3, 5, 5),)
get_attr_tags = {
"BackendA_conv2d_0",
"BackendA_linear_0",
}
quantize_per_tensor_tags = {
"BackendA_conv2d_0",
"BackendA_adaptive_avg_pool2d_0",
"BackendA_linear_0",
}
dequantize_per_tensor_tags = {
"BackendA_adaptive_avg_pool2d_0",
"BackendA_conv2d_0",
"BackendA_linear_0",
}
dequantize_per_channel_tags = {"BackendA_conv2d_0", "BackendA_linear_0"}
node_tags = {
"get_attr": get_attr_tags,
torch.ops.quantized_decomposed.quantize_per_tensor.default: quantize_per_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: dequantize_per_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_channel.default: dequantize_per_channel_tags,
}
m = self._test_metadata_porting(
TestHelperModules.Conv2dWithObsSharingOps(),
example_inputs,
BackendAQuantizer(),
node_tags,
)
from_node_to_tags = {
torch.ops.aten.adaptive_avg_pool2d.default: "BackendA_adaptive_avg_pool2d_0",
torch.ops.aten.linear.default: "BackendA_linear_0",
}
self._test_quant_tag_preservation_through_decomp(
m, example_inputs, from_node_to_tags
)
def test_metadata_porting_with_no_quant_inbetween(self):
"""
Model under test
conv2d -> avgpool -> hardtanh -> linear
        Don't quantize avgpool
Check quantization tags on conv2d and linear are correctly set
"""
class BackendAQuantizer(Quantizer):
def annotate(self, gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
backend_string = "BackendA"
quantization_config = get_symmetric_quantization_config(
is_per_channel=True
)
annotated_partitions = OP_TO_ANNOTATOR["linear"](
gm, quantization_config
)
_tag_partitions(backend_string, "linear", annotated_partitions)
annotated_partitions = OP_TO_ANNOTATOR["conv"](gm, quantization_config)
_tag_partitions(backend_string, "conv2d", annotated_partitions)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
example_inputs = (torch.randn(1, 3, 5, 5),)
get_attr_tags = {"BackendA_conv2d_0", "BackendA_linear_0"}
quantize_per_tensor_tags = {"BackendA_conv2d_0", "BackendA_linear_0"}
dequantize_per_tensor_tags = {"BackendA_conv2d_0", "BackendA_linear_0"}
dequantize_per_channel_tags = {"BackendA_conv2d_0", "BackendA_linear_0"}
node_tags = {
"get_attr": get_attr_tags,
torch.ops.quantized_decomposed.quantize_per_tensor.default: quantize_per_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: dequantize_per_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_channel.default: dequantize_per_channel_tags,
}
self._test_metadata_porting(
TestHelperModules.Conv2dWithObsSharingOps(),
example_inputs,
BackendAQuantizer(),
node_tags,
)
@unittest.skip("Temporarily disabled")
def test_metadata_porting_for_dq(self):
"""
Model under test
conv2d -> avgpool -> hardtanh -> linear
Quantize all except linear.
Quantize linear with dynamic quantization
Check quantization tags on conv2d, avgpool and linear are correctly set
"""
class BackendAQuantizer(Quantizer):
def annotate(self, gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
backend_string = "BackendA"
                # static quantization
quantization_config = get_symmetric_quantization_config(
is_per_channel=True
)
annotated_partitions = OP_TO_ANNOTATOR["conv"](gm, quantization_config)
_tag_partitions(backend_string, "conv2d", annotated_partitions)
annotated_partitions = OP_TO_ANNOTATOR["adaptive_avg_pool2d"](
gm, quantization_config
)
_tag_partitions(
backend_string, "adaptive_avg_pool2d", annotated_partitions
)
# dynamic quantization
quantization_config_dynamic = get_symmetric_quantization_config(
is_per_channel=True, is_dynamic=True
)
annotated_partitions = OP_TO_ANNOTATOR["linear"](
gm, quantization_config_dynamic
)
_tag_partitions(backend_string, "linear_dynamic", annotated_partitions)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
example_inputs = (torch.randn(1, 3, 5, 5),)
# TODO: add get_attr_tags when the test is re-enabled
get_attr_tags = {}
quantize_per_tensor_tags = {
"BackendA_conv2d_0",
"BackendA_adaptive_avg_pool2d_0",
}
quantize_per_tensor_tensor_tags = {"BackendA_linear_dynamic_0"}
choose_qparams_tensor_tensor_tags = {"BackendA_linear_dynamic_0"}
dequantize_per_tensor_tags = {
"BackendA_adaptive_avg_pool2d_0",
"BackendA_conv2d_0",
}
dequantize_per_tensor_tensor_tags = {"BackendA_linear_dynamic_0"}
dequantize_per_channel_tags = {
"BackendA_conv2d_0",
"BackendA_linear_dynamic_0",
}
node_tags = {
"get_attr": get_attr_tags,
torch.ops.quantized_decomposed.quantize_per_tensor.default: quantize_per_tensor_tags,
torch.ops.quantized_decomposed.quantize_per_tensor.tensor: quantize_per_tensor_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: dequantize_per_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: dequantize_per_tensor_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_channel.default: dequantize_per_channel_tags,
torch.ops.quantized_decomposed.choose_qparams.tensor: choose_qparams_tensor_tensor_tags,
}
self._test_metadata_porting(
TestHelperModules.Conv2dWithObsSharingOps(),
example_inputs,
BackendAQuantizer(),
node_tags,
)
def test_metadata_porting_for_two_dq(self):
"""
Model under test
conv2d -> avgpool -> hardtanh -> linear
Quantize linear and conv with dynamic quantization
Check quantization tags on conv2d, avgpool and linear are correctly set
"""
class BackendAQuantizer(Quantizer):
def annotate(self, gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
backend_string = "BackendA"
# dynamic quantization
quantization_config_dynamic = get_symmetric_quantization_config(
is_per_channel=True, is_dynamic=True
)
annotated_partitions = OP_TO_ANNOTATOR["conv"](
gm, quantization_config_dynamic
)
_tag_partitions(backend_string, "conv2d_dynamic", annotated_partitions)
annotated_partitions = OP_TO_ANNOTATOR["linear"](
gm, quantization_config_dynamic
)
_tag_partitions(backend_string, "linear_dynamic", annotated_partitions)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
example_inputs = (torch.randn(1, 3, 5, 5),)
get_attr_tags = {
"BackendA_conv2d_dynamic_0",
"BackendA_linear_dynamic_0",
}
choose_qparams_tensor_tags = {
"BackendA_conv2d_dynamic_0",
"BackendA_linear_dynamic_0",
}
quantize_per_tensor_tensor_tags = {
"BackendA_conv2d_dynamic_0",
"BackendA_linear_dynamic_0",
}
dequantize_per_tensor_tensor_tags = {
"BackendA_conv2d_dynamic_0",
"BackendA_linear_dynamic_0",
}
dequantize_per_channel_tags = {
"BackendA_conv2d_dynamic_0",
"BackendA_linear_dynamic_0",
}
node_tags = {
"get_attr": get_attr_tags,
torch.ops.quantized_decomposed.quantize_per_tensor.tensor: quantize_per_tensor_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: dequantize_per_tensor_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_channel.default: dequantize_per_channel_tags,
torch.ops.quantized_decomposed.choose_qparams.tensor: choose_qparams_tensor_tags,
}
self._test_metadata_porting(
TestHelperModules.Conv2dWithObsSharingOps(),
example_inputs,
BackendAQuantizer(),
node_tags,
)
def test_metadata_porting_for_dq_no_static_q(self):
"""
Model under test
conv2d -> avgpool -> hardtanh -> linear
        Don't quantize anything except linear.
Quantize linear with dynamic quantization
Check quantization tags on conv2d, avgpool and linear are correctly set
"""
class BackendAQuantizer(Quantizer):
def annotate(self, gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
backend_string = "BackendA"
# dynamic quantization
quantization_config_dynamic = get_symmetric_quantization_config(
is_per_channel=True, is_dynamic=True
)
annotated_partitions = OP_TO_ANNOTATOR["linear"](
gm, quantization_config_dynamic
)
_tag_partitions(backend_string, "linear_dynamic", annotated_partitions)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
example_inputs = (torch.randn(1, 3, 5, 5),)
get_attr_tags = {"BackendA_linear_dynamic_0"}
choose_qparams_tensor_tags = {"BackendA_linear_dynamic_0"}
quantize_per_tensor_tensor_tags = {"BackendA_linear_dynamic_0"}
dequantize_per_tensor_tensor_tags = {"BackendA_linear_dynamic_0"}
dequantize_per_channel_tags = {"BackendA_linear_dynamic_0"}
node_tags = {
"get_attr": get_attr_tags,
torch.ops.quantized_decomposed.quantize_per_tensor.tensor: quantize_per_tensor_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: dequantize_per_tensor_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_channel.default: dequantize_per_channel_tags,
torch.ops.quantized_decomposed.choose_qparams.tensor: choose_qparams_tensor_tags,
}
self._test_metadata_porting(
TestHelperModules.Conv2dWithObsSharingOps(),
example_inputs,
BackendAQuantizer(),
node_tags,
)
def test_no_metadata_porting(self):
class BackendAQuantizer(Quantizer):
def annotate(self, gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
quantization_config = get_symmetric_quantization_config(
is_per_channel=True
)
OP_TO_ANNOTATOR["linear"](gm, quantization_config)
OP_TO_ANNOTATOR["conv"](gm, quantization_config)
OP_TO_ANNOTATOR["adaptive_avg_pool2d"](gm, quantization_config)
def validate(self, model: torch.fx.GraphModule) -> None:
pass
example_inputs = (torch.randn(1, 3, 5, 5),)
node_tags = {}
m = self._test_metadata_porting(
TestHelperModules.Conv2dWithObsSharingOps(),
example_inputs,
BackendAQuantizer(),
node_tags,
)
from_node_to_tags = {}
self._test_quant_tag_preservation_through_decomp(
m, example_inputs, from_node_to_tags
)
def test_no_metadata_porting_through_unknown_ops(self):
"""
Model under test
matmul -> add -> relu
matmul has get_attr as first input, but the quantization_tag should not be
propagated to add even if it's part of a chain that ends at get_attr
"""
class MatmulWithConstInput(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.register_parameter("w", torch.nn.Parameter(torch.rand(8, 16)))
def forward(self, x, y):
x = torch.matmul(self.w, x)
z = x + y
return torch.nn.functional.relu(z)
class BackendAQuantizer(Quantizer):
def annotate(self, gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
qconfig = get_symmetric_quantization_config()
for n in gm.graph.nodes:
if n.op != "call_function":
continue
n.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={n.args[0]: qconfig.input_activation},
output_qspec=qconfig.output_activation,
)
tag = str(n.target)
n.meta["quantization_tag"] = tag
for arg in n.args:
if arg.op == "get_attr":
arg.meta["quantization_tag"] = tag
def validate(self, model: torch.fx.GraphModule) -> None:
pass
example_inputs = (torch.randn(16, 24), torch.randn(8, 24))
get_attr_tags = {"aten.matmul.default"}
quantize_per_tensor_tensor_tags = {
"aten.matmul.default",
"aten.add.Tensor",
"aten.relu.default",
}
dequantize_per_tensor_tensor_tags = {
"aten.matmul.default",
"aten.add.Tensor",
"aten.relu.default",
}
node_tags = {
"get_attr": get_attr_tags,
torch.ops.quantized_decomposed.quantize_per_tensor.default: quantize_per_tensor_tensor_tags,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: dequantize_per_tensor_tensor_tags,
}
self._test_metadata_porting(
MatmulWithConstInput(),
example_inputs,
BackendAQuantizer(),
node_tags,
)
if __name__ == "__main__":
raise_on_run_directly("test/test_quantization.py")
|
TestMetaDataPorting
|
python
|
viewflow__viewflow
|
viewflow/templatetags/viewflow.py
|
{
"start": 3855,
"end": 4280
}
|
class ____(BaseViewsetURLNode):
"""
    Reverse a URL to a view within a viewset
Example::
{% current_viewset_reverse viewset viewname args kwargs %}
"""
def _reverse_url(self, viewset, view_name, args, kwargs, current_app, context):
return current_viewset_reverse(
context.request, viewset, view_name, args=args, kwargs=kwargs
)
@register.tag("render")
|
CurrentViewsetURLNode
|
python
|
PrefectHQ__prefect
|
tests/server/orchestration/api/test_workers.py
|
{
"start": 60158,
"end": 67546
}
|
class ____:
async def test_heartbeat_worker(self, client, work_pool):
workers_response = await client.post(
f"/work_pools/{work_pool.name}/workers/filter"
)
assert workers_response.status_code == status.HTTP_200_OK
assert len(workers_response.json()) == 0
dt = datetime.now(timezone.utc)
response = await client.post(
f"/work_pools/{work_pool.name}/workers/heartbeat",
json=dict(name="test-worker"),
)
assert response.status_code == status.HTTP_204_NO_CONTENT, response.text
workers_response = await client.post(
f"/work_pools/{work_pool.name}/workers/filter"
)
assert workers_response.status_code == status.HTTP_200_OK
assert len(workers_response.json()) == 1
assert workers_response.json()[0]["name"] == "test-worker"
assert (
datetime.fromisoformat(
workers_response.json()[0]["last_heartbeat_time"].replace("Z", "+00:00")
)
> dt
)
assert workers_response.json()[0]["status"] == "ONLINE"
assert_status_events(work_pool.name, ["prefect.work-pool.ready"])
async def test_worker_heartbeat_updates_work_pool_status(self, client, work_pool):
# Verify that the work pool is not ready
work_pool_response = await client.get(f"/work_pools/{work_pool.name}")
assert work_pool_response.status_code == status.HTTP_200_OK
assert (
work_pool_response.json()["status"]
== schemas.statuses.WorkPoolStatus.NOT_READY.value
)
# Heartbeat a worker
heartbeat_response = await client.post(
f"/work_pools/{work_pool.name}/workers/heartbeat",
json=dict(name="test-worker"),
)
assert heartbeat_response.status_code == status.HTTP_204_NO_CONTENT
# Verify that the work pool is ready
work_pool_response = await client.get(f"/work_pools/{work_pool.name}")
assert work_pool_response.status_code == status.HTTP_200_OK
assert (
work_pool_response.json()["status"]
== schemas.statuses.WorkPoolStatus.READY.value
)
assert_status_events(work_pool.name, ["prefect.work-pool.ready"])
async def test_worker_heartbeat_does_not_updates_work_pool_status_if_paused(
self, client, work_pool
):
# Pause the work pool
await client.patch(
f"/work_pools/{work_pool.name}",
json=schemas.actions.WorkPoolUpdate(is_paused=True).model_dump(
mode="json", exclude_unset=True
),
)
# Verify that the work pool is paused
work_pool_response = await client.get(f"/work_pools/{work_pool.name}")
assert work_pool_response.status_code == status.HTTP_200_OK
assert (
work_pool_response.json()["status"]
== schemas.statuses.WorkPoolStatus.PAUSED.value
)
# Heartbeat a worker
heartbeat_response = await client.post(
f"/work_pools/{work_pool.name}/workers/heartbeat",
json=dict(name="test-worker"),
)
assert heartbeat_response.status_code == status.HTTP_204_NO_CONTENT
# Verify that the work pool is still paused
work_pool_response = await client.get(f"/work_pools/{work_pool.name}")
assert work_pool_response.status_code == status.HTTP_200_OK
assert (
work_pool_response.json()["status"]
== schemas.statuses.WorkPoolStatus.PAUSED.value
)
assert_status_events(work_pool.name, ["prefect.work-pool.paused"])
async def test_heartbeat_worker_requires_name(self, client, work_pool):
response = await client.post(f"/work_pools/{work_pool.name}/workers/heartbeat")
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
assert b'"missing","loc":["body","name"]' in response.content
async def test_heartbeat_worker_upserts_for_same_name(self, client, work_pool):
for name in ["test-worker", "test-worker", "test-worker", "another-worker"]:
await client.post(
f"/work_pools/{work_pool.name}/workers/heartbeat",
json=dict(name=name),
)
workers_response = await client.post(
f"/work_pools/{work_pool.name}/workers/filter"
)
assert workers_response.status_code == status.HTTP_200_OK
assert len(workers_response.json()) == 2
async def test_heartbeat_worker_limit(self, client, work_pool):
for name in ["test-worker", "test-worker", "test-worker", "another-worker"]:
await client.post(
f"/work_pools/{work_pool.name}/workers/heartbeat",
json=dict(name=name),
)
workers_response = await client.post(
f"/work_pools/{work_pool.name}/workers/filter",
json=dict(limit=1),
)
assert workers_response.status_code == status.HTTP_200_OK
assert len(workers_response.json()) == 1
assert workers_response.json()[0]["name"] == "another-worker"
async def test_heartbeat_accepts_heartbeat_interval(self, client, work_pool):
await client.post(
f"/work_pools/{work_pool.name}/workers/heartbeat",
json=dict(name="test-worker", heartbeat_interval_seconds=60),
)
workers_response = await client.post(
f"/work_pools/{work_pool.name}/workers/filter",
)
assert len(workers_response.json()) == 1
assert workers_response.json()[0]["heartbeat_interval_seconds"] == 60
async def test_worker_with_old_heartbeat_has_offline_status(
self, client, work_pool, session, db
):
now = datetime.now(timezone.utc)
insert_stmt = db.queries.insert(db.Worker).values(
name="old-worker",
work_pool_id=work_pool.id,
last_heartbeat_time=now - timedelta(minutes=5),
)
await session.execute(insert_stmt)
await session.commit()
workers_response = await client.post(
f"/work_pools/{work_pool.name}/workers/filter",
)
assert len(workers_response.json()) == 1
assert workers_response.json()[0]["status"] == "OFFLINE"
async def test_worker_status_accounts_for_heartbeat_interval(
self, client, work_pool, session, db
):
"""
Worker status should use the heartbeat interval to determine if a worker is
offline.
        This test sets an abnormally small heartbeat interval and then checks that the
        worker is considered offline sooner than it would be by default.
"""
now = datetime.now(timezone.utc)
insert_stmt = db.queries.insert(db.Worker).values(
name="old-worker",
work_pool_id=work_pool.id,
last_heartbeat_time=now - timedelta(seconds=10),
heartbeat_interval_seconds=1,
)
await session.execute(insert_stmt)
await session.commit()
workers_response = await client.post(
f"/work_pools/{work_pool.name}/workers/filter",
)
assert len(workers_response.json()) == 1
assert workers_response.json()[0]["status"] == "OFFLINE"
|
TestWorkerProcess
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/metadata/importlib/_compat.py
|
{
"start": 451,
"end": 2811
}
|
class ____(Protocol):
"""A protocol that various path objects conform.
This exists because importlib.metadata uses both ``pathlib.Path`` and
``zipfile.Path``, and we need a common base for type hints (Union does not
work well since ``zipfile.Path`` is too new for our linter setup).
This does not mean to be exhaustive, but only contains things that present
in both classes *that we need*.
"""
@property
def name(self) -> str:
raise NotImplementedError()
@property
def parent(self) -> "BasePath":
raise NotImplementedError()
def get_info_location(d: importlib.metadata.Distribution) -> Optional[BasePath]:
"""Find the path to the distribution's metadata directory.
HACK: This relies on importlib.metadata's private ``_path`` attribute. Not
all distributions exist on disk, so importlib.metadata is correct to not
expose the attribute as public. But pip's code base is old and not as clean,
so we do this to avoid having to rewrite too many things. Hopefully we can
eliminate this some day.
"""
return getattr(d, "_path", None)
def parse_name_and_version_from_info_directory(
dist: importlib.metadata.Distribution,
) -> Tuple[Optional[str], Optional[str]]:
"""Get a name and version from the metadata directory name.
This is much faster than reading distribution metadata.
"""
info_location = get_info_location(dist)
if info_location is None:
return None, None
stem, suffix = os.path.splitext(info_location.name)
if suffix == ".dist-info":
name, sep, version = stem.partition("-")
if sep:
return name, version
if suffix == ".egg-info":
name = stem.split("-", 1)[0]
return name, None
return None, None
def get_dist_canonical_name(dist: importlib.metadata.Distribution) -> NormalizedName:
"""Get the distribution's normalized name.
The ``name`` attribute is only available in Python 3.10 or later. We are
targeting exactly that, but Mypy does not know this.
"""
if name := parse_name_and_version_from_info_directory(dist)[0]:
return canonicalize_name(name)
name = cast(Any, dist).name
if not isinstance(name, str):
raise BadMetadata(dist, reason="invalid metadata entry 'name'")
return canonicalize_name(name)
|
BasePath
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-tableau/dagster_tableau/translator.py
|
{
"start": 6835,
"end": 7036
}
|
class ____(NamespacedTagSet):
asset_type: Optional[Literal["dashboard", "data_source", "sheet"]] = None
@classmethod
def namespace(cls) -> str:
return "dagster-tableau"
|
TableauTagSet
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/embedding_context_utils.py
|
{
"start": 965,
"end": 1542
}
|
class ____:
"""Disables embedding pipelining for all ops created in the scope."""
def __init__(self):
self._original_embedding_pipelining_state_enabled = (
embedding_pipelining_state.enabled
)
def __enter__(self):
embedding_pipelining_state.enabled = False
logging.info("Entering SequentialEmbeddingContext.")
def __exit__(self, exc_type, exc_val, exc_tb):
embedding_pipelining_state.enabled = (
self._original_embedding_pipelining_state_enabled
)
logging.info("Exiting SequentialEmbeddingContext.")
|
SequentialEmbeddingContext
|
python
|
pytorch__pytorch
|
test/dynamo/test_guard_serialization.py
|
{
"start": 60265,
"end": 61654
}
|
class ____(torch.nn.Module):
def __init__(self, c):
super().__init__()
self.c = c
self.p = torch.nn.Parameter(torch.randn(3, 2))
def forward(self, x):
z = x + 1
for p in self.parameters():
z += p
return z
if not IS_MACOS:
from torch.testing._internal.common_fsdp import FSDPTestMultiThread
@torch._dynamo.config.patch({"strict_precompile": True})
class TestGuardSerializationFSDP(TestGuardSerializationBase, FSDPTestMultiThread):
def setUp(self):
TestGuardSerializationBase.setUp(self)
FSDPTestMultiThread.setUp(self)
def test_guard_serialization_fsdp_module(self):
from torch.distributed._tensor import distribute_tensor, Replicate
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.fsdp import fully_shard
mesh = init_device_mesh(str(torch.get_default_device()), (1,))
m = SimpleModule(42)
m = fully_shard(m, mesh=mesh)
inputs = distribute_tensor(torch.randn(3, 2), mesh, [Replicate()])
ref, loaded = self._test_serialization("TENSOR_MATCH", m, inputs)
self._test_check_fn(ref, loaded, {"self": m, "x": inputs}, True)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
SimpleModule
|
python
|
walkccc__LeetCode
|
solutions/369. Plus One Linked List/369.py
|
{
"start": 0,
"end": 362
}
|
class ____:
def plusOne(self, head: ListNode) -> ListNode:
if not head:
return ListNode(1)
if self._addOne(head) == 1:
return ListNode(1, head)
return head
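  # _addOne recursively adds 1 starting from the last digit and returns the carry to propagate backward.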
def _addOne(self, node: ListNode) -> int:
carry = self._addOne(node.next) if node.next else 1
summ = node.val + carry
node.val = summ % 10
return summ // 10
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-good-days-to-rob-the-bank.py
|
{
"start": 29,
"end": 671
}
|
class ____(object):
def goodDaysToRobBank(self, security, time):
"""
:type security: List[int]
:type time: int
:rtype: List[int]
"""
right = [0]
for i in reversed(xrange(1, len(security))):
right.append(right[-1]+1 if security[i] >= security[i-1] else 0)
right.reverse()
result = []
left = 0
for i in xrange(len(security)):
if left >= time and right[i] >= time:
result.append(i)
if i+1 < len(security):
left = left+1 if security[i] >= security[i+1] else 0
return result
|
Solution
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/index/package_finder.py
|
{
"start": 21762,
"end": 39511
}
|
class ____:
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(
self,
link_collector: LinkCollector,
target_python: TargetPython,
allow_yanked: bool,
format_control: Optional[FormatControl] = None,
candidate_prefs: Optional[CandidatePreferences] = None,
ignore_requires_python: Optional[bool] = None,
ignore_compatibility: Optional[bool] = False,
) -> None:
"""
This constructor is primarily meant to be used by the create() class
method and from tests.
:param format_control: A FormatControl object, used to control
the selection of source packages / binary packages when consulting
the index and links.
:param candidate_prefs: Options to use when creating a
CandidateEvaluator object.
"""
if candidate_prefs is None:
candidate_prefs = CandidatePreferences()
format_control = format_control or FormatControl(set(), set())
self._allow_yanked = allow_yanked
self._candidate_prefs = candidate_prefs
self._ignore_requires_python = ignore_requires_python
self._link_collector = link_collector
self._target_python = target_python
self._ignore_compatibility = ignore_compatibility
self.format_control = format_control
# These are boring links that have already been logged somehow.
self._logged_links: Set[Tuple[Link, LinkType, str]] = set()
# Cache of the result of finding candidates
self._all_candidates: Dict[str, List[InstallationCandidate]] = {}
self._best_candidates: Dict[
Tuple[str, Optional[specifiers.BaseSpecifier], Optional[Hashes]],
BestCandidateResult,
] = {}
# Don't include an allow_yanked default value to make sure each call
# site considers whether yanked releases are allowed. This also causes
# that decision to be made explicit in the calling code, which helps
# people when reading the code.
@classmethod
def create(
cls,
link_collector: LinkCollector,
selection_prefs: SelectionPreferences,
target_python: Optional[TargetPython] = None,
) -> "PackageFinder":
"""Create a PackageFinder.
:param selection_prefs: The candidate selection preferences, as a
SelectionPreferences object.
:param target_python: The target Python interpreter to use when
checking compatibility. If None (the default), a TargetPython
object will be constructed from the running Python.
"""
if target_python is None:
target_python = TargetPython()
candidate_prefs = CandidatePreferences(
prefer_binary=selection_prefs.prefer_binary,
allow_all_prereleases=selection_prefs.allow_all_prereleases,
)
return cls(
candidate_prefs=candidate_prefs,
link_collector=link_collector,
target_python=target_python,
allow_yanked=selection_prefs.allow_yanked,
format_control=selection_prefs.format_control,
ignore_requires_python=selection_prefs.ignore_requires_python,
)
@property
def target_python(self) -> TargetPython:
return self._target_python
@property
def search_scope(self) -> SearchScope:
return self._link_collector.search_scope
@search_scope.setter
def search_scope(self, search_scope: SearchScope) -> None:
self._link_collector.search_scope = search_scope
@property
def find_links(self) -> List[str]:
return self._link_collector.find_links
@property
def index_urls(self) -> List[str]:
return self.search_scope.index_urls
@property
def proxy(self) -> Optional[str]:
return self._link_collector.session.pip_proxy
@property
def trusted_hosts(self) -> Iterable[str]:
for host_port in self._link_collector.session.pip_trusted_origins:
yield build_netloc(*host_port)
@property
def custom_cert(self) -> Optional[str]:
# session.verify is either a boolean (use default bundle/no SSL
# verification) or a string path to a custom CA bundle to use. We only
# care about the latter.
verify = self._link_collector.session.verify
return verify if isinstance(verify, str) else None
@property
def client_cert(self) -> Optional[str]:
cert = self._link_collector.session.cert
assert not isinstance(cert, tuple), "pip only supports PEM client certs"
return cert
@property
def allow_all_prereleases(self) -> bool:
return self._candidate_prefs.allow_all_prereleases
def set_allow_all_prereleases(self) -> None:
self._candidate_prefs.allow_all_prereleases = True
@property
def prefer_binary(self) -> bool:
return self._candidate_prefs.prefer_binary
def set_prefer_binary(self) -> None:
self._candidate_prefs.prefer_binary = True
def requires_python_skipped_reasons(self) -> List[str]:
reasons = {
detail
for _, result, detail in self._logged_links
if result == LinkType.requires_python_mismatch
}
return sorted(reasons)
def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
canonical_name = canonicalize_name(project_name)
formats = self.format_control.get_allowed_formats(canonical_name)
return LinkEvaluator(
project_name=project_name,
canonical_name=canonical_name,
formats=formats,
target_python=self._target_python,
allow_yanked=self._allow_yanked,
ignore_requires_python=self._ignore_requires_python,
ignore_compatibility=self._ignore_compatibility,
)
def _sort_links(self, links: Iterable[Link]) -> List[Link]:
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen: Set[Link] = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None:
entry = (link, result, detail)
if entry not in self._logged_links:
# Put the link at the end so the reason is more visible and because
# the link string is usually very long.
logger.debug("Skipping link: %s: %s", detail, link)
self._logged_links.add(entry)
def get_install_candidate(
self, link_evaluator: LinkEvaluator, link: Link
) -> Optional[InstallationCandidate]:
"""
If the link is a candidate for install, convert it to an
InstallationCandidate and return it. Otherwise, return None.
"""
result, detail = link_evaluator.evaluate_link(link)
if result != LinkType.candidate:
self._log_skipped_link(link, result, detail)
return None
try:
return InstallationCandidate(
name=link_evaluator.project_name,
link=link,
version=detail,
)
except InvalidVersion:
return None
def evaluate_links(
self, link_evaluator: LinkEvaluator, links: Iterable[Link]
) -> List[InstallationCandidate]:
"""
Convert links that are candidates to InstallationCandidate objects.
"""
candidates = []
for link in self._sort_links(links):
candidate = self.get_install_candidate(link_evaluator, link)
if candidate is not None:
candidates.append(candidate)
return candidates
def process_project_url(
self, project_url: Link, link_evaluator: LinkEvaluator
) -> List[InstallationCandidate]:
logger.debug(
"Fetching project page and analyzing links: %s",
project_url,
)
index_response = self._link_collector.fetch_response(project_url)
if index_response is None:
return []
page_links = list(parse_links(index_response))
with indent_log():
package_links = self.evaluate_links(
link_evaluator,
links=page_links,
)
return package_links
def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]:
"""Find all available InstallationCandidate for project_name
This checks index_urls and find_links.
All versions found are returned as an InstallationCandidate list.
See LinkEvaluator.evaluate_link() for details on which files
are accepted.
"""
if project_name in self._all_candidates:
return self._all_candidates[project_name]
link_evaluator = self.make_link_evaluator(project_name)
collected_sources = self._link_collector.collect_sources(
project_name=project_name,
candidates_from_page=functools.partial(
self.process_project_url,
link_evaluator=link_evaluator,
),
)
page_candidates_it = itertools.chain.from_iterable(
source.page_candidates()
for sources in collected_sources
for source in sources
if source is not None
)
page_candidates = list(page_candidates_it)
file_links_it = itertools.chain.from_iterable(
source.file_links()
for sources in collected_sources
for source in sources
if source is not None
)
file_candidates = self.evaluate_links(
link_evaluator,
sorted(file_links_it, reverse=True),
)
if logger.isEnabledFor(logging.DEBUG) and file_candidates:
paths = []
for candidate in file_candidates:
assert candidate.link.url # we need to have a URL
try:
paths.append(candidate.link.file_path)
except Exception:
paths.append(candidate.link.url) # it's not a local file
logger.debug("Local files found: %s", ", ".join(paths))
# This is an intentional priority ordering
self._all_candidates[project_name] = file_candidates + page_candidates
return self._all_candidates[project_name]
def make_candidate_evaluator(
self,
project_name: str,
specifier: Optional[specifiers.BaseSpecifier] = None,
hashes: Optional[Hashes] = None,
) -> CandidateEvaluator:
"""Create a CandidateEvaluator object to use."""
candidate_prefs = self._candidate_prefs
return CandidateEvaluator.create(
project_name=project_name,
target_python=self._target_python,
prefer_binary=candidate_prefs.prefer_binary,
allow_all_prereleases=candidate_prefs.allow_all_prereleases,
specifier=specifier,
hashes=hashes,
)
def find_best_candidate(
self,
project_name: str,
specifier: Optional[specifiers.BaseSpecifier] = None,
hashes: Optional[Hashes] = None,
) -> BestCandidateResult:
"""Find matches for the given project and specifier.
:param specifier: An optional object implementing `filter`
(e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
versions.
:return: A `BestCandidateResult` instance.
"""
if (project_name, specifier, hashes) in self._best_candidates:
return self._best_candidates[project_name, specifier, hashes]
candidates = self.find_all_candidates(project_name)
candidate_evaluator = self.make_candidate_evaluator(
project_name=project_name,
specifier=specifier,
hashes=hashes,
)
self._best_candidates[project_name, specifier, hashes] = (
candidate_evaluator.compute_best_candidate(candidates)
)
return self._best_candidates[project_name, specifier, hashes]
def find_requirement(
self, req: InstallRequirement, upgrade: bool
) -> Optional[InstallationCandidate]:
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a InstallationCandidate if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
name = req.name
assert name is not None, "find_requirement() called with no name"
hashes = req.hashes(trust_internet=False)
best_candidate_result = self.find_best_candidate(
name,
specifier=req.specifier,
hashes=hashes,
)
best_candidate = best_candidate_result.best_candidate
installed_version: Optional[_BaseVersion] = None
if req.satisfied_by is not None:
installed_version = req.satisfied_by.version
def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str:
# This repeated parse_version and str() conversion is needed to
# handle different vendoring sources from pip and pkg_resources.
# If we stop using the pkg_resources provided specifier and start
# using our own, we can drop the cast to str().
return (
", ".join(
sorted(
{str(c.version) for c in cand_iter},
key=parse_version,
)
)
or "none"
)
if installed_version is None and best_candidate is None:
logger.critical(
"Could not find a version that satisfies the requirement %s "
"(from versions: %s)",
req,
_format_versions(best_candidate_result.all_candidates),
)
raise DistributionNotFound(f"No matching distribution found for {req}")
def _should_install_candidate(
candidate: Optional[InstallationCandidate],
) -> "TypeGuard[InstallationCandidate]":
if installed_version is None:
return True
if best_candidate is None:
return False
return best_candidate.version > installed_version
if not upgrade and installed_version is not None:
if _should_install_candidate(best_candidate):
logger.debug(
"Existing installed version (%s) satisfies requirement "
"(most up-to-date version is %s)",
installed_version,
best_candidate.version,
)
else:
logger.debug(
"Existing installed version (%s) is most up-to-date and "
"satisfies requirement",
installed_version,
)
return None
if _should_install_candidate(best_candidate):
logger.debug(
"Using version %s (newest of versions: %s)",
best_candidate.version,
_format_versions(best_candidate_result.applicable_candidates),
)
return best_candidate
        # We have an existing version, and it's the best version
logger.debug(
"Installed version (%s) is most up-to-date (past versions: %s)",
installed_version,
_format_versions(best_candidate_result.applicable_candidates),
)
raise BestVersionAlreadyInstalled
def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
"""Find the separator's index based on the package's canonical name.
:param fragment: A <package>+<version> filename "fragment" (stem) or
egg fragment.
:param canonical_name: The package's canonical name.
This function is needed since the canonicalized name does not necessarily
have the same length as the egg info's name part. An example::
>>> fragment = 'foo__bar-1.0'
>>> canonical_name = 'foo-bar'
>>> _find_name_version_sep(fragment, canonical_name)
8
"""
# Project name and version must be separated by one single dash. Find all
# occurrences of dashes; if the string in front of it matches the canonical
# name, this is the one separating the name and version parts.
for i, c in enumerate(fragment):
if c != "-":
continue
if canonicalize_name(fragment[:i]) == canonical_name:
return i
raise ValueError(f"{fragment} does not match {canonical_name}")
def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
"""Parse the version string from a <package>+<version> filename
"fragment" (stem) or egg fragment.
:param fragment: The string to parse. E.g. foo-2.1
:param canonical_name: The canonicalized name of the package this
belongs to.
"""
try:
version_start = _find_name_version_sep(fragment, canonical_name) + 1
except ValueError:
return None
version = fragment[version_start:]
if not version:
return None
return version
|
PackageFinder
|
python
|
django__django
|
django/template/loader_tags.py
|
{
"start": 970,
"end": 2619
}
|
class ____(Node):
def __init__(self, name, nodelist, parent=None):
self.name = name
self.nodelist = nodelist
self.parent = parent
def __repr__(self):
return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
def render(self, context):
block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
with context.push():
if block_context is None:
context["block"] = self
result = self.nodelist.render(context)
else:
push = block = block_context.pop(self.name)
if block is None:
block = self
# Create new block so we can store context without
# thread-safety issues.
block = type(self)(block.name, block.nodelist)
block.context = context
context["block"] = block
result = block.nodelist.render(context)
if push is not None:
block_context.push(self.name, push)
return result
def super(self):
if not hasattr(self, "context"):
raise TemplateSyntaxError(
"'%s' object has no attribute 'context'. Did you use "
"{{ block.super }} in a base template?" % self.__class__.__name__
)
render_context = self.context.render_context
if (
BLOCK_CONTEXT_KEY in render_context
and render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None
):
return mark_safe(self.render(self.context))
return ""
|
BlockNode
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/asyncio/test_session.py
|
{
"start": 34252,
"end": 35722
}
|
class ____(AsyncFixture):
def test_default(self, async_engine):
ass = AsyncSession(async_engine)
is_true(isinstance(ass.sync_session, Session))
is_(ass.sync_session.__class__, Session)
is_(ass.sync_session_class, Session)
def test_init_class(self, async_engine):
ass = AsyncSession(async_engine, sync_session_class=_MySession)
is_true(isinstance(ass.sync_session, _MySession))
is_(ass.sync_session_class, _MySession)
def test_init_orm_sessionmaker(self, async_engine):
sm = sessionmaker(
async_engine, class_=AsyncSession, sync_session_class=_MySession
)
ass = sm()
is_true(isinstance(ass.sync_session, _MySession))
is_(ass.sync_session_class, _MySession)
def test_init_asyncio_sessionmaker(self, async_engine):
sm = async_sessionmaker(async_engine, sync_session_class=_MySession)
ass = sm()
is_true(isinstance(ass.sync_session, _MySession))
is_(ass.sync_session_class, _MySession)
def test_subclass(self, async_engine):
ass = _MyAS(async_engine)
is_true(isinstance(ass.sync_session, _MySession))
is_(ass.sync_session_class, _MySession)
def test_subclass_override(self, async_engine):
ass = _MyAS(async_engine, sync_session_class=Session)
is_true(not isinstance(ass.sync_session, _MySession))
is_(ass.sync_session_class, Session)
|
OverrideSyncSession
|
python
|
python__mypy
|
mypy/test/testfinegrained.py
|
{
"start": 15103,
"end": 17776
}
|
class ____(unittest.TestCase):
def test_simple_sorting(self) -> None:
msgs = ['x.py:1: error: "int" not callable', 'foo/y.py:123: note: "X" not defined']
old_msgs = ['foo/y.py:12: note: "Y" not defined', 'x.py:8: error: "str" not callable']
assert sort_messages_preserving_file_order(msgs, old_msgs) == list(reversed(msgs))
assert sort_messages_preserving_file_order(list(reversed(msgs)), old_msgs) == list(
reversed(msgs)
)
def test_long_form_sorting(self) -> None:
# Multi-line errors should be sorted together and not split.
msg1 = [
'x.py:1: error: "int" not callable',
"and message continues (x: y)",
" 1()",
" ^~~",
]
msg2 = [
'foo/y.py: In function "f":',
'foo/y.py:123: note: "X" not defined',
"and again message continues",
]
old_msgs = ['foo/y.py:12: note: "Y" not defined', 'x.py:8: error: "str" not callable']
assert sort_messages_preserving_file_order(msg1 + msg2, old_msgs) == msg2 + msg1
assert sort_messages_preserving_file_order(msg2 + msg1, old_msgs) == msg2 + msg1
def test_mypy_error_prefix(self) -> None:
# Some errors don't have a file and start with "mypy: ". These
# shouldn't be sorted together with file-specific errors.
msg1 = 'x.py:1: error: "int" not callable'
msg2 = 'foo/y:123: note: "X" not defined'
msg3 = "mypy: Error not associated with a file"
old_msgs = [
"mypy: Something wrong",
'foo/y:12: note: "Y" not defined',
'x.py:8: error: "str" not callable',
]
assert sort_messages_preserving_file_order([msg1, msg2, msg3], old_msgs) == [
msg2,
msg1,
msg3,
]
assert sort_messages_preserving_file_order([msg3, msg2, msg1], old_msgs) == [
msg2,
msg1,
msg3,
]
def test_new_file_at_the_end(self) -> None:
msg1 = 'x.py:1: error: "int" not callable'
msg2 = 'foo/y.py:123: note: "X" not defined'
new1 = "ab.py:3: error: Problem: error"
new2 = "aaa:3: error: Bad"
old_msgs = ['foo/y.py:12: note: "Y" not defined', 'x.py:8: error: "str" not callable']
assert sort_messages_preserving_file_order([msg1, msg2, new1], old_msgs) == [
msg2,
msg1,
new1,
]
assert sort_messages_preserving_file_order([new1, msg1, msg2, new2], old_msgs) == [
msg2,
msg1,
new1,
new2,
]
|
TestMessageSorting
|
python
|
google__jax
|
tests/compilation_cache_test.py
|
{
"start": 3080,
"end": 26735
}
|
class ____(CompilationCacheTestCase):
def setUp(self):
super().setUp()
supported_platforms = ["tpu", "gpu", "cpu"]
if not jtu.test_device_matches(supported_platforms):
raise SkipTest(
"serialize executable only works on " + ",".join(supported_platforms)
)
def test_get_no_executable(self):
computation = jax.jit(lambda x, y: x + y).lower(1, 1).compiler_ir()
devices = np.array([[jax.local_devices()[0]]])
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
backend = xla_bridge.get_backend()
key = cc.get_cache_key(computation, devices, compile_options, backend)
executable, compile_time = cc.get_executable_and_time(
key, compile_options, backend, xc.DeviceList(tuple(devices.flat)))
self.assertIsNone(executable)
self.assertIsNone(compile_time)
def test_diff_executables(self):
computation1 = str(jax.jit(lambda x, y: x + y).lower(1, 1).compiler_ir())
computation2 = str(jax.jit(lambda x, y: x * y).lower(2, 2).compiler_ir())
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
backend = xla_bridge.get_backend()
executable_devices = xc.DeviceList(tuple(backend.local_devices()))
executable1 = backend.compile_and_load(
computation1, executable_devices, compile_options)
executable2 = backend.compile_and_load(
computation2, executable_devices, compile_options)
cc.put_executable_and_time(
"key1", "computation1", executable1, backend, FAKE_COMPILE_TIME)
cc.put_executable_and_time(
"key2", "computation2", executable2, backend, FAKE_COMPILE_TIME)
self.assertNotEqual(
cc.get_executable_and_time(
"key1", compile_options, backend, executable_devices)[0],
cc.get_executable_and_time(
"key2", compile_options, backend, executable_devices)[0]
)
def test_put_executable(self):
computation = (
jax.jit(lambda x, y: x + y)
.lower(np.int32(1), np.int32(1))
.compiler_ir()
)
devices = np.array([[jax.local_devices()[0]]])
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
backend = xla_bridge.get_backend()
executable_devices = xc.DeviceList(tuple(devices.flat))
executable = backend.compile_and_load(
str(computation), executable_devices, compile_options)
key = cc.get_cache_key(computation, devices, compile_options, backend)
cc.put_executable_and_time(
key, "alambda", executable, backend, FAKE_COMPILE_TIME)
executable_retrieved, compile_time_retrieved = cc.get_executable_and_time(
key, compile_options, backend, executable_devices)
inputs_to_executable = (
jnp.array(1, dtype=np.int32),
jnp.array(2, dtype=np.int32),
)
expected = executable.execute(inputs_to_executable)
actual = executable_retrieved.execute(inputs_to_executable)
self.assertEqual(expected, actual)
self.assertEqual(FAKE_COMPILE_TIME, compile_time_retrieved)
def test_pmap(self):
f = pmap(lambda x: x - lax.psum(x, "i"), axis_name="i")
x = np.arange(jax.device_count(), dtype=np.int32)
f(x)
self.assertEqual(count_cache_items(), 1)
x = np.arange(jax.device_count(), dtype=np.float32)
f(x)
self.assertEqual(count_cache_items(), 2)
# TODO: create a test for calling pmap with the same input more than once
def test_pmap_with_consts(self):
const = jnp.array([42, 43], dtype=np.int32)
clear_cache()
f = pmap(lambda x: x - lax.psum(x, "i") + const[0], axis_name="i")
x = np.arange(jax.device_count(), dtype=np.int32)
self.assertAllClose(f(x), x - np.sum(x, dtype=np.int32) + np.int32(42))
self.assertEqual(count_cache_items(), 1)
const1 = jnp.array([142, 143], dtype=np.int32) # another const
f1 = pmap(lambda x: x - lax.psum(x, "i") + const1[0], axis_name="i")
expected_compilations = 0 if config.use_simplified_jaxpr_constants.value else 1
self.assertCacheMisses(lambda: f1(x),
lowering=1,
compilation_after_persistent_cache_miss=expected_compilations)
self.assertAllClose(f1(x), x - np.sum(x, dtype=np.int32) + np.int32(142))
self.assertEqual(count_cache_items(), 1 + expected_compilations)
def test_jit(self):
f = jit(lambda x: x * x)
self.assertCacheMisses(lambda: f(1), lowering=1,
compilation_after_persistent_cache_miss=1)
self.assertEqual(count_cache_items(), 1)
f1 = jit(lambda x: x * x)
self.assertCacheMisses(lambda: f1(2), lowering=1,
compilation_after_persistent_cache_miss=0)
f(1.0)
self.assertEqual(count_cache_items(), 2)
def test_jit_sharded(self):
mesh = jtu.create_mesh((2,), 'x')
with jax.set_mesh(mesh):
@jax.jit(in_shardings=(P("x"), P("x")), out_shardings=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(math.prod(shape), dtype=np.int64).reshape(shape)
f(x, x + 1)
self.assertEqual(count_cache_items(), 1)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f(x, x + 1)
self.assertEqual(count_cache_items(), 2)
def test_jit_with_constants(self):
const = jnp.array([42, 43]) # A distinctive shape
clear_cache()
f = jit(lambda x: x * const[0])
self.assertAllClose(f(2), 2 * 42)
self.assertEqual(count_cache_items(), 1)
const1 = jnp.array([142, 143]) # The closed over const can be different
f1 = jit(lambda x: x * const1[0])
expected_compilations = 0 if config.use_simplified_jaxpr_constants.value else 1
self.assertCacheMisses(
lambda: f1(3), lowering=1,
compilation_after_persistent_cache_miss=expected_compilations)
self.assertAllClose(f1(3), 3 * 142)
self.assertEqual(count_cache_items(), 1 + expected_compilations)
def test_set_cache_dir_after_backends_init(self):
# This a regression test for #25768
with config.compilation_cache_dir(None):
cc.reset_cache()
backend = xla_bridge.get_backend()
a = jnp.zeros((2,3))
self.assertFalse(cc.is_persistent_cache_enabled())
cache = cc._get_cache(backend)
self.assertIsNone(cache) # Not able to create cache
with tempfile.TemporaryDirectory() as tmp_cache_dir:
with config.compilation_cache_dir(tmp_cache_dir):
f = jit(lambda x: x + 1)
f(a) # Compile and cache
self.assertTrue(cc.is_persistent_cache_enabled())
cache = cc._get_cache(backend)
self.assertIsNotNone(cache) # Cache is created
def test_enable_compilation_cache(self):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
with (
config.enable_compilation_cache(False),
config.compilation_cache_dir(tmp_cache_dir)
):
cc.reset_cache() # reset cache before testing
backend = xla_bridge.get_backend()
f = jit(lambda x: x + 1)
f(1) # Compile and cache
cache = cc._get_cache(backend)
self.assertIsNone(cache) # Cache should not exist
with config.enable_compilation_cache(True):
cc.reset_cache()
backend = xla_bridge.get_backend()
g = jit(lambda x: x * 3)
g(2)
cache = cc._get_cache(backend)
self.assertIsNotNone(cache) # Cache should be initialized
def test_xla_autofdo_profile_version(self):
original_profile_version = config.jax_xla_profile_version.value
with config.jax_xla_profile_version(original_profile_version + 1):
f = jit(lambda x: x * x)
f(1)
self.assertEqual(count_cache_items(), 1)
# Clear the cache directory, then update the profile version and execute
# again. The in-memory caches should be invalidated and a new persistent
# cache entry created.
clear_cache()
with config.jax_xla_profile_version(original_profile_version + 2):
f(1)
self.assertEqual(count_cache_items(), 1)
def test_cache_write_warning(self):
f = jit(lambda x: x * x)
backend = xla_bridge.get_backend()
with (
config.raise_persistent_cache_errors(False),
mock.patch.object(cc._get_cache(backend).__class__, "put") as mock_put,
test_warning_util.record_warnings() as w,
):
mock_put.side_effect = RuntimeError("test error")
self.assertEqual(f(2).item(), 4)
if len(w) != 1:
print("Warnings:", [str(w_) for w_ in w], flush=True)
self.assertLen(w, 1)
self.assertIn(
(
"Error writing persistent compilation cache entry "
"for 'jit__lambda': RuntimeError: test error"
),
str(w[0].message),
)
def test_cache_read_warning(self):
f = jit(lambda x: x * x)
backend = xla_bridge.get_backend()
with (
config.raise_persistent_cache_errors(False),
mock.patch.object(cc._get_cache(backend).__class__, "get") as mock_get,
test_warning_util.record_warnings() as w,
):
mock_get.side_effect = RuntimeError("test error")
# Calling assertEqual with the jitted f will generate two JIT
# executables: Equal and the lambda function itself.
self.assertEqual(f(2).item(), 4)
if len(w) != 1:
print("Warnings:", [str(w_) for w_ in w], flush=True)
self.assertLen(w, 1)
self.assertIn(
(
"Error reading persistent compilation cache entry "
"for 'jit__lambda': RuntimeError: test error"
),
str(w[0].message),
)
def test_min_entry_size(self):
with (
config.persistent_cache_min_compile_time_secs(0),
config.persistent_cache_min_entry_size_bytes(1048576), # 1MiB
):
jit(lambda x: x + 1)(1)
self.assertEqual(count_cache_items(), 0)
def test_min_compile_time(self):
with (
config.persistent_cache_min_compile_time_secs(2),
config.persistent_cache_min_entry_size_bytes(0),
):
# Mock time to progress in small intervals so compilation time is small.
with mock.patch("time.monotonic", side_effect=np.arange(0, 10, 0.1)):
jit(lambda x: x + 1)(1)
self.assertEqual(count_cache_items(), 0)
# Mock time to progress in large intervals so compilation time is large.
with mock.patch("time.monotonic", side_effect=np.arange(0, 100, 10)):
jit(lambda x: x + 2)(1)
self.assertEqual(count_cache_items(), 1)
# This is perhaps related to mocking time.monotonic?
@unittest.skipIf(platform.system() == "Windows", "Test fails on Windows")
def test_cache_saving_metric(self):
with (
config.persistent_cache_min_compile_time_secs(2),
config.persistent_cache_min_entry_size_bytes(0),
):
durations = Counter() # Map metric name to time duration.
def append_metric_duration(metric, duration, **kwargs):
del kwargs
durations[metric] += duration
with jtu.register_event_duration_listener(append_metric_duration):
# Mock time to create a short compilation time, no cache saved, no cache
# hit, no metric recorded.
with mock.patch("time.monotonic", side_effect=np.arange(0, 1, 0.1)):
jit(lambda x: x + 1)(1)
jit(lambda x: x + 1)(1)
self.assertNotIn(
"/jax/compilation_cache/cache_retrieval_time_sec", durations)
self.assertNotIn(
"/jax/compilation_cache/compile_time_saved_sec", durations)
# Mock time to create a long compilation time, metrics incremented with
# a cache hit.
with mock.patch("time.monotonic", side_effect=np.arange(0, 100, 10)):
jit(lambda x: x + 2)(1)
jit(lambda x: x + 2)(1)
self.assertGreater(
durations["/jax/compilation_cache/cache_retrieval_time_sec"], 0)
self.assertGreater(
durations["/jax/compilation_cache/compile_time_saved_sec"], 0)
def test_task_using_cache_metric(self):
count_before_first_use = _counts[
"/jax/compilation_cache/tasks_using_cache"]
jit(lambda x: x + 1)(1)
count_after_first_use = _counts[
"/jax/compilation_cache/tasks_using_cache"]
self.assertEqual(count_after_first_use, count_before_first_use + 1)
# Verify that the count is incremented only once per task.
jit(lambda x: x + 3)(3)
count_after_second_use = _counts[
"/jax/compilation_cache/tasks_using_cache"]
self.assertEqual(count_after_second_use, count_after_first_use)
def test_compile_requests_use_cache_metric(self):
previous_counts = Counter(_counts)
jit(lambda x: x + 1)(1)
jit(lambda x: x + 2)(1)
jit(lambda x: x + 1)(1)
self.assertEqual(
_counts["/jax/compilation_cache/compile_requests_use_cache"]
- previous_counts["/jax/compilation_cache/compile_requests_use_cache"],
3)
@parameterized.parameters(0, 1048576) # 0 byte, 1 MiB
def test_cache_misses_metric(self, min_entry_size):
previous_counts = Counter(_counts)
with (
config.persistent_cache_min_compile_time_secs(2),
config.persistent_cache_min_entry_size_bytes(min_entry_size),
):
# Mock time to create a long compilation time and make cache misses.
with mock.patch("time.monotonic", side_effect=np.arange(0, 100, 10)):
jit(lambda x: x + 1)(1)
jit(lambda x: x + 2)(1)
if min_entry_size <= 0:
self.assertEqual(
_counts["/jax/compilation_cache/cache_misses"]
- previous_counts["/jax/compilation_cache/cache_misses"],
2)
else:
self.assertEqual(
_counts["/jax/compilation_cache/cache_misses"]
- previous_counts["/jax/compilation_cache/cache_misses"],
0)
def test_cache_hits_metric(self):
previous_counts = Counter(_counts)
with (
config.persistent_cache_min_compile_time_secs(2),
config.persistent_cache_min_entry_size_bytes(0),
):
# Mock time to create a long compilation time, cache saved.
with mock.patch("time.monotonic", side_effect=np.arange(0, 100, 10)):
jit(lambda x: x + 1)(1)
jit(lambda x: x + 1)(1)
self.assertEqual(
_counts["/jax/compilation_cache/cache_hits"]
- previous_counts["/jax/compilation_cache/cache_hits"],
1)
def test_persistent_cache_hit_logging(self):
jit(lambda x: x + 1)(1)
msg = "Persistent compilation cache hit"
# cache hits with `log_compiles` on should be in WARNING when enabled
with config.log_compiles(True):
with self.assertLogs(level="WARNING") as log:
jit(lambda x: x + 1)(1)
self.assertTrue(msg_exists_in_logs(msg, log.records, logging.WARNING))
def test_persistent_cache_hit_no_logging(self):
jit(lambda x: x + 1)(1)
msg = "Persistent compilation cache hit"
# cache hits with `log_compiles` off should NOT be in WARNING
with config.log_compiles(False):
with self.assertLogs(level="DEBUG") as log:
jit(lambda x: x + 1)(1)
self.assertFalse(msg_exists_in_logs(msg, log.records, logging.WARNING))
def test_persistent_cache_miss_logging_with_explain(self):
with (config.explain_cache_misses(True),
config.compilation_cache_dir("jax-cache")):
# omitting writing to cache because compilation is too fast
pure_fn = lambda a: jnp.array(1, dtype=jnp.int32)
with config.persistent_cache_min_compile_time_secs(1e5):
with self.assertLogs(level="DEBUG") as log:
jit(lambda x: x +
jax.pure_callback(pure_fn, jax.ShapeDtypeStruct((), jnp.int32), x)
)(1)
msg1 = "Not writing persistent cache entry"
msg2 = "because it uses host callbacks"
self.assertTrue(msg_exists_in_logs(msg1, log.records, logging.WARNING))
self.assertTrue(msg_exists_in_logs(msg2, log.records, logging.WARNING))
# omitting writing to cache because host callback is present
pure_fn = lambda a: jnp.array(1, dtype=jnp.int32)
with self.assertLogs(level="DEBUG") as log:
jit(lambda x: x +
jax.pure_callback(pure_fn, jax.ShapeDtypeStruct((), jnp.int32), x)
)(1)
msg1 = "Not writing persistent cache entry"
msg2 = "because it uses host callbacks"
self.assertTrue(msg_exists_in_logs(msg1, log.records, logging.WARNING))
self.assertTrue(msg_exists_in_logs(msg2, log.records, logging.WARNING))
# omitting writing to cache because binary is too small
with config.persistent_cache_min_entry_size_bytes(int(1e9)):
with self.assertLogs(level="DEBUG") as log:
jit(lambda x: x + 2)(1)
msg1 = "Not writing persistent cache entry"
msg2 = "is less than threshold"
self.assertTrue(msg_exists_in_logs(msg1, log.records, logging.WARNING))
self.assertTrue(msg_exists_in_logs(msg2, log.records, logging.WARNING))
# successful cache write
with config.persistent_cache_min_entry_size_bytes(1):
with self.assertLogs(level="DEBUG") as log:
jit(lambda x: x ** 2)(1)
msg = "to persistent compilation cache with key"
self.assertTrue(msg_exists_in_logs(msg, log.records, logging.WARNING))
def test_persistent_cache_miss_logging_with_no_explain(self):
# test that cache failure messages do not get logged in WARNING
with (config.explain_cache_misses(False),
config.compilation_cache_dir("jax-cache")):
# omitting writing to cache because compilation is too fast
with config.persistent_cache_min_compile_time_secs(1e3):
with self.assertLogs(level="DEBUG") as log:
jit(lambda x: x + 1)(1)
msg1, msg2 = "Not writing persistent cache entry", "because it took <"
self.assertFalse(msg_exists_in_logs(msg1, log.records, logging.WARNING))
self.assertFalse(msg_exists_in_logs(msg2, log.records, logging.WARNING))
# omitting writing to cache because host callback is present
pure_fn = lambda a: jnp.array(1, dtype=jnp.int32)
with self.assertLogs(level="DEBUG") as log:
jit(lambda x: x +
jax.pure_callback(pure_fn, jax.ShapeDtypeStruct((), jnp.int32), x)
)(1)
msg1 = "Not writing persistent cache entry"
msg2 = "because it uses host callbacks"
self.assertFalse(msg_exists_in_logs(msg1, log.records, logging.WARNING))
self.assertFalse(msg_exists_in_logs(msg2, log.records, logging.WARNING))
# omitting writing to cache because binary is too small
with config.persistent_cache_min_entry_size_bytes(int(1e9)):
with self.assertLogs(level="DEBUG") as log:
jit(lambda x: x + 2)(1)
msg1 = "Not writing persistent cache entry"
msg2 = "is less than threshold"
self.assertFalse(msg_exists_in_logs(msg1, log.records, logging.WARNING))
self.assertFalse(msg_exists_in_logs(msg2, log.records, logging.WARNING))
# successful cache write
with config.persistent_cache_min_entry_size_bytes(1):
with self.assertLogs(level="DEBUG") as log:
jit(lambda x: x ** 2)(1)
msg = "to persistent compilation cache with key"
self.assertFalse(msg_exists_in_logs(msg, log.records, logging.WARNING))
@parameterized.parameters(0, 1)
def test_cache_write_with_process_restriction(self, process_id):
with (
config.persistent_cache_min_compile_time_secs(0),
config.persistent_cache_min_entry_size_bytes(0),
mock.patch.object(distributed.global_state, "process_id", process_id),
):
jit(lambda x: x + 1)(1)
files_in_directory = count_cache_items()
if process_id == 0:
self.assertEqual(files_in_directory, 1)
elif process_id == 1:
self.assertEqual(files_in_directory, 0)
def test_backend_serialization_deserialization(self):
backend = xla_bridge.get_backend()
executable = (
jax.jit(lambda x, y: x + y)
.lower(np.array(1.), np.array(1.))
.compile()
.runtime_executable()
)
serialized_executable = backend.serialize_executable(executable)
deserialized_executable = backend.deserialize_executable( # type: ignore
serialized_executable,
xc.DeviceList(tuple(jax.local_devices(backend=backend))), None)
self.assertEqual(
executable.fingerprint, deserialized_executable.fingerprint)
def test_persistent_cache_enable_xla_caches(self):
s = os.sep
with config.compilation_cache_dir("jax-cache"):
with config.persistent_cache_enable_xla_caches("none"):
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_kernel_cache_file, "")
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_enable_llvm_module_compilation_parallelism, False)
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_per_fusion_autotune_cache_dir, "")
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_experimental_autotune_cache_mode, xc.AutotuneCacheMode.UPDATE)
with config.persistent_cache_enable_xla_caches("all"):
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_kernel_cache_file, f"jax-cache{s}xla_gpu_kernel_cache_file")
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_enable_llvm_module_compilation_parallelism, True)
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_per_fusion_autotune_cache_dir, f"jax-cache{s}xla_gpu_per_fusion_autotune_cache_dir")
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_experimental_autotune_cache_mode, xc.AutotuneCacheMode.UPDATE)
with config.persistent_cache_enable_xla_caches("xla_gpu_kernel_cache_file"):
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_kernel_cache_file, f"jax-cache{s}xla_gpu_kernel_cache_file")
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_enable_llvm_module_compilation_parallelism, True)
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_per_fusion_autotune_cache_dir, "")
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_experimental_autotune_cache_mode, xc.AutotuneCacheMode.UPDATE)
with config.persistent_cache_enable_xla_caches("xla_gpu_per_fusion_autotune_cache_dir"):
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_kernel_cache_file, "")
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_enable_llvm_module_compilation_parallelism, False)
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_per_fusion_autotune_cache_dir, f"jax-cache{s}xla_gpu_per_fusion_autotune_cache_dir")
self.assertEqual(compile_options.executable_build_options.debug_options.xla_gpu_experimental_autotune_cache_mode, xc.AutotuneCacheMode.UPDATE)
@jtu.with_config(
jax_enable_compilation_cache=False,
jax_persistent_cache_min_compile_time_secs=0,
jax_persistent_cache_min_entry_size_bytes=0,
)
|
CompilationCacheTest
|
python
|
redis__redis-py
|
tests/test_connection_pool.py
|
{
"start": 19386,
"end": 23066
}
|
class ____:
def test_host(self):
pool = redis.ConnectionPool.from_url("rediss://my.host")
assert pool.connection_class == redis.SSLConnection
assert pool.connection_kwargs == {"host": "my.host"}
def test_connection_class_override(self):
class MyConnection(redis.SSLConnection):
pass
pool = redis.ConnectionPool.from_url(
"rediss://my.host", connection_class=MyConnection
)
assert pool.connection_class == MyConnection
def test_cert_reqs_options(self):
class DummyConnectionPool(redis.ConnectionPool):
def get_connection(self):
return self.make_connection()
pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=none")
assert pool.get_connection().cert_reqs == ssl.CERT_NONE
pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=optional")
assert pool.get_connection().cert_reqs == ssl.CERT_OPTIONAL
pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=required")
assert pool.get_connection().cert_reqs == ssl.CERT_REQUIRED
pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=False")
assert pool.get_connection().check_hostname is False
pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=True")
assert pool.get_connection().check_hostname is True
def test_ssl_flags_config_parsing(self):
class DummyConnectionPool(redis.ConnectionPool):
def get_connection(self):
return self.make_connection()
pool = DummyConnectionPool.from_url(
"rediss://?ssl_include_verify_flags=VERIFY_X509_STRICT,VERIFY_CRL_CHECK_CHAIN"
)
assert pool.get_connection().ssl_include_verify_flags == [
ssl.VerifyFlags.VERIFY_X509_STRICT,
ssl.VerifyFlags.VERIFY_CRL_CHECK_CHAIN,
]
pool = DummyConnectionPool.from_url(
"rediss://?ssl_include_verify_flags=[VERIFY_X509_STRICT, VERIFY_CRL_CHECK_CHAIN]"
)
assert pool.get_connection().ssl_include_verify_flags == [
ssl.VerifyFlags.VERIFY_X509_STRICT,
ssl.VerifyFlags.VERIFY_CRL_CHECK_CHAIN,
]
pool = DummyConnectionPool.from_url(
"rediss://?ssl_exclude_verify_flags=VERIFY_X509_STRICT, VERIFY_CRL_CHECK_CHAIN"
)
assert pool.get_connection().ssl_exclude_verify_flags == [
ssl.VerifyFlags.VERIFY_X509_STRICT,
ssl.VerifyFlags.VERIFY_CRL_CHECK_CHAIN,
]
pool = DummyConnectionPool.from_url(
"rediss://?ssl_include_verify_flags=VERIFY_X509_STRICT, VERIFY_CRL_CHECK_CHAIN&ssl_exclude_verify_flags=VERIFY_CRL_CHECK_LEAF"
)
assert pool.get_connection().ssl_include_verify_flags == [
ssl.VerifyFlags.VERIFY_X509_STRICT,
ssl.VerifyFlags.VERIFY_CRL_CHECK_CHAIN,
]
assert pool.get_connection().ssl_exclude_verify_flags == [
ssl.VerifyFlags.VERIFY_CRL_CHECK_LEAF,
]
def test_ssl_flags_config_invalid_flag(self):
class DummyConnectionPool(redis.ConnectionPool):
def get_connection(self):
return self.make_connection()
with pytest.raises(ValueError):
DummyConnectionPool.from_url(
"rediss://?ssl_include_verify_flags=[VERIFY_X509,VERIFY_CRL_CHECK_CHAIN]"
)
with pytest.raises(ValueError):
DummyConnectionPool.from_url(
"rediss://?ssl_exclude_verify_flags=[VERIFY_X509_STRICT1, VERIFY_CRL_CHECK_CHAIN]"
)
|
TestSSLConnectionURLParsing
|
python
|
pypa__warehouse
|
tests/unit/manage/views/test_organizations.py
|
{
"start": 28660,
"end": 48614
}
|
class ____:
@pytest.mark.usefixtures("_enable_organizations")
def test_manage_organization(self, db_request, organization_service, monkeypatch):
db_request.user = pretend.stub()
organization = OrganizationFactory.create()
OrganizationProjectFactory.create(
organization=organization, project=ProjectFactory.create()
)
save_organization_obj = pretend.stub()
save_organization_cls = pretend.call_recorder(
lambda *a, **kw: save_organization_obj
)
monkeypatch.setattr(org_views, "SaveOrganizationForm", save_organization_cls)
save_organization_name_obj = pretend.stub()
save_organization_name_cls = pretend.call_recorder(
lambda *a, **kw: save_organization_name_obj
)
monkeypatch.setattr(
org_views, "SaveOrganizationNameForm", save_organization_name_cls
)
view = org_views.ManageOrganizationSettingsViews(organization, db_request)
result = view.manage_organization()
assert view.request == db_request
assert view.organization_service == organization_service
assert result == {
"organization": organization,
"save_organization_form": save_organization_obj,
"save_organization_name_form": save_organization_name_obj,
"active_projects": view.active_projects,
}
assert save_organization_cls.calls == [
pretend.call(
MultiDict(
{
"name": organization.name,
"display_name": organization.display_name,
"link_url": organization.link_url,
"description": organization.description,
"orgtype": organization.orgtype,
}
)
),
]
@pytest.mark.usefixtures("_enable_organizations")
@pytest.mark.parametrize(
("orgtype", "has_customer"),
[(orgtype, True) for orgtype in list(OrganizationType)]
+ [(orgtype, False) for orgtype in list(OrganizationType)],
)
def test_save_organization(
self,
db_request,
pyramid_user,
orgtype,
has_customer,
billing_service,
organization_service,
monkeypatch,
):
organization = OrganizationFactory.create(orgtype=orgtype)
customer = StripeCustomerFactory.create()
if has_customer:
OrganizationStripeCustomerFactory.create(
organization=organization, customer=customer
)
db_request.POST = {
"display_name": organization.display_name,
"link_url": organization.link_url,
"description": organization.description,
"orgtype": organization.orgtype,
}
db_request.registry.settings["site.name"] = "PiePeaEye"
monkeypatch.setattr(
organization_service,
"update_organization",
pretend.call_recorder(lambda *a, **kw: None),
)
monkeypatch.setattr(
billing_service,
"update_customer",
pretend.call_recorder(lambda stripe_customer_id, name, description: None),
)
save_organization_obj = pretend.stub(
validate=lambda: True, data=db_request.POST
)
save_organization_cls = pretend.call_recorder(
lambda *a, **kw: save_organization_obj
)
monkeypatch.setattr(org_views, "SaveOrganizationForm", save_organization_cls)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(org_views, "send_organization_updated_email", send_email)
monkeypatch.setattr(
org_views, "organization_owners", lambda *a, **kw: [pyramid_user]
)
view = org_views.ManageOrganizationSettingsViews(organization, db_request)
result = view.save_organization()
assert isinstance(result, HTTPSeeOther)
assert organization_service.update_organization.calls == [
pretend.call(organization.id, **db_request.POST)
]
assert billing_service.update_customer.calls == (
[
pretend.call(
customer.customer_id,
(
f"PiePeaEye Organization - {organization.display_name} "
f"({organization.name})"
),
organization.description,
)
]
if has_customer
else []
)
assert send_email.calls == [
pretend.call(
db_request,
{pyramid_user},
organization_name=organization.name,
organization_display_name=organization.display_name,
organization_link_url=organization.link_url,
organization_description=organization.description,
organization_orgtype=organization.orgtype,
previous_organization_display_name=organization.display_name,
previous_organization_link_url=organization.link_url,
previous_organization_description=organization.description,
previous_organization_orgtype=organization.orgtype,
),
]
@pytest.mark.usefixtures("_enable_organizations")
def test_save_organization_validation_fails(
self, db_request, organization_service, monkeypatch
):
organization = OrganizationFactory.create()
db_request.POST = {
"display_name": organization.display_name,
"link_url": organization.link_url,
"description": organization.description,
"orgtype": organization.orgtype,
}
db_request.user = pretend.stub()
monkeypatch.setattr(
organization_service,
"update_organization",
pretend.call_recorder(lambda *a, **kw: None),
)
save_organization_obj = pretend.stub(
validate=lambda: False, data=db_request.POST
)
save_organization_cls = pretend.call_recorder(
lambda *a, **kw: save_organization_obj
)
monkeypatch.setattr(org_views, "SaveOrganizationForm", save_organization_cls)
save_organization_name_obj = pretend.stub()
save_organization_name_cls = pretend.call_recorder(
lambda *a, **kw: save_organization_name_obj
)
monkeypatch.setattr(
org_views, "SaveOrganizationNameForm", save_organization_name_cls
)
view = org_views.ManageOrganizationSettingsViews(organization, db_request)
result = view.save_organization()
assert result == {
**view.default_response,
"save_organization_form": save_organization_obj,
}
assert organization_service.update_organization.calls == []
@pytest.mark.usefixtures("_enable_organizations")
def test_save_organization_name_wrong_confirm(
self, db_request, organization_service, monkeypatch
):
organization = OrganizationFactory.create(name="foobar")
db_request.POST = {
"confirm_current_organization_name": organization.name.upper(),
"name": "FooBar",
}
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
view = org_views.ManageOrganizationSettingsViews(organization, db_request)
with pytest.raises(HTTPSeeOther):
view.save_organization_name()
assert db_request.session.flash.calls == [
pretend.call(
(
"Could not rename organization - "
"'FOOBAR' is not the same as 'foobar'"
),
queue="error",
)
]
@pytest.mark.usefixtures("_enable_organizations")
def test_disable_save_organization_name(
self,
db_request,
pyramid_user,
user_service,
monkeypatch,
):
organization = OrganizationFactory.create(name="foobar")
db_request.POST = {
"confirm_current_organization_name": organization.name,
"name": "FooBar",
}
db_request.route_path = pretend.call_recorder(
lambda *a, organization_name, **kw: (
f"/manage/organization/{organization_name}/settings/"
)
)
admin = None
monkeypatch.setattr(
user_service,
"get_admin_user",
pretend.call_recorder(lambda *a, **kw: admin),
)
save_organization_obj = pretend.stub()
save_organization_cls = pretend.call_recorder(
lambda *a, **kw: save_organization_obj
)
monkeypatch.setattr(org_views, "SaveOrganizationForm", save_organization_cls)
save_organization_name_obj = pretend.stub(
validate=lambda: True, name=pretend.stub(data=db_request.POST["name"])
)
save_organization_name_cls = pretend.call_recorder(
lambda *a, **kw: save_organization_name_obj
)
monkeypatch.setattr(
org_views, "SaveOrganizationNameForm", save_organization_name_cls
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
view = org_views.ManageOrganizationSettingsViews(organization, db_request)
result = view.save_organization_name()
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == (
f"/manage/organization/{organization.normalized_name}/settings/"
)
assert send_email.calls == []
# When support for renaming orgs is re-introduced
# @pytest.mark.usefixtures("_enable_organizations")
# def test_save_organization_name(
# self,
# db_request,
# pyramid_user,
# organization_service,
# user_service,
# monkeypatch,
# ):
# organization = OrganizationFactory.create(name="foobar")
# db_request.POST = {
# "confirm_current_organization_name": organization.name,
# "name": "FooBar",
# }
# db_request.route_path = pretend.call_recorder(
# lambda *a, organization_name, **kw: (
# f"/manage/organization/{organization_name}/settings/"
# )
# )
# def rename_organization(organization_id, organization_name):
# organization.name = organization_name
# monkeypatch.setattr(
# organization_service,
# "rename_organization",
# pretend.call_recorder(rename_organization),
# )
# admin = None
# monkeypatch.setattr(
# user_service,
# "get_admin_user",
# pretend.call_recorder(lambda *a, **kw: admin),
# )
# save_organization_obj = pretend.stub()
# save_organization_cls = pretend.call_recorder(
# lambda *a, **kw: save_organization_obj
# )
# monkeypatch.setattr(org_views, "SaveOrganizationForm", save_organization_cls)
# save_organization_name_obj = pretend.stub(
# validate=lambda: True, name=pretend.stub(data=db_request.POST["name"])
# )
# save_organization_name_cls = pretend.call_recorder(
# lambda *a, **kw: save_organization_name_obj
# )
# monkeypatch.setattr(
# org_views, "SaveOrganizationNameForm", save_organization_name_cls
# )
# send_email = pretend.call_recorder(lambda *a, **kw: None)
# monkeypatch.setattr(org_views, "send_organization_renamed_email", send_email)
# monkeypatch.setattr(
# org_views, "organization_owners", lambda *a, **kw: [pyramid_user]
# )
# view = org_views.ManageOrganizationSettingsViews(organization, db_request)
# result = view.save_organization_name()
# assert isinstance(result, HTTPSeeOther)
# assert result.headers["Location"] == (
# f"/manage/organization/{organization.normalized_name}/settings/#modal-close"
# )
# assert organization_service.rename_organization.calls == [
# pretend.call(organization.id, "FooBar")
# ]
# assert send_email.calls == [
# pretend.call(
# db_request,
# {pyramid_user},
# organization_name="FooBar",
# previous_organization_name="foobar",
# ),
# ]
# @pytest.mark.usefixtures("_enable_organizations")
# def test_save_organization_name_validation_fails(
# self, db_request, organization_service, monkeypatch
# ):
# organization = OrganizationFactory.create(name="foobar")
# db_request.POST = {
# "confirm_current_organization_name": organization.name,
# "name": "FooBar",
# }
# db_request.user = pretend.stub()
# def rename_organization(organization_id, organization_name):
# organization.name = organization_name
# monkeypatch.setattr(
# organization_service,
# "rename_organization",
# pretend.call_recorder(rename_organization),
# )
# save_organization_obj = pretend.stub()
# save_organization_cls = pretend.call_recorder(
# lambda *a, **kw: save_organization_obj
# )
# monkeypatch.setattr(org_views, "SaveOrganizationForm", save_organization_cls)
# save_organization_name_obj = pretend.stub(
# validate=lambda: False, errors=pretend.stub(values=lambda: ["Invalid"])
# )
# save_organization_name_cls = pretend.call_recorder(
# lambda *a, **kw: save_organization_name_obj
# )
# monkeypatch.setattr(
# org_views, "SaveOrganizationNameForm", save_organization_name_cls
# )
# view = org_views.ManageOrganizationSettingsViews(organization, db_request)
# result = view.save_organization_name()
# assert result == {
# **view.default_response,
# "save_organization_name_form": save_organization_name_obj,
# }
# assert organization_service.rename_organization.calls == []
@pytest.mark.usefixtures("_enable_organizations")
def test_delete_organization(
self,
db_request,
pyramid_user,
organization_service,
user_service,
monkeypatch,
):
organization = OrganizationFactory.create()
db_request.POST = {"confirm_organization_name": organization.name}
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/manage/organizations/"
)
monkeypatch.setattr(
organization_service,
"delete_organization",
pretend.call_recorder(lambda *a, **kw: None),
)
admin = None
monkeypatch.setattr(
user_service,
"get_admin_user",
pretend.call_recorder(lambda *a, **kw: admin),
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(org_views, "send_organization_deleted_email", send_email)
monkeypatch.setattr(
org_views, "organization_owners", lambda *a, **kw: [pyramid_user]
)
view = org_views.ManageOrganizationSettingsViews(organization, db_request)
result = view.delete_organization()
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/manage/organizations/"
assert organization_service.delete_organization.calls == [
pretend.call(organization.id)
]
assert send_email.calls == [
pretend.call(
db_request,
{pyramid_user},
organization_name=organization.name,
),
]
assert db_request.route_path.calls == [pretend.call("manage.organizations")]
@pytest.mark.usefixtures("_enable_organizations")
def test_delete_organization_with_active_projects(
self,
db_request,
pyramid_user,
organization_service,
monkeypatch,
):
organization = OrganizationFactory.create()
OrganizationProjectFactory.create(
organization=organization, project=ProjectFactory.create()
)
db_request.POST = {"confirm_organization_name": organization.name}
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/manage/organizations/"
)
save_organization_obj = pretend.stub()
save_organization_cls = pretend.call_recorder(
lambda *a, **kw: save_organization_obj
)
monkeypatch.setattr(org_views, "SaveOrganizationForm", save_organization_cls)
save_organization_name_obj = pretend.stub()
save_organization_name_cls = pretend.call_recorder(
lambda *a, **kw: save_organization_name_obj
)
monkeypatch.setattr(
org_views, "SaveOrganizationNameForm", save_organization_name_cls
)
monkeypatch.setattr(
organization_service,
"delete_organization",
pretend.call_recorder(lambda *a, **kw: None),
)
view = org_views.ManageOrganizationSettingsViews(organization, db_request)
result = view.delete_organization()
assert result == view.default_response
assert organization_service.delete_organization.calls == []
assert db_request.route_path.calls == []
@pytest.mark.usefixtures("_enable_organizations")
def test_delete_organization_with_subscriptions(
self,
db_request,
pyramid_user,
organization_service,
user_service,
monkeypatch,
):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
OrganizationStripeSubscriptionFactory.create(
organization=organization, subscription=subscription
)
db_request.POST = {"confirm_organization_name": organization.name}
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/manage/organizations/"
)
monkeypatch.setattr(
organization_service,
"delete_organization",
pretend.call_recorder(lambda *a, **kw: None),
)
admin = None
monkeypatch.setattr(
user_service,
"get_admin_user",
pretend.call_recorder(lambda *a, **kw: admin),
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(org_views, "send_organization_deleted_email", send_email)
monkeypatch.setattr(
org_views, "organization_owners", lambda *a, **kw: [pyramid_user]
)
view = org_views.ManageOrganizationSettingsViews(organization, db_request)
result = view.delete_organization()
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/manage/organizations/"
assert organization_service.delete_organization.calls == [
pretend.call(organization.id)
]
assert send_email.calls == [
pretend.call(
db_request,
{pyramid_user},
organization_name=organization.name,
),
]
assert db_request.route_path.calls == [pretend.call("manage.organizations")]
|
TestManageOrganizationSettings
|
python
|
django__django
|
tests/fixtures_regress/models.py
|
{
"start": 2899,
"end": 3308
}
|
class ____(models.Model):
name = models.CharField(max_length=255)
author = models.ForeignKey(Person, models.CASCADE)
stores = models.ManyToManyField(Store)
class Meta:
ordering = ("name",)
def __str__(self):
return "%s by %s (available at %s)" % (
self.name,
self.author.name,
", ".join(s.name for s in self.stores.all()),
)
|
Book
|
python
|
PrefectHQ__prefect
|
src/prefect/events/filters.py
|
{
"start": 2283,
"end": 2884
}
|
class ____(EventDataFilter):
since: DateTime = Field(
default_factory=lambda: prefect.types._datetime.start_of_day(
prefect.types._datetime.now("UTC")
)
- datetime.timedelta(days=180),
description="Only include events after this time (inclusive)",
)
until: DateTime = Field(
default_factory=lambda: prefect.types._datetime.now("UTC"),
description="Only include events prior to this time (inclusive)",
)
def includes(self, event: Event) -> bool:
return self.since <= event.occurred <= self.until
|
EventOccurredFilter
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/loops/progress.py
|
{
"start": 3008,
"end": 3949
}
|
class ____(_StartedTracker):
"""Track an event's progress.
Args:
ready: Intended to track the number of events ready to start.
started: Intended to be incremented after the event is started (e.g. after ``on_*_start`` runs).
processed: Intended to be incremented after the event is processed.
completed: Intended to be incremented after the event completes (e.g. after ``on_*_end`` runs).
These attributes should be increased in order, that is, :attr:`ready` first and :attr:`completed` last.
"""
processed: int = 0
@override
def reset(self) -> None:
super().reset()
self.processed = 0
@override
def reset_on_restart(self) -> None:
super().reset_on_restart()
self.processed = self.completed
@override
def increment_by(self, n: int) -> None:
super().increment_by(n)
self.processed += n
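# Illustrative counter progression (hypothetical numbers): after one event
# runs to completion the tracker above would hold ready=1, started=1,
# processed=1, completed=1. If a run is interrupted between "started" and
# "processed", reset_on_restart() rolls processed (and, via the parent
# tracker, started) back to the completed count so the event is replayed.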
@dataclass
|
_ProcessedTracker
|
python
|
skorch-dev__skorch
|
scripts/hf-integration-tests.py
|
{
"start": 2457,
"end": 4326
}
|
class ____(nn.Module):
def __init__(self, name, num_labels):
super().__init__()
self.name = name
self.num_labels = num_labels
self.reset_weights()
def reset_weights(self):
self.bert = AutoModelForSequenceClassification.from_pretrained(
self.name, num_labels=self.num_labels
)
def forward(self, **kwargs):
pred = self.bert(**kwargs)
return pred.logits
def load_20newsgroup_small():
dataset = fetch_20newsgroups()
y = dataset.target
mask_0_or_1 = (y == 0) | (y == 1)
X = np.asarray(dataset.data)[mask_0_or_1][:N_SAMPLES]
y = dataset.target[mask_0_or_1][:N_SAMPLES]
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=0
)
return X_train, X_test, y_train, y_test
def get_transformer_model():
return Pipeline([
('tokenizer', HuggingfacePretrainedTokenizer(TOKENIZER)),
('net', NeuralNetClassifier(
BertModule,
module__name=PRETRAINED_MODEL,
module__num_labels=2,
optimizer=OPTMIZER,
lr=LR,
max_epochs=MAX_EPOCHS,
criterion=CRITERION,
batch_size=BATCH_SIZE,
iterator_train__shuffle=True,
device=DEVICE,
)),
])
def test_tokenizers_transfomers():
print("Testing tokenizers and transfomers started")
torch.manual_seed(0)
np.random.seed(0)
X_train, X_test, y_train, y_test = load_20newsgroup_small()
pipeline = get_transformer_model()
pipeline.fit(X_train, y_train)
with torch.inference_mode():
y_pred = pipeline.predict(X_test)
assert accuracy_score(y_test, y_pred) > 0.7
print("Testing tokenizers and transfomers completed")
########################
# TESTING HF MODEL HUB #
########################
|
BertModule
|
python
|
jazzband__django-polymorphic
|
src/polymorphic/base.py
|
{
"start": 561,
"end": 737
}
|
class ____(RuntimeWarning):
pass
###################################################################################
# PolymorphicModel meta class
|
ManagerInheritanceWarning
|
python
|
viewflow__viewflow
|
viewflow/workflow/nodes/mixins.py
|
{
"start": 734,
"end": 1676
}
|
class ____(object):
"""Mixin for nodes that have only one outgoing path."""
def __init__(self, *args, **kwargs): # noqa D102
self._next = None
self._task_data = None
self._task_seed = None
super().__init__(*args, **kwargs)
def Next(
self,
node,
task_data: Optional[Callable[[Activation], Dict[str, Any]]] = None,
task_seed: Optional[Callable[[Activation], Any]] = None,
):
"""Next node to activate."""
self._next = node
self._task_data = task_data
self._task_seed = task_seed
return self
def _resolve(self, cls):
self._next = this.resolve(cls, self._next)
self._task_data = this.resolve(cls, self._task_data)
self._task_seed = this.resolve(cls, self._task_seed)
def _outgoing(self):
if self._next:
yield Edge(src=self, dst=self._next, edge_class="next")
|
NextNodeMixin
|
python
|
doocs__leetcode
|
solution/1700-1799/1746.Maximum Subarray Sum After One Operation/Solution.py
|
{
"start": 0,
"end": 292
}
|
class ____:
def maxSumAfterOperation(self, nums: List[int]) -> int:
f = g = 0
ans = -inf
for x in nums:
ff = max(f, 0) + x
gg = max(max(f, 0) + x * x, g + x)
f, g = ff, gg
ans = max(ans, f, g)
return ans
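# Worked trace of the recurrence above: f is the best subarray sum ending at
# the current element with no squaring applied, g the best ending here with
# exactly one element squared.
#
#   nums = [2, -1, -4, -3]
#   x =  2: f =  2, g =  4
#   x = -1: f =  1, g =  3   # max(2 + 1, 4 - 1)
#   x = -4: f = -3, g = 17   # max(1 + 16, 3 - 4)
#   x = -3: f = -3, g = 14   # max(0 + 9, 17 - 3)
#   ans = 17  -> square the -4 inside the subarray [2, -1, -4]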
|
Solution
|
python
|
bokeh__bokeh
|
src/bokeh/models/widgets/tables.py
|
{
"start": 8887,
"end": 16118
}
|
class ____(StringFormatter):
''' Date cell formatter.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
format = Either(Enum(DateFormat), String, default='ISO-8601', help="""
The date format can be any standard `strftime`_ format string, as well
as any of the following predefined format names:
================================================ ================== ===================
Format name(s) Format string Example Output
================================================ ================== ===================
``ATOM`` / ``W3C`` / ``RFC-3339`` / ``ISO-8601`` ``"%Y-%m-%d"`` 2014-03-01
``COOKIE`` ``"%a, %d %b %Y"`` Sat, 01 Mar 2014
``RFC-850`` ``"%A, %d-%b-%y"`` Saturday, 01-Mar-14
``RFC-1123`` / ``RFC-2822`` ``"%a, %e %b %Y"`` Sat, 1 Mar 2014
``RSS`` / ``RFC-822`` / ``RFC-1036`` ``"%a, %e %b %y"`` Sat, 1 Mar 14
``TIMESTAMP`` (ms since epoch) 1393632000000
================================================ ================== ===================
Note that in the table some of the format names are synonymous, with
identical format names separated by slashes.
This list of supported `strftime`_ format codes is reproduced below.
%a
The abbreviated name of the day of the week according to the
current locale.
%A
The full name of the day of the week according to the current
locale.
%b
The abbreviated month name according to the current locale.
%B
The full month name according to the current locale.
%c
The preferred date and time representation for the current
locale.
%C
The century number (year/100) as a 2-digit integer.
%d
The day of the month as a decimal number (range 01 to 31).
%D
Equivalent to %m/%d/%y. (Americans should note that in many
other countries %d/%m/%y is rather common. This means that in
international context this format is ambiguous and should not
be used.)
%e
Like %d, the day of the month as a decimal number, but a
leading zero is replaced by a space.
%f
Microsecond as a decimal number, zero-padded on the left (range
000000-999999). This is an extension to the set of directives
available to `timezone`_.
%F
Equivalent to %Y-%m-%d (the ISO 8601 date format).
%G
The ISO 8601 week-based year with century as a decimal number.
The 4-digit year corresponding to the ISO week number (see %V).
This has the same format and value as %Y, except that if the
ISO week number belongs to the previous or next year, that year
is used instead.
%g
Like %G, but without century, that is, with a 2-digit year (00-99).
%h
Equivalent to %b.
%H
The hour as a decimal number using a 24-hour clock (range 00
to 23).
%I
The hour as a decimal number using a 12-hour clock (range 01
to 12).
%j
The day of the year as a decimal number (range 001 to 366).
%k
The hour (24-hour clock) as a decimal number (range 0 to 23).
Single digits are preceded by a blank. (See also %H.)
%l
The hour (12-hour clock) as a decimal number (range 1 to 12).
Single digits are preceded by a blank. (See also %I.) (TZ)
%m
The month as a decimal number (range 01 to 12).
%M
The minute as a decimal number (range 00 to 59).
%n
A newline character. Bokeh text does not currently support
newline characters.
%N
Nanosecond as a decimal number, zero-padded on the left (range
000000000-999999999). Supports a padding width specifier, i.e.
%3N displays 3 leftmost digits. However, this is only accurate
to the millisecond level of precision due to limitations of
`timezone`_.
%p
Either "AM" or "PM" according to the given time value, or the
corresponding strings for the current locale. Noon is treated
as "PM" and midnight as "AM".
%P
Like %p but in lowercase: "am" or "pm" or a corresponding
string for the current locale.
%r
The time in a.m. or p.m. notation. In the POSIX locale this
is equivalent to %I:%M:%S %p.
%R
The time in 24-hour notation (%H:%M). For a version including
the seconds, see %T below.
%s
The number of seconds since the Epoch, 1970-01-01 00:00:00
+0000 (UTC).
%S
The second as a decimal number (range 00 to 60). (The range
is up to 60 to allow for occasional leap seconds.)
%t
A tab character. Bokeh text does not currently support tab
characters.
%T
The time in 24-hour notation (%H:%M:%S).
%u
The day of the week as a decimal, range 1 to 7, Monday being 1.
See also %w.
%U
The week number of the current year as a decimal number, range
00 to 53, starting with the first Sunday as the first day of
week 01. See also %V and %W.
%V
The ISO 8601 week number (see NOTES) of the current year as a
decimal number, range 01 to 53, where week 1 is the first week
that has at least 4 days in the new year. See also %U and %W.
%w
The day of the week as a decimal, range 0 to 6, Sunday being 0.
See also %u.
%W
The week number of the current year as a decimal number, range
00 to 53, starting with the first Monday as the first day of
week 01.
%x
The preferred date representation for the current locale
without the time.
%X
The preferred time representation for the current locale
without the date.
%y
The year as a decimal number without a century (range 00 to 99).
%Y
The year as a decimal number including the century.
%z
The +hhmm or -hhmm numeric timezone (that is, the hour and
minute offset from UTC).
%Z
The timezone name or abbreviation.
%%
A literal '%' character.
.. warning::
The client library BokehJS uses the `timezone`_ library to
format datetimes. The inclusion of the list below is based on the
claim that `timezone`_ makes to support "the full compliment
of GNU date format specifiers." However, this claim has not
been tested exhaustively against this list. If you find formats
that do not function as expected, please submit a `github issue`_,
so that the documentation can be updated appropriately.
.. _strftime: http://man7.org/linux/man-pages/man3/strftime.3.html
.. _timezone: http://bigeasy.github.io/timezone/
.. _github issue: https://github.com/bokeh/bokeh/issues
""")
nan_format = Override(default="-")
null_format = Override(default="-")
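# Minimal usage sketch (column and variable names are illustrative, not from
# the Bokeh documentation): attach the formatter above to a DataTable column
# so epoch-millisecond timestamps render as dates.
#
#   from bokeh.models import ColumnDataSource, DataTable, TableColumn
#   source = ColumnDataSource(data=dict(when=[1393632000000]))
#   columns = [TableColumn(field="when", title="Date",
#                          formatter=DateFormatter(format="%Y-%m-%d"))]
#   table = DataTable(source=source, columns=columns)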
|
DateFormatter
|
python
|
astropy__astropy
|
astropy/visualization/wcsaxes/transforms.py
|
{
"start": 5661,
"end": 6411
}
|
class ____(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from pixel to world coordinates.
"""
has_inverse = True
frame_out = None
@property
@abc.abstractmethod
def output_dims(self):
"""
The number of output world dimensions.
"""
@abc.abstractmethod
def transform(self, pixel):
"""
Transform pixel to world coordinates. You should pass in a Nx2 array
of (x, y) pixel coordinates to transform to world coordinates. This
will then return an NxM array where M is the number of dimensions.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform.
"""
|
Pixel2WorldTransform
|
python
|
joke2k__faker
|
faker/providers/bank/pl_PL/__init__.py
|
{
"start": 42,
"end": 180
}
|
class ____(BankProvider):
"""Implement bank provider for ``pl_PL`` locale."""
bban_format = "#" * 24
country_code = "PL"
|
Provider
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 92095,
"end": 95384
}
|
class ____(Response):
"""
Response of tasks.archive_many endpoint.
:param succeeded:
:type succeeded: Sequence[dict]
:param failed:
:type failed: Sequence[dict]
"""
_service = "tasks"
_action = "archive_many"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"failed": {
"items": {
"properties": {
"error": {
"description": "Error info",
"properties": {
"codes": {
"items": {"type": "integer"},
"type": "array",
},
"data": {
"additionalProperties": True,
"type": "object",
},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {
"description": "ID of the failed entity",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
"succeeded": {
"items": {
"properties": {
"archived": {
"description": "Indicates whether the task was archived",
"type": "boolean",
},
"id": {
"description": "ID of the succeeded entity",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self, succeeded: Optional[List[dict]] = None, failed: Optional[List[dict]] = None, **kwargs: Any
) -> None:
super(ArchiveManyResponse, self).__init__(**kwargs)
self.succeeded = succeeded
self.failed = failed
@schema_property("succeeded")
def succeeded(self) -> Optional[List[dict]]:
return self._property_succeeded
@succeeded.setter
def succeeded(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_succeeded = None
return
self.assert_isinstance(value, "succeeded", (list, tuple))
self.assert_isinstance(value, "succeeded", (dict,), is_array=True)
self._property_succeeded = value
@schema_property("failed")
def failed(self) -> Optional[List[dict]]:
return self._property_failed
@failed.setter
def failed(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_failed = None
return
self.assert_isinstance(value, "failed", (list, tuple))
self.assert_isinstance(value, "failed", (dict,), is_array=True)
self._property_failed = value
|
ArchiveManyResponse
|
python
|
jina-ai__jina
|
tests/integration/docker_volumes/filewriter-exec/executor.py
|
{
"start": 49,
"end": 305
}
|
class ____(Executor):
@requests
def foo(self, **kwargs):
print(self.workspace)
file = os.path.join(self.workspace, 'out.txt')
with open(file, 'w', encoding='utf-8') as f:
f.write('Filewriter was here')
|
FilewriterExec
|
python
|
ray-project__ray
|
python/ray/data/tests/test_namespace_expressions.py
|
{
"start": 7842,
"end": 9106
}
|
class ____:
"""Tests for string padding operations."""
def test_string_padding(
self, dataset_format, method_name, method_kwargs, expected_value
):
"""Test string padding methods."""
data = [{"val": "hi"}]
ds = _create_dataset(data, dataset_format)
method = getattr(col("val").str, method_name)
result = ds.with_column("result", method(**method_kwargs)).to_pandas()
expected = pd.DataFrame({"val": ["hi"], "result": [expected_value]})
assert rows_same(result, expected)
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
@pytest.mark.parametrize(
"method_name,method_args,method_kwargs,input_values,expected_results",
[
("starts_with", ("A",), {}, ["Alice", "Bob", "Alex"], [True, False, True]),
("starts_with", ("A",), {"ignore_case": True}, ["alice", "bob"], [True, False]),
("ends_with", ("e",), {}, ["Alice", "Bob"], [True, False]),
("contains", ("li",), {}, ["Alice", "Bob", "Charlie"], [True, False, True]),
("find", ("i",), {}, ["Alice", "Bob"], [2, -1]),
("count", ("a",), {}, ["banana", "apple"], [3, 1]),
("match", ("Al%",), {}, ["Alice", "Bob", "Alex"], [True, False, True]),
],
)
|
TestStringPadding
|
python
|
google__pytype
|
pytype/pyi/parser_test.py
|
{
"start": 36318,
"end": 41813
}
|
class ____(parser_test_base.ParserTestBase):
def test_no_bases(self):
canonical = """
class Foo: ...
"""
self.check(canonical, canonical)
self.check(
"""
class Foo():
pass
""",
canonical,
)
def test_bases(self):
self.check("""
class Foo(Bar): ...
""")
self.check("""
class Foo(Bar, Baz): ...
""")
def test_base_remove_nothingtype(self):
self.check(
"""
class Foo(nothing): ...
""",
"""
class Foo: ...
""",
)
self.check(
"""
class Foo(Bar, nothing): ...
""",
"""
class Foo(Bar): ...
""",
)
def test_class_type_ignore(self):
canonical = """
class Foo: # type: ignore
pass
class Bar(Foo): # type: ignore
pass
"""
self.check(
canonical,
"""
class Foo: ...
class Bar(Foo): ...
""",
)
def test_metaclass(self):
self.check("""
class Foo(metaclass=Meta): ...
""")
self.check("""
class Foo(Bar, metaclass=Meta): ...
""")
self.check_error(
"""
class Foo(badkeyword=Meta): ...
""",
1,
"Unexpected classdef kwarg 'badkeyword'",
)
self.check_error(
"""
class Foo(metaclass=Meta, Bar): ...
""",
1,
"positional argument follows keyword argument",
)
def test_shadow_pep484(self):
self.check("""
class List:
def bar(self) -> List: ...
""")
def test_no_body(self):
canonical = """
class Foo: ...
"""
# There are numerous ways to indicate an empty body.
self.check(canonical, canonical)
self.check(
"""
class Foo(): pass
""",
canonical,
)
self.check(
"""
class Foo():
pass
""",
canonical,
)
self.check(
"""
class Foo():
...
""",
canonical,
)
# pylint: disable=g-inconsistent-quotes
self.check(
'''\
class Foo():
"""docstring"""
...
''',
canonical,
)
self.check(
'''\
class Foo():
"""docstring"""
''',
canonical,
)
# Accept type: ignore with empty body
self.check(
"""
class Foo: ... # type: ignore
""",
canonical,
)
self.check(
"""
class Foo: # type: ignore
pass
""",
canonical,
)
def test_attribute(self):
self.check("""
class Foo:
a: int
""")
def test_method(self):
self.check("""
class Foo:
def a(self, x: int) -> str: ...
""")
def test_property(self):
self.check(
"""
class Foo:
@property
def a(self) -> int: ...
""",
"""
from typing import Annotated
class Foo:
a: Annotated[int, 'property']
""",
)
def test_duplicate_name(self):
# Duplicate constants: last one wins.
self.check(
"""
class Foo:
bar: int
bar: str
""",
"""
class Foo:
bar: str
""",
)
# Duplicate names between different node types is an error.
self.check_error(
"""
class Foo:
def bar(self) -> int: ...
bar = ... # type: str
""",
1,
"Duplicate attribute name(s) in class Foo: bar",
)
# Multiple method defs are ok (needed for variant signatures).
self.check(
"""
class Foo:
@overload
def x(self) -> int: ...
@overload
def x(self) -> str: ...
""",
"""
from typing import overload
class Foo:
@overload
def x(self) -> int: ...
@overload
def x(self) -> str: ...
""",
)
def test_protocol_base(self):
self.check("""
from typing import Protocol
class Foo(Protocol): ...
""")
def test_parameterized_protocol_base(self):
self.check(
"""
from typing import Protocol, TypeVar
T = TypeVar('T')
class Foo(Protocol[T]): ...
""",
"""
from typing import Generic, Protocol, TypeVar
T = TypeVar('T')
class Foo(Generic[T], Protocol): ...
""",
)
def test_typing_extensions_parameterized_protocol(self):
self.check(
"""
from typing import TypeVar
from typing_extensions import Protocol
T = TypeVar('T')
class Foo(Protocol[T]): ...
""",
"""
from typing import Generic, TypeVar
from typing_extensions import Protocol
T = TypeVar('T')
class Foo(Generic[T], Protocol): ...
""",
)
def test_bad_typevar_in_mutation(self):
self.check_error(
"""
from typing import Generic, TypeVar
S = TypeVar('S')
T = TypeVar('T')
U = TypeVar('U')
V = TypeVar('V')
class Foo(Generic[T]):
def __init__(self, x: S):
self = Generic[S, T, U, V]
""",
None,
"Type parameter(s) {U, V}",
)
def test_nested_class_typing_class_conflict(self):
ast = parser.parse_string(textwrap.dedent("""
from typing import Mapping
class Foo:
class Mapping: ...
x: Mapping
""").lstrip())
x = ast.Lookup("x")
self.assertEqual(x.type.name, "typing.Mapping")
|
ClassTest
|
python
|
networkx__networkx
|
networkx/algorithms/tree/tests/test_distance_measures.py
|
{
"start": 1922,
"end": 3389
}
|
class ____:
@pytest.mark.parametrize("n", [1, 2, 99, 100])
def test_tree_centroid_path_graphs(self, n):
G = nx.path_graph(n)
expected = {(n - 1) // 2, math.ceil((n - 1) / 2)}
assert set(nx.tree.centroid(G)) == expected
@pytest.mark.parametrize("r", range(2, 5))
@pytest.mark.parametrize("h", range(1, 5))
def test_tree_centroid_balanced_tree(self, r, h):
G = nx.balanced_tree(r, h)
assert nx.tree.centroid(G) == [0]
def test_tree_centroid_multiple_centroids(self):
G = nx.full_rary_tree(2, 8)
assert nx.tree.centroid(G) == [0, 1]
def test_tree_centroid_different_from_graph_center(self):
G = nx.star_graph(6)
nx.add_path(G, [6, 7, 8, 9, 10])
# nx.center(G) would be [7]
assert nx.tree.centroid(G) == [0]
def test_tree_centroid_not_a_tree(self):
G = nx.cycle_graph(3)
with pytest.raises(nx.NotATree, match=r"not a tree"):
nx.tree.centroid(G)
@pytest.mark.parametrize("G", [nx.DiGraph([(0, 1)]), nx.MultiDiGraph([(0, 1)])])
def test_tree_centroid_direct_raises(self, G):
with pytest.raises(
nx.NetworkXNotImplemented, match=r"not implemented for directed type"
):
nx.tree.centroid(G)
def test_tree_centroid_empty(self):
G = nx.Graph()
with pytest.raises(nx.NetworkXPointlessConcept, match=r"has no nodes"):
nx.tree.centroid(G)
|
TestDistance
|
python
|
huggingface__transformers
|
src/transformers/models/sam3/modeling_sam3.py
|
{
"start": 40981,
"end": 42734
}
|
class ____(Sam3PreTrainedModel):
config_class = Sam3VisionConfig
main_input_name = "pixel_values"
_can_record_outputs = {
"hidden_states": Sam3ViTLayer,
"attentions": Sam3ViTRoPEAttention,
}
def __init__(self, config: Sam3VisionConfig):
super().__init__(config)
self.config = config
self.backbone = AutoModel.from_config(config.backbone_config)
self.neck = Sam3VisionNeck(config)
self.post_init()
def get_input_embeddings(self):
return self.backbone.get_input_embeddings()
@check_model_inputs()
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Sam3VisionEncoderOutput]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
backbone_output = self.backbone(pixel_values, **kwargs)
hidden_states = backbone_output.last_hidden_state # [batch_size, seq_len, hidden_size]
# Reshape for FPN neck: [batch_size, seq_len, hidden_size] -> [batch_size, hidden_size, height, width]
batch_size = hidden_states.shape[0]
height = pixel_values.shape[-2] // self.config.backbone_config.patch_size
width = pixel_values.shape[-1] // self.config.backbone_config.patch_size
hidden_states_spatial = hidden_states.view(batch_size, height, width, -1).permute(0, 3, 1, 2)
fpn_hidden_states, fpn_position_encoding = self.neck(hidden_states_spatial)
return Sam3VisionEncoderOutput(
last_hidden_state=hidden_states,
fpn_hidden_states=fpn_hidden_states,
fpn_position_encoding=fpn_position_encoding,
)
|
Sam3VisionModel
|
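The Sam3VisionModel forward pass above flattens a token sequence back onto a spatial grid before the FPN neck; the following stand-alone sketch of that reshape uses hypothetical sizes (batch, patch, image and hidden dimensions are made up here, not taken from Sam3VisionConfig):
import torch
# Hypothetical sizes; the real model derives them from pixel_values and the backbone patch size.
batch_size, patch_size, image_size, hidden_size = 2, 16, 224, 256
height = width = image_size // patch_size            # 14 x 14 patch grid
tokens = torch.randn(batch_size, height * width, hidden_size)
# [batch, seq_len, hidden] -> [batch, hidden, height, width], mirroring the permute in forward().
spatial = tokens.view(batch_size, height, width, hidden_size).permute(0, 3, 1, 2)
assert spatial.shape == (batch_size, hidden_size, height, width)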
python
|
pypa__warehouse
|
tests/unit/email/test_init.py
|
{
"start": 25997,
"end": 28144
}
|
class ____:
def test_new_email_added_emails(self, pyramid_request, pyramid_config, monkeypatch):
stub_user = pretend.stub(
id="id", username="username", name=None, email="foo@example.com"
)
stub_email = pretend.stub(id="id", email="email@example.com", verified=False)
new_email_address = "new@example.com"
pyramid_request.method = "POST"
subject_renderer = pyramid_config.testing_add_renderer(
"email/new-email-added/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/new-email-added/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/new-email-added/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=stub_user.id)
)
),
)
pyramid_request.user = stub_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
result = email.send_new_email_added_email(
pyramid_request,
(stub_user, stub_email),
new_email_address=new_email_address,
)
assert result == {
"username": stub_user.username,
"new_email_address": new_email_address,
}
subject_renderer.assert_()
body_renderer.assert_(new_email_address=new_email_address)
html_renderer.assert_(new_email_address=new_email_address)
assert pyramid_request.task.calls == []
assert send_email.delay.calls == []
|
TestNewEmailAddedEmails
|