| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6-201) | class_span (dict) | source (stringlengths 21-2.38M) | target (stringlengths 1-96) |
|---|---|---|---|---|---|
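Each row masks a class name in `source` with `____` and stores the original name in `target`; `class_span` gives the character offsets of the class in its original file. A minimal sketch of consuming a row (the `restore_class_name` helper and the literal `row` dict are hypothetical, for illustration only):

```python
# Hypothetical helper: splice the masked class name back into a row.
def restore_class_name(row: dict) -> str:
    """Replace the ____ placeholder with the row's target class name."""
    return row["source"].replace("class ____", f"class {row['target']}", 1)

# Toy row mirroring the columns above (values abbreviated).
row = {
    "language": "python",
    "repo": "django__django",
    "path": "django/contrib/gis/db/models/fields.py",
    "class_span": {"start": 12603, "end": 14304},
    "source": 'class ____(BaseSpatialField):\n    """Raster field."""',
    "target": "RasterField",
}
print(restore_class_name(row).splitlines()[0])
# -> class RasterField(BaseSpatialField):
```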
python
|
django__django
|
django/contrib/gis/db/models/fields.py
|
{
"start": 12603,
"end": 14304
}
|
class ____(BaseSpatialField):
"""
Raster field for GeoDjango -- evaluates into GDALRaster objects.
"""
description = _("Raster Field")
geom_type = "RASTER"
geography = False
def _check_connection(self, connection):
# Make sure raster fields are used only on backends with raster
# support.
if (
not connection.features.gis_enabled
or not connection.features.supports_raster
):
raise ImproperlyConfigured(
"Raster fields require backends with raster support."
)
def db_type(self, connection):
self._check_connection(connection)
return super().db_type(connection)
def from_db_value(self, value, expression, connection):
return connection.ops.parse_raster(value)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
# Setup for lazy-instantiated Raster object. For large querysets, the
# instantiation of all GDALRasters can potentially be expensive. This
# delays the instantiation of the objects to the moment of evaluation
# of the raster attribute.
setattr(cls, self.attname, SpatialProxy(gdal.GDALRaster, self))
def get_transform(self, name):
from django.contrib.gis.db.models.lookups import RasterBandTransform
try:
band_index = int(name)
return type(
"SpecificRasterBandTransform",
(RasterBandTransform,),
{"band_index": band_index},
)
except ValueError:
pass
return super().get_transform(name)
|
RasterField
|
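The `get_transform` override in the row above manufactures a transform subclass on the fly with `type()` so an integer lookup selects a raster band. A minimal, framework-free sketch of the same pattern (the `BandTransform` base here is a hypothetical stand-in for Django's `RasterBandTransform`):

```python
# Sketch of the dynamic-subclass pattern from get_transform above: an
# integer lookup name becomes a subclass carrying that band index.
class BandTransform:  # hypothetical stand-in for RasterBandTransform
    band_index = 0

def get_transform(name):
    try:
        band_index = int(name)
    except ValueError:
        return None  # not an integer lookup; defer to other transforms
    return type(
        "SpecificBandTransform", (BandTransform,), {"band_index": band_index}
    )

t = get_transform("2")
print(t.__name__, t.band_index)  # SpecificBandTransform 2
```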
python
|
sympy__sympy
|
sympy/sets/fancysets.py
|
{
"start": 47627,
"end": 48187
}
|
class ____(CartesianComplexRegion, metaclass=Singleton):
"""
The :class:`Set` of all complex numbers
Examples
========
>>> from sympy import S, I
>>> S.Complexes
Complexes
>>> 1 + I in S.Complexes
True
See also
========
Reals
ComplexRegion
"""
is_empty = False
is_finite_set = False
# Override property from superclass since Complexes has no args
@property
def sets(self):
return ProductSet(S.Reals, S.Reals)
def __new__(cls):
return Set.__new__(cls)
|
Complexes
|
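`Complexes` is constructed through SymPy's `Singleton` metaclass, so `S.Complexes` always yields the same instance. A minimal sketch of the singleton-metaclass idea, assuming a simplified cache; SymPy's real `Singleton` also registers the instance on the `S` namespace:

```python
# Minimal singleton metaclass: every call to the class returns the
# same cached instance, which is what makes `S.Complexes` stable.
class Singleton(type):
    def __call__(cls, *args, **kwargs):
        if "_instance" not in cls.__dict__:
            cls._instance = super().__call__(*args, **kwargs)
        return cls._instance

class Complexes(metaclass=Singleton):
    pass

assert Complexes() is Complexes()  # one shared instance
```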
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/self2.py
|
{
"start": 1165,
"end": 1518
}
|
class ____:
def set_scale(self: Self, scale: float) -> Self:
self.scale = scale
return self
@classmethod
def from_config(cls: type[Self], config: dict[str, float]) -> Self:
return cls()
def difference(self: Self, other: Self) -> float: ...
def apply(self: Self, f: Callable[[Self], None]) -> None: ...
|
Shape2
|
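This sample exercises explicitly annotated `Self` methods. A short sketch of why `Self` is useful: a chained call on a subclass keeps the subclass type. Assumes Python 3.11+ for `typing.Self` (use `typing_extensions.Self` on older versions):

```python
from typing import Self  # Python 3.11+

class Shape:
    def set_scale(self, scale: float) -> Self:
        self.scale = scale
        return self

class Circle(Shape):
    def set_radius(self, r: float) -> Self:
        self.radius = r
        return self

# Because set_scale returns Self rather than Shape, the chained call
# below type-checks: the result is known to be a Circle.
c = Circle().set_scale(0.5).set_radius(2.0)
```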
python
|
scipy__scipy
|
scipy/signal/tests/test_ltisys.py
|
{
"start": 1334,
"end": 10695
}
|
class ____:
def _check(self, A, B, P, **kwargs):
"""
Perform the most common tests on the poles computed by place_poles
and return the Bunch object for further specific tests
"""
fsf = place_poles(A, B, P, **kwargs)
expected, _ = np.linalg.eig(A - np.dot(B, fsf.gain_matrix))
_assert_poles_close(expected, fsf.requested_poles)
_assert_poles_close(expected, fsf.computed_poles)
_assert_poles_close(P, fsf.requested_poles)
return fsf
def test_real(self):
# Test real pole placement using the KNV and YT0 algorithms and example 1 in
# section 4 of the reference publication (see place_poles docstring)
A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0,
0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273,
1.343, -2.104]).reshape(4, 4)
B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146,0]).reshape(4, 2)
P = np.array([-0.2, -0.5, -5.0566, -8.6659])
# Check that both KNV and YT compute correct K matrix
self._check(A, B, P, method='KNV0')
self._check(A, B, P, method='YT')
# Try to reach the specific case in _YT_real where two singular
# values are almost equal. This is to improve code coverage but I
# have no way to be sure this code is really reached.
# On some architectures this can lead to a RuntimeWarning: invalid
# value encountered in divide (see gh-7590), so suppress it for now.
with np.errstate(invalid='ignore'):
self._check(A, B, (2,2,3,3))
def test_complex(self):
# Test complex pole placement on a linearized car model, taken from L.
# Jaulin, Automatique pour la robotique, Cours et Exercices, iSTE
# editions p 184/185
A = np.array([[0, 7, 0, 0],
[0, 0, 0, 7/3.],
[0, 0, 0, 0],
[0, 0, 0, 0]])
B = np.array([[0, 0],
[0, 0],
[1, 0],
[0, 1]])
# Test complex poles on YT
P = np.array([-3, -1, -2-1j, -2+1j])
# on macOS arm64 this can lead to a RuntimeWarning: invalid
# value encountered in divide, so suppress it for now
with np.errstate(divide='ignore', invalid='ignore'):
self._check(A, B, P)
# Try to reach the specific case in _YT_complex where two singular
# values are almost equal. This is to improve code coverage but I
# have no way to be sure this code is really reached
P = [0-1e-6j,0+1e-6j,-10,10]
with np.errstate(divide='ignore', invalid='ignore'):
self._check(A, B, P, maxiter=1000)
# Try to reach the specific case in _YT_complex where the rank two
# update yields two null vectors. This test was found via Monte Carlo.
A = np.array(
[-2148,-2902, -2267, -598, -1722, -1829, -165, -283, -2546,
-167, -754, -2285, -543, -1700, -584, -2978, -925, -1300,
-1583, -984, -386, -2650, -764, -897, -517, -1598, 2, -1709,
-291, -338, -153, -1804, -1106, -1168, -867, -2297]
).reshape(6,6)
B = np.array(
[-108, -374, -524, -1285, -1232, -161, -1204, -672, -637,
-15, -483, -23, -931, -780, -1245, -1129, -1290, -1502,
-952, -1374, -62, -964, -930, -939, -792, -756, -1437,
-491, -1543, -686]
).reshape(6,5)
P = [-25.-29.j, -25.+29.j, 31.-42.j, 31.+42.j, 33.-41.j, 33.+41.j]
self._check(A, B, P)
# Use a lot of poles to go through all cases for update_order
# in _YT_loop
big_A = np.ones((11,11))-np.eye(11)
big_B = np.ones((11,10))-np.diag([1]*10,1)[:,1:]
big_A[:6,:6] = A
big_B[:6,:5] = B
P = [-10,-20,-30,40,50,60,70,-20-5j,-20+5j,5+3j,5-3j]
with np.errstate(divide='ignore', invalid='ignore'):
self._check(big_A, big_B, P)
#check with only complex poles and only real poles
P = [-10,-20,-30,-40,-50,-60,-70,-80,-90,-100]
self._check(big_A[:-1,:-1], big_B[:-1,:-1], P)
P = [-10+10j,-20+20j,-30+30j,-40+40j,-50+50j,
-10-10j,-20-20j,-30-30j,-40-40j,-50-50j]
self._check(big_A[:-1,:-1], big_B[:-1,:-1], P)
# need a 5x5 array to ensure YT handles properly when there
# is only one real pole and several complex
A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0,
0,0,0,5,0,0,0,0,9]).reshape(5,5)
B = np.array([0,0,0,0,1,0,0,1,2,3]).reshape(5,2)
P = np.array([-2, -3+1j, -3-1j, -1+1j, -1-1j])
with np.errstate(divide='ignore', invalid='ignore'):
place_poles(A, B, P)
# same test with an odd number of real poles > 1
# this is another specific case of YT
P = np.array([-2, -3, -4, -1+1j, -1-1j])
with np.errstate(divide='ignore', invalid='ignore'):
self._check(A, B, P)
def test_tricky_B(self):
# check that we correctly handle B matrices with one column and
# with n columns (where n is such that shape(A) = (n, n))
A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0,
0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273,
1.343, -2.104]).reshape(4, 4)
B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146, 0, 1, 2, 3, 4,
5, 6, 7, 8]).reshape(4, 4)
# Neither KNV nor YT is called here; this is a specific case with
# only one unique solution
P = np.array([-0.2, -0.5, -5.0566, -8.6659])
fsf = self._check(A, B, P)
# rtol and nb_iter should be set to np.nan as the identity can be
# used as transfer matrix
assert np.isnan(fsf.rtol)
assert np.isnan(fsf.nb_iter)
# check with complex poles too as they trigger a specific case in
# the specific case :-)
P = np.array((-2+1j,-2-1j,-3,-2))
fsf = self._check(A, B, P)
assert np.isnan(fsf.rtol)
assert np.isnan(fsf.nb_iter)
#now test with a B matrix with only one column (no optimisation)
B = B[:,0].reshape(4,1)
P = np.array((-2+1j,-2-1j,-3,-2))
fsf = self._check(A, B, P)
# we can't optimize anything, check they are set to 0 as expected
assert fsf.rtol == 0
assert fsf.nb_iter == 0
def test_errors(self):
# Test input mistakes from user
A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0]).reshape(4,4)
B = np.array([0,0,0,0,1,0,0,1]).reshape(4,2)
#should fail as the method keyword is invalid
assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
method="foo")
#should fail as poles are not 1D array
assert_raises(ValueError, place_poles, A, B,
np.array((-2.1,-2.2,-2.3,-2.4)).reshape(4,1))
#should fail as A is not a 2D array
assert_raises(ValueError, place_poles, A[:,:,np.newaxis], B,
(-2.1,-2.2,-2.3,-2.4))
#should fail as B is not a 2D array
assert_raises(ValueError, place_poles, A, B[:,:,np.newaxis],
(-2.1,-2.2,-2.3,-2.4))
#should fail as there are too many poles
assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4,-3))
#should fail as there are not enough poles
assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3))
#should fail as the rtol is greater than 1
assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
rtol=42)
#should fail as maxiter is smaller than 1
assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
maxiter=-42)
# should fail as the pole -2 is repeated more times than rank(B)
assert_raises(ValueError, place_poles, A, B, (-2,-2,-2,-2))
# uncontrollable system
assert_raises(ValueError, place_poles, np.ones((4,4)),
np.ones((4,2)), (1,2,3,4))
# Should not raise ValueError as the poles can be placed but should
# raise a warning as the convergence is not reached
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
fsf = place_poles(A, B, (-1,-2,-3,-4), rtol=1e-16, maxiter=42)
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert ("Convergence was not reached after maxiter iterations"
in str(w[-1].message))
assert fsf.nb_iter == 42
# should fail as a complex misses its conjugate
assert_raises(ValueError, place_poles, A, B, (-2+1j,-2-1j,-2+3j,-2))
# should fail as A is not square
assert_raises(ValueError, place_poles, A[:,:3], B, (-2,-3,-4,-5))
# should fail as B does not have the same number of rows as A
assert_raises(ValueError, place_poles, A, B[:3,:], (-2,-3,-4,-5))
# should fail as KNV0 does not support complex poles
assert_raises(ValueError, place_poles, A, B,
(-2+1j,-2-1j,-2+3j,-2-3j), method="KNV0")
|
TestPlacePoles
|
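The `_check` helper above verifies that the closed-loop matrix `A - B @ K` has the requested eigenvalues. A minimal standalone sketch of the same check with `scipy.signal.place_poles`, using a hypothetical double-integrator plant:

```python
import numpy as np
from scipy.signal import place_poles

A = np.array([[0.0, 1.0],
              [0.0, 0.0]])   # double integrator
B = np.array([[0.0],
              [1.0]])
P = np.array([-1.0, -2.0])   # desired closed-loop poles

fsf = place_poles(A, B, P)
closed_loop = A - B @ fsf.gain_matrix
# The closed-loop eigenvalues should match the requested poles.
print(np.sort(np.linalg.eigvals(closed_loop)))  # approx. [-2., -1.]
```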
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_insight_streams.py
|
{
"start": 1741,
"end": 30635
}
|
class ____:
def test_init(self, api, some_config):
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
insights_lookback_window=28,
)
assert not stream.breakdowns
assert stream.action_breakdowns == [
"action_type",
"action_target_id",
"action_destination",
]
assert stream.name == "ads_insights"
assert stream.primary_key == ["date_start", "account_id", "ad_id"]
def test_init_override(self, api, some_config):
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
name="CustomName",
breakdowns=["test1", "test2"],
action_breakdowns=["field1", "field2"],
insights_lookback_window=28,
)
assert stream.breakdowns == ["test1", "test2"]
assert stream.action_breakdowns == ["field1", "field2"]
assert stream.name == "custom_name"
assert stream.primary_key == [
"date_start",
"account_id",
"ad_id",
"test1",
"test2",
]
def test_init_statuses(self, api, some_config):
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
insights_lookback_window=28,
fields=["account_id", "account_currency"],
filter_statuses=["ACTIVE", "ARCHIVED"],
)
assert stream.request_params()["filtering"] == [{"field": "ad.effective_status", "operator": "IN", "value": ["ACTIVE", "ARCHIVED"]}]
def test_read_records_all(self, mocker, api, some_config):
"""1. yield all from mock
2. if read slice 2, 3 state not changed
if read slice 2, 3, 1 state changed to 3
"""
job = mocker.Mock(spec=InsightAsyncJob)
rec = mocker.Mock()
rec.export_all_data.return_value = {}
job.get_result.return_value = [rec, rec, rec]
job.interval = DateInterval(date(2010, 1, 1), date(2010, 1, 1))
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
insights_lookback_window=28,
)
records = list(
stream.read_records(
sync_mode=SyncMode.incremental,
stream_slice={
"insight_job": job,
"account_id": some_config["account_ids"][0],
},
)
)
assert len(records) == 3
def test_read_records_random_order(self, mocker, api, some_config):
"""1. yield all from mock
2. if read slice 2, 3 state not changed
if read slice 2, 3, 1 state changed to 3
"""
rec = mocker.Mock()
rec.export_all_data.return_value = {}
job = mocker.Mock(spec=AsyncJob)
job.get_result.return_value = [rec, rec, rec]
job.interval = DateInterval(date(2010, 1, 1), date(2010, 1, 1))
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
insights_lookback_window=28,
)
records = list(
stream.read_records(
sync_mode=SyncMode.incremental,
stream_slice={
"insight_job": job,
"account_id": some_config["account_ids"][0],
},
)
)
assert len(records) == 3
def test_read_records_add_account_id(self, mocker, api, some_config):
rec_without_account = mocker.Mock()
rec_without_account.export_all_data.return_value = {}
rec_with_account = mocker.Mock()
rec_with_account.export_all_data.return_value = {"account_id": "some_account_id"}
job = mocker.Mock(spec=AsyncJob)
job.get_result.return_value = [rec_without_account, rec_with_account]
job.interval = DateInterval(date(2010, 1, 1), date(2010, 1, 1))
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
insights_lookback_window=28,
)
records = list(
stream.read_records(
sync_mode=SyncMode.incremental,
stream_slice={
"insight_job": job,
"account_id": some_config["account_ids"][0],
},
)
)
assert len(records) == 2
for record in records:
assert record.get("account_id")
@pytest.mark.parametrize(
"state,result_state",
[
# Old format
(
{
AdsInsights.cursor_field: "2010-10-03",
"slices": [
"2010-01-01",
"2010-01-02",
],
"time_increment": 1,
},
{
"unknown_account": {
AdsInsights.cursor_field: "2010-10-03",
"slices": [
"2010-01-01",
"2010-01-02",
],
},
"time_increment": 1,
},
),
(
{
AdsInsights.cursor_field: "2010-10-03",
},
{
"unknown_account": {
AdsInsights.cursor_field: "2010-10-03",
}
},
),
(
{
"slices": [
"2010-01-01",
"2010-01-02",
]
},
{
"unknown_account": {
"slices": [
"2010-01-01",
"2010-01-02",
]
}
},
),
# New format - nested with account_id
(
{
"unknown_account": {
AdsInsights.cursor_field: "2010-10-03",
"slices": [
"2010-01-01",
"2010-01-02",
],
},
"time_increment": 1,
},
None,
),
(
{
"unknown_account": {
AdsInsights.cursor_field: "2010-10-03",
}
},
None,
),
(
{
"unknown_account": {
"slices": [
"2010-01-01",
"2010-01-02",
]
}
},
None,
),
],
)
def test_state(self, api, state, result_state, some_config):
"""State setter/getter should work with all combinations"""
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
insights_lookback_window=28,
)
assert stream.state == {
"time_increment": 1,
"unknown_account": {"slices": []},
}
stream.state = state
actual_state = stream.state
result_state = state if not result_state else result_state
result_state[some_config["account_ids"][0]]["slices"] = result_state[some_config["account_ids"][0]].get("slices", [])
result_state["time_increment"] = 1
assert actual_state == result_state
def test_stream_slices_no_state(self, api, async_manager_mock, start_date, some_config):
"""Stream will use start_date when there is not state"""
end_date = start_date + timedelta(weeks=2)
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=start_date,
end_date=end_date,
insights_lookback_window=28,
)
async_manager_mock.completed_jobs.return_value = [1, 2, 3]
slices = list(stream.stream_slices(stream_state=None, sync_mode=SyncMode.incremental))
assert slices == [
{"account_id": "unknown_account", "insight_job": 1},
{"account_id": "unknown_account", "insight_job": 2},
{"account_id": "unknown_account", "insight_job": 3},
]
async_manager_mock.assert_called_once()
args, kwargs = async_manager_mock.call_args
generated_jobs = list(kwargs["jobs"])
assert len(generated_jobs) == (end_date - start_date).days + 1
assert generated_jobs[0].interval.start == start_date.date()
assert generated_jobs[1].interval.start == start_date.date() + timedelta(days=1)
def test_stream_slices_no_state_close_to_now(self, api, async_manager_mock, recent_start_date, some_config):
"""Stream will use start_date when there is not state and start_date within 28d from now"""
start_date = recent_start_date
end_date = ab_datetime_now()
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=start_date,
end_date=end_date,
insights_lookback_window=28,
)
async_manager_mock.completed_jobs.return_value = [1, 2, 3]
slices = list(stream.stream_slices(stream_state=None, sync_mode=SyncMode.incremental))
assert slices == [
{"account_id": "unknown_account", "insight_job": 1},
{"account_id": "unknown_account", "insight_job": 2},
{"account_id": "unknown_account", "insight_job": 3},
]
async_manager_mock.assert_called_once()
args, kwargs = async_manager_mock.call_args
generated_jobs = list(kwargs["jobs"])
assert len(generated_jobs) == (end_date - start_date).days + 1
assert generated_jobs[0].interval.start == start_date.date()
assert generated_jobs[1].interval.start == start_date.date() + timedelta(days=1)
def test_stream_slices_with_state(self, api, async_manager_mock, start_date, some_config):
"""Stream will use cursor_value from state when there is state"""
end_date = start_date + timedelta(days=10)
cursor_value = start_date + timedelta(days=5)
state = {AdsInsights.cursor_field: cursor_value.date().isoformat()}
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=start_date,
end_date=end_date,
insights_lookback_window=28,
)
async_manager_mock.completed_jobs.return_value = [1, 2, 3]
slices = list(stream.stream_slices(stream_state=state, sync_mode=SyncMode.incremental))
assert slices == [
{"account_id": "unknown_account", "insight_job": 1},
{"account_id": "unknown_account", "insight_job": 2},
{"account_id": "unknown_account", "insight_job": 3},
]
async_manager_mock.assert_called_once()
args, kwargs = async_manager_mock.call_args
generated_jobs = list(kwargs["jobs"])
# assert that we sync all periods including insight_lookback_period
assert len(generated_jobs) == (end_date.date() - start_date.date()).days + 1
assert generated_jobs[0].interval.start == start_date.date()
assert generated_jobs[1].interval.start == start_date.date() + timedelta(days=1)
def test_stream_slices_with_state_close_to_now(self, api, async_manager_mock, recent_start_date, some_config):
"""Stream will use start_date when close to now and start_date close to now"""
start_date = recent_start_date
end_date = ab_datetime_now()
cursor_value = end_date - timedelta(days=1)
state = {AdsInsights.cursor_field: cursor_value.date().isoformat()}
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=start_date,
end_date=end_date,
insights_lookback_window=28,
)
async_manager_mock.completed_jobs.return_value = [1, 2, 3]
slices = list(stream.stream_slices(stream_state=state, sync_mode=SyncMode.incremental))
assert slices == [
{"account_id": "unknown_account", "insight_job": 1},
{"account_id": "unknown_account", "insight_job": 2},
{"account_id": "unknown_account", "insight_job": 3},
]
async_manager_mock.assert_called_once()
args, kwargs = async_manager_mock.call_args
generated_jobs = list(kwargs["jobs"])
assert len(generated_jobs) == (end_date.date() - start_date.date()).days + 1
assert generated_jobs[0].interval.start == start_date.date()
assert generated_jobs[1].interval.start == start_date.date() + timedelta(days=1)
@pytest.mark.parametrize("state_format", ["old_format", "new_format"])
def test_stream_slices_with_state_and_slices(self, api, async_manager_mock, start_date, some_config, state_format):
"""Stream will use cursor_value from state, but will skip saved slices"""
end_date = start_date + timedelta(days=40)
cursor_value = start_date + timedelta(days=32)
if state_format == "old_format":
state = {
AdsInsights.cursor_field: cursor_value.date().isoformat(),
"slices": [
(cursor_value + timedelta(days=1)).date().isoformat(),
(cursor_value + timedelta(days=3)).date().isoformat(),
],
}
else:
state = {
"unknown_account": {
AdsInsights.cursor_field: cursor_value.date().isoformat(),
"slices": [
(cursor_value + timedelta(days=1)).date().isoformat(),
(cursor_value + timedelta(days=3)).date().isoformat(),
],
}
}
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=start_date,
end_date=end_date,
insights_lookback_window=28,
)
async_manager_mock.completed_jobs.return_value = [1, 2, 3]
slices = list(stream.stream_slices(stream_state=state, sync_mode=SyncMode.incremental))
assert slices == [
{"account_id": "unknown_account", "insight_job": 1},
{"account_id": "unknown_account", "insight_job": 2},
{"account_id": "unknown_account", "insight_job": 3},
]
async_manager_mock.assert_called_once()
args, kwargs = async_manager_mock.call_args
generated_jobs = list(kwargs["jobs"])
assert (
len(generated_jobs) == (end_date.date() - (cursor_value.date() - stream.insights_lookback_period)).days + 1
), "should be 37 slices because we ignore slices which are within insights_lookback_period"
assert generated_jobs[0].interval.start == cursor_value.date() - stream.insights_lookback_period
assert generated_jobs[1].interval.start == cursor_value.date() - stream.insights_lookback_period + timedelta(days=1)
def test_get_json_schema(self, api, some_config):
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
insights_lookback_window=28,
)
schema = stream.get_json_schema()
assert "device_platform" not in schema["properties"]
assert "country" not in schema["properties"]
assert not (set(stream.fields()) - set(schema["properties"].keys())), "all fields present in schema"
def test_get_json_schema_custom(self, api, some_config):
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
breakdowns=["device_platform", "country"],
insights_lookback_window=28,
)
schema = stream.get_json_schema()
assert "device_platform" in schema["properties"]
assert "country" in schema["properties"]
assert not (set(stream.fields()) - set(schema["properties"].keys())), "all fields present in schema"
def test_fields(self, api, some_config):
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
insights_lookback_window=28,
)
fields = stream.fields()
assert "account_id" in fields
assert "account_currency" in fields
assert "actions" in fields
def test_fields_custom(self, api, some_config):
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
fields=["account_id", "account_currency"],
insights_lookback_window=28,
)
assert stream.fields() == ["account_id", "account_currency"]
schema = stream.get_json_schema()
assert schema["properties"].keys() == set(
[
"account_currency",
"account_id",
stream.cursor_field,
"date_stop",
"ad_id",
]
)
def test_level_custom(self, api, some_config):
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
fields=["account_id", "account_currency"],
insights_lookback_window=28,
level="adset",
)
assert stream.level == "adset"
def test_breakdowns_fields_present_in_response_data(self, api, some_config):
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
breakdowns=["age", "gender"],
insights_lookback_window=28,
)
data = {"age": "0-100", "gender": "male"}
assert stream._response_data_is_valid(data)
data = {"id": "0000001", "name": "Pipenpodl Absakopalis"}
assert not stream._response_data_is_valid(data)
@pytest.mark.parametrize(
"config_start_date, saved_cursor_date, expected_start_date, lookback_window",
[
("2024-01-01", "2024-02-29", "2024-02-19", 10),
("2024-01-01", "2024-02-29", "2024-02-01", 28),
("2018-01-01", "2020-02-29", "2021-02-02", 28),
],
ids=[
"with_stream_state in 37 month interval__stream_state_minus_lookback_10_expected",
"with_stream_state in 37 month interval__stream_state_minus_lookback_28_expected",
"with_stream_state NOT in 37 month interval__today_minus_37_month_expected",
],
)
@freeze_time("2024-03-01")
def test_start_date_with_lookback_window(
self, api, some_config, config_start_date: str, saved_cursor_date: str, expected_start_date: str, lookback_window: int
):
start_date = ab_datetime_parse(config_start_date)
end_date = start_date + timedelta(days=10)
state = (
{"unknown_account": {AdsInsights.cursor_field: ab_datetime_parse(saved_cursor_date).isoformat()}} if saved_cursor_date else None
)
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=start_date,
end_date=end_date,
insights_lookback_window=lookback_window,
)
stream.state = state
assert str(stream._get_start_date().get("unknown_account")) == expected_start_date
@pytest.mark.parametrize(
"breakdowns, record, expected_record",
(
(
[
"body_asset",
],
{"body_asset": {"id": "871246182", "text": "Some text"}},
{"body_asset": {"id": "871246182", "text": "Some text"}, "body_asset_id": "871246182"},
),
(
[
"call_to_action_asset",
],
{"call_to_action_asset": {"id": "871246182", "name": "Some name"}},
{"call_to_action_asset": {"id": "871246182", "name": "Some name"}, "call_to_action_asset_id": "871246182"},
),
(
[
"description_asset",
],
{"description_asset": {"id": "871246182", "text": "Some text"}},
{"description_asset": {"id": "871246182", "text": "Some text"}, "description_asset_id": "871246182"},
),
(
[
"image_asset",
],
{"image_asset": {"id": "871246182", "hash": "hash", "url": "url"}},
{"image_asset": {"id": "871246182", "hash": "hash", "url": "url"}, "image_asset_id": "871246182"},
),
(
[
"link_url_asset",
],
{"link_url_asset": {"id": "871246182", "website_url": "website_url"}},
{"link_url_asset": {"id": "871246182", "website_url": "website_url"}, "link_url_asset_id": "871246182"},
),
(
[
"title_asset",
],
{"title_asset": {"id": "871246182", "text": "Some text"}},
{"title_asset": {"id": "871246182", "text": "Some text"}, "title_asset_id": "871246182"},
),
(
[
"video_asset",
],
{
"video_asset": {
"id": "871246182",
"video_id": "video_id",
"url": "url",
"thumbnail_url": "thumbnail_url",
"video_name": "video_name",
}
},
{
"video_asset": {
"id": "871246182",
"video_id": "video_id",
"url": "url",
"thumbnail_url": "thumbnail_url",
"video_name": "video_name",
},
"video_asset_id": "871246182",
},
),
(
["body_asset", "country"],
{"body_asset": {"id": "871246182", "text": "Some text"}, "country": "country", "dma": "dma"},
{"body_asset": {"id": "871246182", "text": "Some text"}, "country": "country", "dma": "dma", "body_asset_id": "871246182"},
),
),
)
def test_transform_breakdowns(self, api, some_config, breakdowns, record, expected_record):
start_date = ab_datetime_parse("2024-01-01")
end_date = start_date + timedelta(days=10)
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=start_date,
end_date=end_date,
insights_lookback_window=1,
breakdowns=breakdowns,
)
assert stream._transform_breakdown(record) == expected_record
@pytest.mark.parametrize(
"breakdowns, expect_pks",
(
(["body_asset"], ["date_start", "account_id", "ad_id", "body_asset_id"]),
(["call_to_action_asset"], ["date_start", "account_id", "ad_id", "call_to_action_asset_id"]),
(["description_asset"], ["date_start", "account_id", "ad_id", "description_asset_id"]),
(["image_asset"], ["date_start", "account_id", "ad_id", "image_asset_id"]),
(["link_url_asset"], ["date_start", "account_id", "ad_id", "link_url_asset_id"]),
(["title_asset"], ["date_start", "account_id", "ad_id", "title_asset_id"]),
(["video_asset"], ["date_start", "account_id", "ad_id", "video_asset_id"]),
(
["video_asset", "skan_conversion_id", "place_page_id"],
["date_start", "account_id", "ad_id", "video_asset_id", "skan_conversion_id", "place_page_id"],
),
(None, ["date_start", "account_id", "ad_id"]),
),
)
def test_primary_keys(self, api, some_config, breakdowns, expect_pks):
start_date = ab_datetime_parse("2024-01-01")
end_date = start_date + timedelta(days=10)
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=start_date,
end_date=end_date,
insights_lookback_window=1,
breakdowns=breakdowns,
)
assert stream.primary_key == expect_pks
@pytest.mark.parametrize(
"breakdowns, expect_pks",
(
(["body_asset"], ["date_start", "account_id", "ad_id", "body_asset_id"]),
(["call_to_action_asset"], ["date_start", "account_id", "ad_id", "call_to_action_asset_id"]),
(["description_asset"], ["date_start", "account_id", "ad_id", "description_asset_id"]),
(["image_asset"], ["date_start", "account_id", "ad_id", "image_asset_id"]),
(["link_url_asset"], ["date_start", "account_id", "ad_id", "link_url_asset_id"]),
(["title_asset"], ["date_start", "account_id", "ad_id", "title_asset_id"]),
(["video_asset"], ["date_start", "account_id", "ad_id", "video_asset_id"]),
(
["video_asset", "skan_conversion_id", "place_page_id"],
["date_start", "account_id", "ad_id", "video_asset_id", "skan_conversion_id", "place_page_id"],
),
(
["video_asset", "link_url_asset", "skan_conversion_id", "place_page_id", "gender"],
[
"date_start",
"account_id",
"ad_id",
"video_asset_id",
"link_url_asset_id",
"skan_conversion_id",
"place_page_id",
"gender",
],
),
),
)
def test_object_pk_added_to_schema(self, api, some_config, breakdowns, expect_pks):
start_date = ab_datetime_parse("2024-01-01")
end_date = start_date + timedelta(days=10)
stream = AdsInsights(
api=api,
account_ids=some_config["account_ids"],
start_date=start_date,
end_date=end_date,
insights_lookback_window=1,
breakdowns=breakdowns,
)
schema = stream.get_json_schema()
assert schema
assert stream.primary_key == expect_pks
for pk in expect_pks:
assert pk in schema["properties"]
def test_all_breakdowns_have_schemas(self):
stream = AdsInsights(
api=None,
account_ids=["act_123"],
start_date=datetime.today().replace(hour=0, minute=0, second=0, microsecond=0),
end_date=datetime.today().replace(hour=0, minute=0, second=0, microsecond=0),
)
loader = ResourceSchemaLoader(package_name_from_class(stream.__class__))
breakdowns_properties = loader.get_schema("ads_insights_breakdowns")["properties"]
valid_breakdowns = [breakdown.name for breakdown in ValidBreakdowns]
# Check for missing breakdowns
missing_breakdowns = [b for b in valid_breakdowns if b not in breakdowns_properties]
assert (
not missing_breakdowns
), f"Schema file 'ads_insights_breakdowns.json' is missing definitions for breakdowns: {missing_breakdowns}"
|
TestBaseInsightsStream
|
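Several of these tests build jobs with `mocker.Mock(spec=InsightAsyncJob)`, so a mock only exposes attributes the real class has. A minimal stdlib sketch of that pattern with a hypothetical `Job` class standing in for `InsightAsyncJob`:

```python
from unittest.mock import Mock

class Job:  # hypothetical stand-in for InsightAsyncJob
    def get_result(self):
        ...

job = Mock(spec=Job)
job.get_result.return_value = [1, 2, 3]
print(job.get_result())  # [1, 2, 3]

try:
    job.not_a_method()   # absent from Job, so the spec rejects it
except AttributeError as exc:
    print("rejected:", exc)
```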
python
|
pytorch__pytorch
|
torch/nn/modules/conv.py
|
{
"start": 75564,
"end": 78515
}
|
class ____(_LazyConvXdMixin, ConvTranspose3d): # type: ignore[misc]
r"""A :class:`torch.nn.ConvTranspose3d` module with lazy initialization of the ``in_channels`` argument.
The ``in_channels`` argument of the :class:`ConvTranspose3d` is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
.. seealso:: :class:`torch.nn.ConvTranspose3d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The superclass defines this variable as None. "type: ignore[..]" is
# required since we are redefining the variable.
cls_to_become = ConvTranspose3d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: _size_3_t = 0,
output_padding: _size_3_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_3_t = 1,
padding_mode: Literal["zeros", "reflect", "replicate", "circular"] = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
# pyrefly: ignore [bad-argument-type]
super().__init__(
0,
0,
kernel_size,
stride,
padding,
output_padding,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
dilation,
padding_mode,
**factory_kwargs,
)
# pyrefly: ignore [bad-override, bad-argument-type]
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
# pyrefly: ignore [bad-override, bad-argument-type]
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 3
|
LazyConvTranspose3d
|
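A short usage sketch of the lazy-initialization behavior described in the docstring above, relying on the public `torch.nn.LazyConvTranspose3d` API: `in_channels` is inferred on the first forward pass, after which the module becomes a plain `ConvTranspose3d` via `cls_to_become`:

```python
import torch
import torch.nn as nn

m = nn.LazyConvTranspose3d(out_channels=16, kernel_size=3)
x = torch.randn(1, 4, 8, 8, 8)  # 4 input channels, unknown to m until now
y = m(x)                        # first forward materializes the weights

print(m.in_channels)        # 4, inferred from input.size(1)
print(type(m).__name__)     # ConvTranspose3d, swapped in via cls_to_become
print(y.shape)              # torch.Size([1, 16, 10, 10, 10])
```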
python
|
getsentry__sentry
|
src/sentry/migrations/0951_delete_ds_waiver.py
|
{
"start": 240,
"end": 1492
}
|
class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0950_safe_del_dswaiver"),
]
operations = [
SafeDeleteModel(
name="DataSecrecyWaiver",
deletion_action=DeletionAction.DELETE,
),
]
|
Migration
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backends/backend_webagg_core.py
|
{
"start": 4690,
"end": 14867
}
|
class ____(backend_agg.FigureCanvasAgg):
manager_class = _api.classproperty(lambda cls: FigureManagerWebAgg)
_timer_cls = TimerAsyncio
# Webagg and friends have the right methods, but still have bugs in
# practice. Do not advertise that it works until we can debug this.
supports_blit = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set to True when the renderer contains data that is newer
# than the PNG buffer.
self._png_is_old = True
# Set to True by the `refresh` message so that the next frame
# sent to the clients will be a full frame.
self._force_full = True
# The last buffer, for diff mode.
self._last_buff = np.empty((0, 0))
# Store the current image mode so that at any point, clients can
# request the information. This should be changed by calling
# self.set_image_mode(mode) so that the notification can be given
# to the connected clients.
self._current_image_mode = 'full'
# Track mouse events to fill in the x, y position of key events.
self._last_mouse_xy = (None, None)
# Control whether scroll events prevent default browser behavior
self._capture_scroll = False
def show(self):
# show the figure window
from matplotlib.pyplot import show
show()
def draw(self):
self._png_is_old = True
try:
super().draw()
finally:
self.manager.refresh_all() # Swap the frames.
def blit(self, bbox=None):
self._png_is_old = True
self.manager.refresh_all()
def draw_idle(self):
self.send_event("draw")
def set_cursor(self, cursor):
# docstring inherited
cursor = _api.check_getitem({
backend_tools.Cursors.HAND: 'pointer',
backend_tools.Cursors.POINTER: 'default',
backend_tools.Cursors.SELECT_REGION: 'crosshair',
backend_tools.Cursors.MOVE: 'move',
backend_tools.Cursors.WAIT: 'wait',
backend_tools.Cursors.RESIZE_HORIZONTAL: 'ew-resize',
backend_tools.Cursors.RESIZE_VERTICAL: 'ns-resize',
}, cursor=cursor)
self.send_event('cursor', cursor=cursor)
def set_image_mode(self, mode):
"""
Set the image mode for any subsequent images which will be sent
to the clients. The modes may currently be either 'full' or 'diff'.
Note: diff images may not contain transparency, therefore upon
draw this mode may be changed if the resulting image has any
transparent component.
"""
_api.check_in_list(['full', 'diff'], mode=mode)
if self._current_image_mode != mode:
self._current_image_mode = mode
self.handle_send_image_mode(None)
def set_capture_scroll(self, capture):
"""
Set whether the scroll events on the canvas will scroll the page.
Parameters
----------
capture : bool
"""
if self._capture_scroll != capture:
self._capture_scroll = capture
self.send_event("capture_scroll", capture_scroll=capture)
def get_capture_scroll(self):
"""
Get whether scroll events are currently captured by the canvas.
Returns
-------
bool
"""
return self._capture_scroll
def get_diff_image(self):
if self._png_is_old:
renderer = self.get_renderer()
pixels = np.asarray(renderer.buffer_rgba())
# The buffer is created as type uint32 so that entire
# pixels can be compared in one numpy call, rather than
# needing to compare each plane separately.
buff = pixels.view(np.uint32).squeeze(2)
if (self._force_full
# If the buffer has changed size we need to do a full draw.
or buff.shape != self._last_buff.shape
# If any pixels have transparency, we need to force a full
# draw as we cannot overlay new on top of old.
or (pixels[:, :, 3] != 255).any()):
self.set_image_mode('full')
output = buff
else:
self.set_image_mode('diff')
diff = buff != self._last_buff
output = np.where(diff, buff, 0)
# Store the current buffer so we can compute the next diff.
self._last_buff = buff.copy()
self._force_full = False
self._png_is_old = False
data = output.view(dtype=np.uint8).reshape((*output.shape, 4))
with BytesIO() as png:
Image.fromarray(data).save(png, format="png")
return png.getvalue()
def handle_event(self, event):
e_type = event['type']
handler = getattr(self, f'handle_{e_type}',
self.handle_unknown_event)
return handler(event)
def handle_unknown_event(self, event):
_log.warning('Unhandled message type %s. %s', event["type"], event)
def handle_ack(self, event):
# Network latency tends to decrease if traffic is flowing
# in both directions. Therefore, the browser sends back
# an "ack" message after each image frame is received.
# This could also be used as a simple sanity check in the
# future, but for now the performance increase is enough
# to justify it, even if the server does nothing with it.
pass
def handle_draw(self, event):
self.draw()
def _handle_mouse(self, event):
x = event['x']
y = event['y']
y = self.get_renderer().height - y
self._last_mouse_xy = x, y
e_type = event['type']
button = event['button'] + 1 # JS numbers off by 1 compared to mpl.
buttons = { # JS ordering different compared to mpl.
button for button, mask in [
(MouseButton.LEFT, 1),
(MouseButton.RIGHT, 2),
(MouseButton.MIDDLE, 4),
(MouseButton.BACK, 8),
(MouseButton.FORWARD, 16),
] if event['buttons'] & mask # State *after* press/release.
}
modifiers = event['modifiers']
guiEvent = event.get('guiEvent')
if e_type in ['button_press', 'button_release']:
MouseEvent(e_type + '_event', self, x, y, button,
modifiers=modifiers, guiEvent=guiEvent)._process()
elif e_type == 'dblclick':
MouseEvent('button_press_event', self, x, y, button, dblclick=True,
modifiers=modifiers, guiEvent=guiEvent)._process()
elif e_type == 'scroll':
MouseEvent('scroll_event', self, x, y, step=event['step'],
modifiers=modifiers, guiEvent=guiEvent)._process()
elif e_type == 'motion_notify':
MouseEvent(e_type + '_event', self, x, y,
buttons=buttons, modifiers=modifiers, guiEvent=guiEvent,
)._process()
elif e_type in ['figure_enter', 'figure_leave']:
LocationEvent(e_type + '_event', self, x, y,
modifiers=modifiers, guiEvent=guiEvent)._process()
handle_button_press = handle_button_release = handle_dblclick = \
handle_figure_enter = handle_figure_leave = handle_motion_notify = \
handle_scroll = _handle_mouse
def _handle_key(self, event):
KeyEvent(event['type'] + '_event', self,
_handle_key(event['key']), *self._last_mouse_xy,
guiEvent=event.get('guiEvent'))._process()
handle_key_press = handle_key_release = _handle_key
def handle_toolbar_button(self, event):
# TODO: Be more suspicious of the input
getattr(self.toolbar, event['name'])()
def handle_refresh(self, event):
if self.manager:
self.send_event('figure_label', label=self.manager.get_window_title())
self._force_full = True
if self.toolbar:
# Normal toolbar init would refresh this, but it happens before the
# browser canvas is set up.
self.toolbar.set_history_buttons()
# Send the current capture_scroll state to newly connected clients
self.send_event('capture_scroll', capture_scroll=self._capture_scroll)
self.draw_idle()
def handle_resize(self, event):
x = int(event.get('width', 800)) * self.device_pixel_ratio
y = int(event.get('height', 800)) * self.device_pixel_ratio
fig = self.figure
# An attempt at approximating the figure size in pixels.
fig.set_size_inches(x / fig.dpi, y / fig.dpi, forward=False)
# Acknowledge the resize, and force the viewer to update the
# canvas size to the figure's new size (which is hopefully
# identical or within a pixel or so).
self._png_is_old = True
self.manager.resize(*fig.bbox.size, forward=False)
ResizeEvent('resize_event', self)._process()
self.draw_idle()
def handle_send_image_mode(self, event):
# The client requests notification of what the current image mode is.
self.send_event('image_mode', mode=self._current_image_mode)
def handle_set_device_pixel_ratio(self, event):
self._handle_set_device_pixel_ratio(event.get('device_pixel_ratio', 1))
def handle_set_dpi_ratio(self, event):
# This handler is for backwards-compatibility with older ipympl.
self._handle_set_device_pixel_ratio(event.get('dpi_ratio', 1))
def _handle_set_device_pixel_ratio(self, device_pixel_ratio):
if self._set_device_pixel_ratio(device_pixel_ratio):
self._force_full = True
self.draw_idle()
def send_event(self, event_type, **kwargs):
if self.manager:
self.manager._send_event(event_type, **kwargs)
_ALLOWED_TOOL_ITEMS = {
'home',
'back',
'forward',
'pan',
'zoom',
'download',
None,
}
|
FigureCanvasWebAggCore
|
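The `get_diff_image` method above compares whole RGBA pixels in a single call by viewing the uint8 buffer as uint32. A minimal numpy sketch of that trick:

```python
import numpy as np

# Two 2x2 RGBA frames (uint8); one pixel differs between them.
old = np.zeros((2, 2, 4), dtype=np.uint8)
new = old.copy()
new[0, 1] = (255, 0, 0, 255)

# Viewing as uint32 packs each RGBA pixel into a single integer, so one
# comparison per pixel replaces four per-channel comparisons.
old32 = old.view(np.uint32).squeeze(2)
new32 = new.view(np.uint32).squeeze(2)
changed = new32 != old32
diff = np.where(changed, new32, 0)  # zero out unchanged pixels
print(changed)  # True only at [0, 1]
```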
python
|
facelessuser__soupsieve
|
tests/test_level4/test_open.py
|
{
"start": 49,
"end": 1484
}
|
class ____(util.TestCase):
"""Test open selectors."""
MARKUP = """
<!DOCTYPE html>
<html>
<body>
<details id="1">
<summary>This is closed.</summary>
<p>A closed details element.</p>
</details>
<details id="2" open>
<summary>This is open.</summary>
<p>An open details element.</p>
</details>
<dialog id="3" open>
<p>Greetings, one and all!</p>
<form method="dialog">
<button>OK</button>
</form>
</dialog>
<dialog id="4">
<p>Goodbye, one and all!</p>
<form method="dialog">
<button>OK</button>
</form>
</dialog>
</body>
</html>
"""
def test_open(self):
"""Test open."""
self.assert_selector(
self.MARKUP,
":open",
['2', '3'],
flags=util.HTML
)
def test_targeted_open(self):
"""Test targeted open."""
self.assert_selector(
self.MARKUP,
"details:open",
['2'],
flags=util.HTML
)
self.assert_selector(
self.MARKUP,
"dialog:open",
['3'],
flags=util.HTML
)
def test_not_open(self):
"""Test not open."""
self.assert_selector(
self.MARKUP,
":is(dialog, details):not(:open)",
["1", "4"],
flags=util.HTML
)
|
TestOpen
|
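A short usage sketch of the selector under test, assuming `beautifulsoup4` and a `soupsieve` release that implements the level-4 `:open` selector:

```python
import soupsieve as sv
from bs4 import BeautifulSoup

html = """
<details id="closed"><summary>Closed</summary></details>
<details id="opened" open><summary>Open</summary></details>
"""
soup = BeautifulSoup(html, "html.parser")
print([el["id"] for el in sv.select("details:open", soup)])  # ['opened']
```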
python
|
huggingface__transformers
|
src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
|
{
"start": 78669,
"end": 85055
}
|
class ____(Wav2Vec2ConformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.wav2vec2_conformer = Wav2Vec2ConformerModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])
tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
self.tdnn = nn.ModuleList(tdnn_layers)
self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)
self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.wav2vec2_conformer.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.wav2vec2_conformer.parameters():
param.requires_grad = False
def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the TDNN layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return (input_length - kernel_size) // stride + 1
for kernel_size in self.config.tdnn_kernel:
input_lengths = _conv_out_length(input_lengths, kernel_size, 1)
return input_lengths
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, XVectorOutput]:
r"""
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2ConformerProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.wav2vec2_conformer(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
for tdnn_layer in self.tdnn:
hidden_states = tdnn_layer(hidden_states)
# Statistic Pooling
if attention_mask is None:
mean_features = hidden_states.mean(dim=1)
std_features = hidden_states.std(dim=1)
else:
feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
mean_features = []
std_features = []
for i, length in enumerate(tdnn_output_lengths):
mean_features.append(hidden_states[i, :length].mean(dim=0))
std_features.append(hidden_states[i, :length].std(dim=0))
mean_features = torch.stack(mean_features)
std_features = torch.stack(std_features)
statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
output_embeddings = self.feature_extractor(statistic_pooling)
logits = self.classifier(output_embeddings)
loss = None
if labels is not None:
loss = self.objective(logits, labels)
if not return_dict:
output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return XVectorOutput(
loss=loss,
logits=logits,
embeddings=output_embeddings,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"Wav2Vec2ConformerForAudioFrameClassification",
"Wav2Vec2ConformerForCTC",
"Wav2Vec2ConformerForPreTraining",
"Wav2Vec2ConformerForSequenceClassification",
"Wav2Vec2ConformerForXVector",
"Wav2Vec2ConformerModel",
"Wav2Vec2ConformerPreTrainedModel",
]
|
Wav2Vec2ConformerForXVector
|
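The statistic-pooling step above condenses a variable-length sequence of frame features into one fixed-size vector by concatenating the per-channel mean and standard deviation; a minimal sketch with made-up shapes:

```python
import torch

hidden_states = torch.randn(2, 50, 512)  # (batch, frames, channels)

mean_features = hidden_states.mean(dim=1)  # (2, 512)
std_features = hidden_states.std(dim=1)    # (2, 512)
statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
print(statistic_pooling.shape)  # torch.Size([2, 1024])
```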
python
|
scipy__scipy
|
scipy/signal/tests/test_windows.py
|
{
"start": 42313,
"end": 55199
}
|
class ____:
"""Unit test for `scipy.signal.get_windows`. """
def test_WIN_FUNC_DATA_integrity(self):
"""Verify that the `_windows._WIN_FUNC_DATA` dict is consistent.
The keys of _WIN_FUNC_DATA are made of tuples of strings of allowed window
names. Its values are 2-tuples made up of the window function and a
entry characterizing the existence of window parameters as ``True``,
``False`` or ``'OPTIONAL'``.
It is verified that the correct window name (i.e., corresponding to the
function in the value tuple) is included in the key tuple. It is also checked
that the second entry in the value tuple is either ``True``, ``False`` or
``'OPTIONAL'``.
"""
for nn_, v_ in _WIN_FUNC_DATA.items():
func_name = v_[0].__name__
msg = f"Function name in {nn_} does not contain name of actual function!"
assert func_name in nn_, msg
assert v_[1] in (True, False, 'OPTIONAL')
@make_xp_test_case(windows.boxcar)
def test_boxcar(self, xp):
w = windows.get_window('boxcar', 12, xp=xp)
xp_assert_equal(w, xp.ones_like(w))
# window is a tuple of len 1
w = windows.get_window(('boxcar',), 16, xp=xp)
xp_assert_equal(w, xp.ones_like(w))
@make_xp_test_case(windows.chebwin)
def test_cheb_odd(self, xp):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "This window is not suitable", UserWarning)
w = windows.get_window(('chebwin', -40), 53, fftbins=False, xp=xp)
assert_array_almost_equal(
w, xp.asarray(cheb_odd_true, dtype=xp.float64), decimal=4
)
@make_xp_test_case(windows.chebwin)
def test_cheb_even(self, xp):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "This window is not suitable", UserWarning)
w = windows.get_window(('chebwin', 40), 54, fftbins=False, xp=xp)
assert_array_almost_equal(w, xp.asarray(cheb_even_true), decimal=4)
@make_xp_test_case(windows.dpss)
def test_dpss(self, xp):
win1 = windows.get_window(('dpss', 3), 64, fftbins=False, xp=xp)
win2 = windows.dpss(64, 3, xp=xp)
xp_assert_equal(win1, win2)
@make_xp_test_case(windows.kaiser)
def test_kaiser_float(self, xp):
win1 = windows.get_window(7.2, 64, xp=xp)
win2 = windows.kaiser(64, 7.2, False, xp=xp)
if is_jax(xp):
# On JAX with jit enabled, there is a very small discrepancy
# in the results.
xp_assert_close(win1, win2, rtol=xp.finfo(win1.dtype).eps)
else:
xp_assert_equal(win1, win2)
@pytest.mark.parametrize('Nx', [-1, 3.0, np.float64(3)])
@make_xp_test_case(windows.hann)
def test_invalid_parameter_NX(self, Nx, xp):
with pytest.raises(ValueError, match="^Parameter Nx=.*"):
windows.get_window('hann', Nx, xp=xp)
# noinspection PyTypeChecker
def test_invalid_inputs(self, xp):
"""Raise all exceptions (except those concerning parameter `Nx`). """
with pytest.raises(ValueError, match="^Parameter fftbins=.*"):
windows.get_window('hann', 5, fftbins=1, xp=xp)
with pytest.raises(ValueError, match="^Parameter window=.*"):
windows.get_window(['hann',], 5, xp=xp)
with pytest.raises(ValueError, match="^First tuple entry of parameter win.*"):
windows.get_window((42,), 5, xp=xp)
with pytest.raises(ValueError, match="^Invalid window name 'INVALID'.*"):
windows.get_window('INVALID', 5, xp=xp)
with pytest.raises(ValueError, match="^'hann' does not allow parameters.*"):
windows.get_window(('hann', 1), 5, xp=xp)
with pytest.raises(ValueError, match="^'kaiser' must have parameters.*"):
windows.get_window('kaiser', 5, xp=xp)
with pytest.raises(ValueError, match="^Window dpss must have one.*"):
windows.get_window(('dpss', 1, 2), 5, xp=xp)
with pytest.raises(ValueError, match="^'general_cosine' does not accept.*"):
xp_ = xp or np # ensure parameter xp_ is not None
windows.get_window(('general cosine', [1, 2]), 5, xp=xp_)
@make_xp_test_case(windows.bartlett)
def test_symmetric_periodic(self, xp):
"""Ensure that suffixes `_periodic` and `_symmetric` work for window names. """
w_sym = windows.bartlett(5, sym=True, xp=xp)
xp_assert_close(get_window('bartlett', 5, fftbins=False, xp=xp), w_sym)
xp_assert_close(get_window('bartlett_symmetric', 5, xp=xp), w_sym)
# overwrite parameter `fftbins`:
xp_assert_close(get_window('bartlett_symmetric', 5, fftbins=True, xp=xp), w_sym)
w_per = windows.bartlett(5, sym=False, xp=xp)
xp_assert_close(get_window('bartlett', 5, xp=xp), w_per)
xp_assert_close(get_window('bartlett', 5, fftbins=True, xp=xp), w_per)
xp_assert_close(get_window('bartlett_periodic', 5, xp=xp), w_per)
# overwrite parameter `fftbins`:
xp_assert_close(get_window('bartlett_periodic', 5, fftbins=False, xp=xp),
w_per)
@make_xp_test_case(windows.kaiser)
def test_array_as_window(self, xp):
# github issue 3603
osfactor = 128
sig = xp.arange(128)
win = windows.get_window(('kaiser', 8.0), osfactor // 2, xp=xp)
mesg = "^window must" if is_cupy(xp) else "^window.shape="
with assert_raises(ValueError, match=mesg):
resample(sig, sig.shape[0] * osfactor, window=win)
@make_xp_test_case(windows.general_cosine)
def test_general_cosine(self, xp):
xp_assert_close(get_window(('general_cosine', xp.asarray([0.5, 0.3, 0.2])), 4),
xp.asarray([0.4, 0.3, 1, 0.3], dtype=xp.float64))
xp_assert_close(get_window(('general_cosine', xp.asarray([0.5, 0.3, 0.2])), 4,
fftbins=False),
xp.asarray([0.4, 0.55, 0.55, 0.4], dtype=xp.float64))
with pytest.raises(ValueError):
get_window(('general_cosine', [0.5, 0.3, 0.2]), 4, xp=xp)
@make_xp_test_case(windows.general_hamming)
def test_general_hamming(self, xp):
xp_assert_close(get_window(('general_hamming', 0.7), 5, xp=xp),
xp.asarray([0.4, 0.6072949, 0.9427051, 0.9427051, 0.6072949],
dtype=xp.float64))
xp_assert_close(get_window(('general_hamming', 0.7), 5, fftbins=False, xp=xp),
xp.asarray([0.4, 0.7, 1.0, 0.7, 0.4], dtype=xp.float64))
@make_xp_test_case(windows.lanczos)
def test_lanczos(self, xp):
xp_assert_close(get_window('lanczos', 6, xp=xp),
xp.asarray([0., 0.413496672, 0.826993343, 1., 0.826993343,
0.413496672], dtype=xp.float64), atol=1e-9)
xp_assert_close(get_window('lanczos', 6, fftbins=False, xp=xp),
xp.asarray([0., 0.504551152, 0.935489284, 0.935489284,
0.504551152, 0.], dtype=xp.float64), atol=1e-9)
xp_assert_close(get_window('lanczos', 6, xp=xp),
get_window('sinc', 6, xp=xp))
def test_xp_default(self, xp):
# no explicit xp= argument, default to numpy
win = get_window('lanczos', 6)
assert isinstance(win, np.ndarray)
win = get_window('lanczos', 6, xp=xp)
if not is_numpy(xp):
assert not isinstance(win, np.ndarray)
@skip_xp_backends("dask.array", reason="https://github.com/dask/dask/issues/2620")
@pytest.mark.parametrize(
"window,window_name,params",
[
make_xp_pytest_param(getattr(windows, window_name), window_name, params)
for window_name, params in window_funcs
]
)
def test_windowfunc_basics(window, window_name, params, xp):
window = getattr(windows, window_name)
if is_jax(xp) and window_name in ['taylor', 'chebwin']:
pytest.skip(reason=f'{window_name = }: item assignment')
if window_name in ['dpss']:
if is_cupy(xp):
pytest.skip(reason='dpss window is not implemented for cupy')
if is_torch(xp) and SCIPY_DEVICE != 'cpu':
pytest.skip(reason='needs eigh_tridiagonal which is CPU only')
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "This window is not suitable", UserWarning)
# Check symmetry for odd and even lengths
w1 = window(8, *params, sym=True, xp=xp)
w2 = window(7, *params, sym=False, xp=xp)
xp_assert_close(w1[:-1], w2)
w1 = window(9, *params, sym=True, xp=xp)
w2 = window(8, *params, sym=False, xp=xp)
xp_assert_close(w1[:-1], w2)
# Check that functions run and output lengths are correct
assert window(6, *params, sym=True, xp=xp).shape[0] == 6
assert window(6, *params, sym=False, xp=xp).shape[0] == 6
assert window(7, *params, sym=True, xp=xp).shape[0] == 7
assert window(7, *params, sym=False, xp=xp).shape[0] == 7
# Check invalid lengths
assert_raises(ValueError, window, 5.5, *params, xp=xp)
assert_raises(ValueError, window, -7, *params, xp=xp)
# Check degenerate cases
xp_assert_equal(window(0, *params, sym=True, xp=xp),
xp.asarray([], dtype=xp.float64))
xp_assert_equal(window(0, *params, sym=False, xp=xp),
xp.asarray([], dtype=xp.float64))
xp_assert_equal(window(1, *params, sym=True, xp=xp),
xp.asarray([1.], dtype=xp.float64))
xp_assert_equal(window(1, *params, sym=False, xp=xp),
xp.asarray([1.], dtype=xp.float64))
# Check dtype
assert window(0, *params, sym=True, xp=xp).dtype == xp.float64
assert window(0, *params, sym=False, xp=xp).dtype == xp.float64
assert window(1, *params, sym=True, xp=xp).dtype == xp.float64
assert window(1, *params, sym=False, xp=xp).dtype == xp.float64
assert window(6, *params, sym=True, xp=xp).dtype == xp.float64
assert window(6, *params, sym=False, xp=xp).dtype == xp.float64
# Check normalization
assert xp.all(window(10, *params, sym=True, xp=xp) < 1.01)
assert xp.all(window(10, *params, sym=False, xp=xp) < 1.01)
assert xp.all(window(9, *params, sym=True, xp=xp) < 1.01)
assert xp.all(window(9, *params, sym=False, xp=xp) < 1.01)
# Check that DFT-even spectrum is purely real for odd and even
res = fft(window(10, *params, sym=False, xp=xp))
res = xp.imag(res)
xp_assert_close(res, xp.zeros_like(res), atol=1e-14)
res = fft(window(11, *params, sym=False, xp=xp))
res = xp.imag(res)
xp_assert_close(res, xp.zeros_like(res), atol=1e-14)
@make_xp_test_case(get_window)
def test_needs_params(xp):
for winstr in ['kaiser', 'ksr', 'kaiser_bessel_derived', 'kbd',
'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'dss', 'dpss', 'general cosine', 'general_cosine',
'chebwin', 'cheb', 'general hamming', 'general_hamming',
]:
assert_raises(ValueError, get_window, winstr, 7, xp=xp)
_winstr = ['barthann',
'bartlett',
'blackman',
'blackmanharris',
'bohman',
'boxcar',
'cosine',
'flattop',
'hamming',
'nuttall',
'parzen',
'taylor',
'exponential',
'poisson',
'tukey',
'tuk',
'triangle',
'lanczos',
'sinc',
]
@pytest.mark.parametrize(
'window,winstr',
[
make_xp_pytest_param(_WIN_FUNCS[winstr][0], winstr)
for winstr in _winstr
]
)
@make_xp_test_case(get_window)
def test_not_needs_params(xp, window, winstr):
if is_jax(xp) and winstr in ['taylor']:
pytest.skip(reason=f'{winstr}: item assignment')
win = get_window(winstr, 7, xp=xp)
assert win.shape[0] == 7
@make_xp_test_case(windows.lanczos)
def test_symmetric(xp):
for win in [windows.lanczos]:
# Even sampling points
w = win(4096, xp=xp)
flip = array_namespace(w).flip
error = xp.max(xp.abs(w - flip(w)))
xp_assert_equal(error, xp.asarray(0.0), check_dtype=False, check_0d=False)
# Odd sampling points
w = win(4097, xp=xp)
error = xp.max(xp.abs(w - flip(w)))
xp_assert_equal(error, xp.asarray(0.0), check_dtype=False, check_0d=False)
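# A small illustrative sketch (not part of the test suite above; assumes the numpy
# default backend) of the two get_window calling conventions these tests exercise:
# a bare string for parameter-free windows, and a tuple for parametrized ones.
def _demo_get_window_conventions():
    w1 = get_window('hamming', 8)           # parameter-free window by name
    w2 = get_window(('kaiser', 8.0), 8)     # beta=8.0 passed through the tuple
    assert w1.shape == w2.shape == (8,)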
|
TestGetWindow
|
python
|
apache__airflow
|
airflow-core/src/airflow/jobs/triggerer_job_runner.py
|
{
"start": 28230,
"end": 30270
}
|
class ____(CommsDecoder[ToTriggerRunner, ToTriggerSupervisor]):
_async_writer: asyncio.StreamWriter = attrs.field(alias="async_writer")
_async_reader: asyncio.StreamReader = attrs.field(alias="async_reader")
body_decoder: TypeAdapter[ToTriggerRunner] = attrs.field(
factory=lambda: TypeAdapter(ToTriggerRunner), repr=False
)
_lock: asyncio.Lock = attrs.field(factory=asyncio.Lock, repr=False)
def _read_frame(self):
from asgiref.sync import async_to_sync
return async_to_sync(self._aread_frame)()
def send(self, msg: ToTriggerSupervisor) -> ToTriggerRunner | None:
from asgiref.sync import async_to_sync
return async_to_sync(self.asend)(msg)
async def _aread_frame(self):
try:
len_bytes = await self._async_reader.readexactly(4)
        except ConnectionResetError:
            asyncio.current_task().cancel("Supervisor closed")
            # Propagate the cancellation immediately; otherwise execution would
            # fall through with `len_bytes` unbound.
            raise asyncio.CancelledError("Supervisor closed")
length = int.from_bytes(len_bytes, byteorder="big")
if length >= 2**32:
raise OverflowError(f"Refusing to receive messages larger than 4GiB {length=}")
buffer = await self._async_reader.readexactly(length)
return self.resp_decoder.decode(buffer)
async def _aget_response(self, expect_id: int) -> ToTriggerRunner | None:
frame = await self._aread_frame()
if frame.id != expect_id:
            # Given the lock we take out in `asend`, this _shouldn't_ be possible, but I'd rather fail with
            # this explicit error than return the wrong type of message back to a Trigger
raise RuntimeError(f"Response read out of order! Got {frame.id=}, {expect_id=}")
return self._from_frame(frame)
async def asend(self, msg: ToTriggerSupervisor) -> ToTriggerRunner | None:
frame = _RequestFrame(id=next(self.id_counter), body=msg.model_dump())
bytes = frame.as_bytes()
async with self._lock:
self._async_writer.write(bytes)
return await self._aget_response(frame.id)
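# Minimal sketch of the wire framing that _aread_frame() above expects (the layout is
# inferred from the read path, not confirmed elsewhere in this extract): a 4-byte
# big-endian length prefix followed by that many payload bytes.
def _demo_frame_layout() -> None:
    payload = b'{"id": 1, "body": {}}'
    frame = len(payload).to_bytes(4, byteorder="big") + payload
    assert int.from_bytes(frame[:4], byteorder="big") == len(payload)
    assert frame[4:] == payload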
|
TriggerCommsDecoder
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/dataplex.py
|
{
"start": 4075,
"end": 4347
}
|
class ____(BaseGoogleLink):
"""Helper class for constructing Dataplex Catalog AspectTypes link."""
name = "Dataplex Catalog AspectTypes"
key = "dataplex_catalog_aspect_types_key"
format_str = DATAPLEX_CATALOG_ASPECT_TYPES_LINK
|
DataplexCatalogAspectTypesLink
|
python
|
pallets__itsdangerous
|
src/itsdangerous/serializer.py
|
{
"start": 1339,
"end": 15563
}
|
class ____(t.Generic[_TSerialized]):
"""A serializer wraps a :class:`~itsdangerous.signer.Signer` to
enable serializing and securely signing data other than bytes. It
can unsign to verify that the data hasn't been changed.
The serializer provides :meth:`dumps` and :meth:`loads`, similar to
:mod:`json`, and by default uses :mod:`json` internally to serialize
the data to bytes.
The secret key should be a random string of ``bytes`` and should not
be saved to code or version control. Different salts should be used
to distinguish signing in different contexts. See :doc:`/concepts`
for information about the security of the secret key and salt.
:param secret_key: The secret key to sign and verify with. Can be a
list of keys, oldest to newest, to support key rotation.
:param salt: Extra key to combine with ``secret_key`` to distinguish
signatures in different contexts.
:param serializer: An object that provides ``dumps`` and ``loads``
methods for serializing data to a string. Defaults to
:attr:`default_serializer`, which defaults to :mod:`json`.
:param serializer_kwargs: Keyword arguments to pass when calling
``serializer.dumps``.
:param signer: A ``Signer`` class to instantiate when signing data.
Defaults to :attr:`default_signer`, which defaults to
:class:`~itsdangerous.signer.Signer`.
:param signer_kwargs: Keyword arguments to pass when instantiating
the ``Signer`` class.
:param fallback_signers: List of signer parameters to try when
unsigning with the default signer fails. Each item can be a dict
of ``signer_kwargs``, a ``Signer`` class, or a tuple of
``(signer, signer_kwargs)``. Defaults to
:attr:`default_fallback_signers`.
.. versionchanged:: 2.0
Added support for key rotation by passing a list to
``secret_key``.
.. versionchanged:: 2.0
Removed the default SHA-512 fallback signer from
``default_fallback_signers``.
.. versionchanged:: 1.1
Added support for ``fallback_signers`` and configured a default
SHA-512 fallback. This fallback is for users who used the yanked
1.0.0 release which defaulted to SHA-512.
.. versionchanged:: 0.14
The ``signer`` and ``signer_kwargs`` parameters were added to
the constructor.
"""
#: The default serialization module to use to serialize data to a
#: string internally. The default is :mod:`json`, but can be changed
#: to any object that provides ``dumps`` and ``loads`` methods.
default_serializer: _PDataSerializer[t.Any] = json
#: The default ``Signer`` class to instantiate when signing data.
#: The default is :class:`itsdangerous.signer.Signer`.
default_signer: type[Signer] = Signer
#: The default fallback signers to try when unsigning fails.
default_fallback_signers: list[
dict[str, t.Any] | tuple[type[Signer], dict[str, t.Any]] | type[Signer]
] = []
# Serializer[str] if no data serializer is provided, or if it returns str.
@t.overload
def __init__(
self: Serializer[str],
secret_key: str | bytes | cabc.Iterable[str] | cabc.Iterable[bytes],
salt: str | bytes | None = b"itsdangerous",
serializer: None | _PDataSerializer[str] = None,
serializer_kwargs: dict[str, t.Any] | None = None,
signer: type[Signer] | None = None,
signer_kwargs: dict[str, t.Any] | None = None,
fallback_signers: list[
dict[str, t.Any] | tuple[type[Signer], dict[str, t.Any]] | type[Signer]
]
| None = None,
): ...
# Serializer[bytes] with a bytes data serializer positional argument.
@t.overload
def __init__(
self: Serializer[bytes],
secret_key: str | bytes | cabc.Iterable[str] | cabc.Iterable[bytes],
salt: str | bytes | None,
serializer: _PDataSerializer[bytes],
serializer_kwargs: dict[str, t.Any] | None = None,
signer: type[Signer] | None = None,
signer_kwargs: dict[str, t.Any] | None = None,
fallback_signers: list[
dict[str, t.Any] | tuple[type[Signer], dict[str, t.Any]] | type[Signer]
]
| None = None,
): ...
# Serializer[bytes] with a bytes data serializer keyword argument.
@t.overload
def __init__(
self: Serializer[bytes],
secret_key: str | bytes | cabc.Iterable[str] | cabc.Iterable[bytes],
salt: str | bytes | None = b"itsdangerous",
*,
serializer: _PDataSerializer[bytes],
serializer_kwargs: dict[str, t.Any] | None = None,
signer: type[Signer] | None = None,
signer_kwargs: dict[str, t.Any] | None = None,
fallback_signers: list[
dict[str, t.Any] | tuple[type[Signer], dict[str, t.Any]] | type[Signer]
]
| None = None,
): ...
# Fall back with a positional argument. If the strict signature of
# _PDataSerializer doesn't match, fall back to a union, requiring the user
# to specify the type.
@t.overload
def __init__(
self,
secret_key: str | bytes | cabc.Iterable[str] | cabc.Iterable[bytes],
salt: str | bytes | None,
serializer: t.Any,
serializer_kwargs: dict[str, t.Any] | None = None,
signer: type[Signer] | None = None,
signer_kwargs: dict[str, t.Any] | None = None,
fallback_signers: list[
dict[str, t.Any] | tuple[type[Signer], dict[str, t.Any]] | type[Signer]
]
| None = None,
): ...
# Fall back with a keyword argument.
@t.overload
def __init__(
self,
secret_key: str | bytes | cabc.Iterable[str] | cabc.Iterable[bytes],
salt: str | bytes | None = b"itsdangerous",
*,
serializer: t.Any,
serializer_kwargs: dict[str, t.Any] | None = None,
signer: type[Signer] | None = None,
signer_kwargs: dict[str, t.Any] | None = None,
fallback_signers: list[
dict[str, t.Any] | tuple[type[Signer], dict[str, t.Any]] | type[Signer]
]
| None = None,
): ...
def __init__(
self,
secret_key: str | bytes | cabc.Iterable[str] | cabc.Iterable[bytes],
salt: str | bytes | None = b"itsdangerous",
serializer: t.Any | None = None,
serializer_kwargs: dict[str, t.Any] | None = None,
signer: type[Signer] | None = None,
signer_kwargs: dict[str, t.Any] | None = None,
fallback_signers: list[
dict[str, t.Any] | tuple[type[Signer], dict[str, t.Any]] | type[Signer]
]
| None = None,
):
#: The list of secret keys to try for verifying signatures, from
#: oldest to newest. The newest (last) key is used for signing.
#:
#: This allows a key rotation system to keep a list of allowed
#: keys and remove expired ones.
self.secret_keys: list[bytes] = _make_keys_list(secret_key)
if salt is not None:
salt = want_bytes(salt)
# if salt is None then the signer's default is used
self.salt = salt
if serializer is None:
serializer = self.default_serializer
self.serializer: _PDataSerializer[_TSerialized] = serializer
self.is_text_serializer: bool = is_text_serializer(serializer)
if signer is None:
signer = self.default_signer
self.signer: type[Signer] = signer
self.signer_kwargs: dict[str, t.Any] = signer_kwargs or {}
if fallback_signers is None:
fallback_signers = list(self.default_fallback_signers)
self.fallback_signers: list[
dict[str, t.Any] | tuple[type[Signer], dict[str, t.Any]] | type[Signer]
] = fallback_signers
self.serializer_kwargs: dict[str, t.Any] = serializer_kwargs or {}
@property
def secret_key(self) -> bytes:
"""The newest (last) entry in the :attr:`secret_keys` list. This
is for compatibility from before key rotation support was added.
"""
return self.secret_keys[-1]
def load_payload(
self, payload: bytes, serializer: _PDataSerializer[t.Any] | None = None
) -> t.Any:
"""Loads the encoded object. This function raises
:class:`.BadPayload` if the payload is not valid. The
``serializer`` parameter can be used to override the serializer
stored on the class. The encoded ``payload`` should always be
bytes.
"""
if serializer is None:
use_serializer = self.serializer
is_text = self.is_text_serializer
else:
use_serializer = serializer
is_text = is_text_serializer(serializer)
try:
if is_text:
return use_serializer.loads(payload.decode("utf-8")) # type: ignore[arg-type]
return use_serializer.loads(payload) # type: ignore[arg-type]
except Exception as e:
raise BadPayload(
"Could not load the payload because an exception"
" occurred on unserializing the data.",
original_error=e,
) from e
def dump_payload(self, obj: t.Any) -> bytes:
"""Dumps the encoded object. The return value is always bytes.
If the internal serializer returns text, the value will be
encoded as UTF-8.
"""
return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))
def make_signer(self, salt: str | bytes | None = None) -> Signer:
"""Creates a new instance of the signer to be used. The default
implementation uses the :class:`.Signer` base class.
"""
if salt is None:
salt = self.salt
return self.signer(self.secret_keys, salt=salt, **self.signer_kwargs)
def iter_unsigners(self, salt: str | bytes | None = None) -> cabc.Iterator[Signer]:
"""Iterates over all signers to be tried for unsigning. Starts
with the configured signer, then constructs each signer
specified in ``fallback_signers``.
"""
if salt is None:
salt = self.salt
yield self.make_signer(salt)
for fallback in self.fallback_signers:
if isinstance(fallback, dict):
kwargs = fallback
fallback = self.signer
elif isinstance(fallback, tuple):
fallback, kwargs = fallback
else:
kwargs = self.signer_kwargs
for secret_key in self.secret_keys:
yield fallback(secret_key, salt=salt, **kwargs)
def dumps(self, obj: t.Any, salt: str | bytes | None = None) -> _TSerialized:
"""Returns a signed string serialized with the internal
serializer. The return value can be either a byte or unicode
string depending on the format of the internal serializer.
"""
payload = want_bytes(self.dump_payload(obj))
rv = self.make_signer(salt).sign(payload)
if self.is_text_serializer:
return rv.decode("utf-8") # type: ignore[return-value]
return rv # type: ignore[return-value]
def dump(self, obj: t.Any, f: t.IO[t.Any], salt: str | bytes | None = None) -> None:
"""Like :meth:`dumps` but dumps into a file. The file handle has
to be compatible with what the internal serializer expects.
"""
f.write(self.dumps(obj, salt))
def loads(
self, s: str | bytes, salt: str | bytes | None = None, **kwargs: t.Any
) -> t.Any:
"""Reverse of :meth:`dumps`. Raises :exc:`.BadSignature` if the
signature validation fails.
"""
s = want_bytes(s)
last_exception = None
for signer in self.iter_unsigners(salt):
try:
return self.load_payload(signer.unsign(s))
except BadSignature as err:
last_exception = err
raise t.cast(BadSignature, last_exception)
def load(self, f: t.IO[t.Any], salt: str | bytes | None = None) -> t.Any:
"""Like :meth:`loads` but loads from a file."""
return self.loads(f.read(), salt)
def loads_unsafe(
self, s: str | bytes, salt: str | bytes | None = None
) -> tuple[bool, t.Any]:
"""Like :meth:`loads` but without verifying the signature. This
is potentially very dangerous to use depending on how your
serializer works. The return value is ``(signature_valid,
payload)`` instead of just the payload. The first item will be a
boolean that indicates if the signature is valid. This function
never fails.
Use it for debugging only and if you know that your serializer
module is not exploitable (for example, do not use it with a
pickle serializer).
.. versionadded:: 0.15
"""
return self._loads_unsafe_impl(s, salt)
def _loads_unsafe_impl(
self,
s: str | bytes,
salt: str | bytes | None,
load_kwargs: dict[str, t.Any] | None = None,
load_payload_kwargs: dict[str, t.Any] | None = None,
) -> tuple[bool, t.Any]:
"""Low level helper function to implement :meth:`loads_unsafe`
in serializer subclasses.
"""
if load_kwargs is None:
load_kwargs = {}
try:
return True, self.loads(s, salt=salt, **load_kwargs)
except BadSignature as e:
if e.payload is None:
return False, None
if load_payload_kwargs is None:
load_payload_kwargs = {}
try:
return (
False,
self.load_payload(e.payload, **load_payload_kwargs),
)
except BadPayload:
return False, None
def load_unsafe(
self, f: t.IO[t.Any], salt: str | bytes | None = None
) -> tuple[bool, t.Any]:
"""Like :meth:`loads_unsafe` but loads from a file.
.. versionadded:: 0.15
"""
return self.loads_unsafe(f.read(), salt=salt)
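# Minimal round-trip sketch for the serializer above (hypothetical secret key and
# salt; the name `Serializer` follows the annotations used in the overloads):
def _demo_serializer_roundtrip() -> None:
    s = Serializer("secret-key", salt="activate")
    token = s.dumps({"user_id": 42})     # sign and serialize
    assert s.loads(token) == {"user_id": 42}  # verify and deserialize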
|
Serializer
|
python
|
django-mptt__django-mptt
|
tests/myapp/models.py
|
{
"start": 9619,
"end": 9833
}
|
class ____(MPTTModel):
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
not_concrete_field = FakeNotConcreteField()
|
NotConcreteFieldModel
|
python
|
spack__spack
|
lib/spack/spack/builder.py
|
{
"start": 15972,
"end": 19430
}
|
class ____(metaclass=BuilderMeta):
"""An interface for builders, without any phases defined. This class is exposed in the package
API, so that packagers can create a single class to define :meth:`setup_build_environment` and
:func:`spack.phase_callbacks.run_before` and :func:`spack.phase_callbacks.run_after`
callbacks that can be shared among different builders.
Example:
.. code-block:: python
class AnyBuilder(BaseBuilder):
@run_after("install")
def fixup_install(self):
# do something after the package is installed
pass
def setup_build_environment(self, env: EnvironmentModifications) -> None:
env.set("MY_ENV_VAR", "my_value")
class CMakeBuilder(cmake.CMakeBuilder, AnyBuilder):
pass
class AutotoolsBuilder(autotools.AutotoolsBuilder, AnyBuilder):
pass
"""
def __init__(self, pkg: spack.package_base.PackageBase) -> None:
self.pkg = pkg
@property
def spec(self) -> spack.spec.Spec:
return self.pkg.spec
@property
def stage(self):
return self.pkg.stage
@property
def prefix(self):
return self.pkg.prefix
def setup_build_environment(
self, env: spack.util.environment.EnvironmentModifications
) -> None:
"""Sets up the build environment for a package.
This method will be called before the current package prefix exists in
Spack's store.
Args:
env: environment modifications to be applied when the package is built. Package authors
can call methods on it to alter the build environment.
"""
if not hasattr(super(), "setup_build_environment"):
return
super().setup_build_environment(env) # type: ignore
def setup_dependent_build_environment(
self, env: spack.util.environment.EnvironmentModifications, dependent_spec: spack.spec.Spec
) -> None:
"""Sets up the build environment of a package that depends on this one.
This is similar to ``setup_build_environment``, but it is used to modify the build
environment of a package that *depends* on this one.
This gives packages the ability to set environment variables for the build of the
dependent, which can be useful to provide search hints for headers or libraries if they are
not in standard locations.
This method will be called before the dependent package prefix exists in Spack's store.
Args:
env: environment modifications to be applied when the dependent package is built.
Package authors can call methods on it to alter the build environment.
dependent_spec: the spec of the dependent package about to be built. This allows the
extendee (self) to query the dependent's state. Note that *this* package's spec is
available as ``self.spec``
"""
if not hasattr(super(), "setup_dependent_build_environment"):
return
super().setup_dependent_build_environment(env, dependent_spec) # type: ignore
def __repr__(self):
fmt = "{name}{/hash:7}"
return f"{self.__class__.__name__}({self.spec.format(fmt)})"
def __str__(self):
fmt = "{name}{/hash:7}"
return f'"{self.__class__.__name__}" builder for "{self.spec.format(fmt)}"'
|
BaseBuilder
|
python
|
huggingface__transformers
|
src/transformers/models/vjepa2/modeling_vjepa2.py
|
{
"start": 35160,
"end": 36007
}
|
class ____(nn.Module):
"""Attentive Pooler"""
def __init__(self, config: VJEPA2Config):
super().__init__()
self.query_tokens = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.cross_attention_layer = VJEPA2PoolerCrossAttentionLayer(config)
self.self_attention_layers = nn.ModuleList(
[VJEPA2PoolerSelfAttentionLayer(config) for _ in range(config.num_pooler_layers)]
)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
for layer in self.self_attention_layers:
hidden_state = layer(hidden_state, attention_mask=None)[0]
queries = self.query_tokens.repeat(hidden_state.shape[0], 1, 1)
hidden_state = self.cross_attention_layer(queries, hidden_state)[0]
return hidden_state.squeeze(1)
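# Shape sketch with a hypothetical stand-in attention module (not the real VJEPA2
# layers), just to show the pooling contract: (batch, seq, hidden) -> (batch, hidden).
def _demo_query_token_pooling() -> None:
    import torch
    attn = torch.nn.MultiheadAttention(embed_dim=32, num_heads=4, batch_first=True)
    tokens = torch.randn(2, 16, 32)            # (batch, seq, hidden)
    query = torch.zeros(2, 1, 32)              # one learned query per sample
    pooled, _ = attn(query, tokens, tokens)    # query cross-attends over all tokens
    assert pooled.squeeze(1).shape == (2, 32)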
@auto_docstring
|
VJEPA2AttentivePooler
|
python
|
huggingface__transformers
|
src/transformers/models/vits/modeling_vits.py
|
{
"start": 24415,
"end": 25743
}
|
class ____(nn.Module):
def __init__(self, config: VitsConfig):
super().__init__()
self.half_channels = config.flow_size // 2
self.conv_pre = nn.Conv1d(self.half_channels, config.hidden_size, 1)
self.wavenet = VitsWaveNet(config, num_layers=config.prior_encoder_num_wavenet_layers)
self.conv_post = nn.Conv1d(config.hidden_size, self.half_channels, 1)
def forward(self, inputs, padding_mask, global_conditioning=None, reverse=False):
first_half, second_half = torch.split(inputs, [self.half_channels] * 2, dim=1)
hidden_states = self.conv_pre(first_half) * padding_mask
hidden_states = self.wavenet(hidden_states, padding_mask, global_conditioning)
mean = self.conv_post(hidden_states) * padding_mask
log_stddev = torch.zeros_like(mean)
if not reverse:
second_half = mean + second_half * torch.exp(log_stddev) * padding_mask
outputs = torch.cat([first_half, second_half], dim=1)
log_determinant = torch.sum(log_stddev, [1, 2])
return outputs, log_determinant
else:
second_half = (second_half - mean) * torch.exp(-log_stddev) * padding_mask
outputs = torch.cat([first_half, second_half], dim=1)
return outputs, None
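# Illustrative invertibility check for the affine coupling above (scalar stand-in;
# note log_stddev is fixed to zeros in this layer, so the flow is mean-only):
def _demo_coupling_inverse() -> None:
    import math
    mean, log_stddev, x2 = 0.5, 0.0, 2.0
    y2 = mean + x2 * math.exp(log_stddev)            # forward direction
    x2_back = (y2 - mean) * math.exp(-log_stddev)    # reverse direction
    assert abs(x2_back - x2) < 1e-12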
|
VitsResidualCouplingLayer
|
python
|
huggingface__transformers
|
src/transformers/pipelines/question_answering.py
|
{
"start": 9134,
"end": 30192
}
|
class ____(ChunkPipeline):
"""
Question Answering pipeline using any `ModelForQuestionAnswering`. See the [question answering
examples](../task_summary#question-answering) for more information.
Example:
```python
>>> from transformers import pipeline
>>> oracle = pipeline(model="deepset/roberta-base-squad2")
>>> oracle(question="Where do I live?", context="My name is Wolfgang and I live in Berlin")
{'score': 0.9191, 'start': 34, 'end': 40, 'answer': 'Berlin'}
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"question-answering"`.
The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the
up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=question-answering).
"""
default_input_names = "question,context"
handle_impossible_answer = False
def __init__(
self,
model: "PreTrainedModel",
tokenizer: PreTrainedTokenizer,
modelcard: ModelCard | None = None,
task: str = "",
**kwargs,
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
task=task,
**kwargs,
)
self._args_parser = QuestionAnsweringArgumentHandler()
self.check_model_type(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES)
@staticmethod
def create_sample(question: str | list[str], context: str | list[str]) -> SquadExample | list[SquadExample]:
"""
        QuestionAnsweringPipeline leverages the [`SquadExample`] internally. This helper method encapsulates all the
logic for converting question(s) and context(s) to [`SquadExample`].
We currently support extractive question answering.
Arguments:
question (`str` or `list[str]`): The question(s) asked.
context (`str` or `list[str]`): The context(s) in which we will look for the answer.
Returns:
One or a list of [`SquadExample`]: The corresponding [`SquadExample`] grouping question and context.
"""
if isinstance(question, list):
return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
else:
return SquadExample(None, question, context, None, None, None)
def _sanitize_parameters(
self,
padding=None,
topk=None,
top_k=None,
doc_stride=None,
max_answer_len=None,
max_seq_len=None,
max_question_len=None,
handle_impossible_answer=None,
align_to_words=None,
**kwargs,
):
# Set defaults values
preprocess_params = {}
if padding is not None:
preprocess_params["padding"] = padding
if doc_stride is not None:
preprocess_params["doc_stride"] = doc_stride
if max_question_len is not None:
preprocess_params["max_question_len"] = max_question_len
if max_seq_len is not None:
preprocess_params["max_seq_len"] = max_seq_len
postprocess_params = {}
if topk is not None and top_k is None:
warnings.warn("topk parameter is deprecated, use top_k instead", UserWarning)
top_k = topk
if top_k is not None:
if top_k < 1:
raise ValueError(f"top_k parameter should be >= 1 (got {top_k})")
postprocess_params["top_k"] = top_k
if max_answer_len is not None:
if max_answer_len < 1:
raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}")
postprocess_params["max_answer_len"] = max_answer_len
if handle_impossible_answer is not None:
postprocess_params["handle_impossible_answer"] = handle_impossible_answer
if align_to_words is not None:
postprocess_params["align_to_words"] = align_to_words
return preprocess_params, {}, postprocess_params
def __call__(self, *args, **kwargs):
"""
Answer the question(s) given as inputs by using the context(s).
Args:
question (`str` or `list[str]`):
One or several question(s) (must be used in conjunction with the `context` argument).
context (`str` or `list[str]`):
One or several context(s) associated with the question(s) (must be used in conjunction with the
`question` argument).
top_k (`int`, *optional*, defaults to 1):
                The number of answers to return (will be chosen by order of likelihood). Note that we return
                fewer than top_k answers if there are not enough options available within the context.
doc_stride (`int`, *optional*, defaults to 128):
If the context is too long to fit with the question for the model, it will be split in several chunks
with some overlap. This argument controls the size of that overlap.
max_answer_len (`int`, *optional*, defaults to 15):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_seq_len (`int`, *optional*, defaults to 384):
The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.
max_question_len (`int`, *optional*, defaults to 64):
The maximum length of the question after tokenization. It will be truncated if needed.
handle_impossible_answer (`bool`, *optional*, defaults to `False`):
Whether or not we accept impossible as an answer.
align_to_words (`bool`, *optional*, defaults to `True`):
Attempts to align the answer to real words. Improves quality on space separated languages. Might hurt on
non-space-separated languages (like Japanese or Chinese)
Return:
A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
- **score** (`float`) -- The probability associated to the answer.
- **start** (`int`) -- The character start index of the answer (in the tokenized version of the input).
- **end** (`int`) -- The character end index of the answer (in the tokenized version of the input).
- **answer** (`str`) -- The answer to the question.
"""
# Convert inputs to features
if args:
warnings.warn(
"Passing a list of SQuAD examples to the pipeline is deprecated and will be removed in v5. Inputs should be passed using the `question` and `context` keyword arguments instead.",
FutureWarning,
)
examples = self._args_parser(*args, **kwargs)
if isinstance(examples, (list, tuple)) and len(examples) == 1:
return super().__call__(examples[0], **kwargs)
return super().__call__(examples, **kwargs)
def preprocess(self, example, padding="do_not_pad", doc_stride=None, max_question_len=64, max_seq_len=None):
# XXX: This is special, args_parser will not handle anything generator or dataset like
# For those we expect user to send a simple valid example either directly as a SquadExample or simple dict.
# So we still need a little sanitation here.
if isinstance(example, dict):
example = SquadExample(None, example["question"], example["context"], None, None, None)
if max_seq_len is None:
max_seq_len = min(self.tokenizer.model_max_length, 384)
if doc_stride is None:
doc_stride = min(max_seq_len // 2, 128)
if doc_stride > max_seq_len:
raise ValueError(f"`doc_stride` ({doc_stride}) is larger than `max_seq_len` ({max_seq_len})")
if not self.tokenizer.is_fast:
features = squad_convert_examples_to_features(
examples=[example],
tokenizer=self.tokenizer,
max_seq_length=max_seq_len,
doc_stride=doc_stride,
max_query_length=max_question_len,
padding_strategy=PaddingStrategy.MAX_LENGTH,
is_training=False,
tqdm_enabled=False,
)
else:
# Define the side we want to truncate / pad and the text/pair sorting
question_first = self.tokenizer.padding_side == "right"
encoded_inputs = self.tokenizer(
text=example.question_text if question_first else example.context_text,
text_pair=example.context_text if question_first else example.question_text,
padding=padding,
truncation="only_second" if question_first else "only_first",
max_length=max_seq_len,
stride=doc_stride,
return_token_type_ids=True,
return_overflowing_tokens=True,
return_offsets_mapping=True,
return_special_tokens_mask=True,
)
# When the input is too long, it's converted in a batch of inputs with overflowing tokens
# and a stride of overlap between the inputs. If a batch of inputs is given, a special output
# "overflow_to_sample_mapping" indicate which member of the encoded batch belong to which original batch sample.
# Here we tokenize examples one-by-one so we don't need to use "overflow_to_sample_mapping".
# "num_span" is the number of output samples generated from the overflowing tokens.
num_spans = len(encoded_inputs["input_ids"])
            # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens that can be in an answer)
            # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens)
p_mask = [
[tok != 1 if question_first else 0 for tok in encoded_inputs.sequence_ids(span_id)]
for span_id in range(num_spans)
]
features = []
for span_idx in range(num_spans):
input_ids_span_idx = encoded_inputs["input_ids"][span_idx]
attention_mask_span_idx = (
encoded_inputs["attention_mask"][span_idx] if "attention_mask" in encoded_inputs else None
)
token_type_ids_span_idx = (
encoded_inputs["token_type_ids"][span_idx] if "token_type_ids" in encoded_inputs else None
)
# keep the cls_token unmasked (some models use it to indicate unanswerable questions)
if self.tokenizer.cls_token_id is not None:
cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0]
for cls_index in cls_indices:
p_mask[span_idx][cls_index] = 0
submask = p_mask[span_idx]
features.append(
SquadFeatures(
input_ids=input_ids_span_idx,
attention_mask=attention_mask_span_idx,
token_type_ids=token_type_ids_span_idx,
p_mask=submask,
encoding=encoded_inputs[span_idx],
# We don't use the rest of the values - and actually
# for Fast tokenizer we could totally avoid using SquadFeatures and SquadExample
cls_index=None,
token_to_orig_map={},
example_index=0,
unique_id=0,
paragraph_len=0,
token_is_max_context=0,
tokens=[],
start_position=0,
end_position=0,
is_impossible=False,
qas_id=None,
)
)
for i, feature in enumerate(features):
fw_args = {}
others = {}
model_input_names = self.tokenizer.model_input_names + ["p_mask", "token_type_ids"]
for k, v in feature.__dict__.items():
if k in model_input_names:
tensor = torch.tensor(v)
if tensor.dtype == torch.int32:
tensor = tensor.long()
fw_args[k] = tensor.unsqueeze(0)
else:
others[k] = v
is_last = i == len(features) - 1
yield {"example": example, "is_last": is_last, **fw_args, **others}
def _forward(self, inputs):
example = inputs["example"]
model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
# `XXXForSequenceClassification` models should not use `use_cache=True` even if it's supported
model_forward = self.model.forward
if "use_cache" in inspect.signature(model_forward).parameters:
model_inputs["use_cache"] = False
output = self.model(**model_inputs)
if isinstance(output, dict):
return {"start": output["start_logits"], "end": output["end_logits"], "example": example, **inputs}
else:
start, end = output[:2]
return {"start": start, "end": end, "example": example, **inputs}
def postprocess(
self,
model_outputs,
top_k=1,
handle_impossible_answer=False,
max_answer_len=15,
align_to_words=True,
):
min_null_score = 1000000 # large and positive
answers = []
for output in model_outputs:
if output["start"].dtype == torch.bfloat16:
start_ = output["start"].to(torch.float32)
end_ = output["end"].to(torch.float32)
else:
start_ = output["start"]
end_ = output["end"]
example = output["example"]
p_mask = output["p_mask"]
attention_mask = (
output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None
)
pre_topk = (
top_k * 2 + 10 if align_to_words else top_k
) # Some candidates may be deleted if we align to words
starts, ends, scores, min_null_score = select_starts_ends(
start_,
end_,
p_mask,
attention_mask,
min_null_score,
pre_topk,
handle_impossible_answer,
max_answer_len,
)
if not self.tokenizer.is_fast:
char_to_word = np.array(example.char_to_word_offset)
# Convert the answer (tokens) back to the original text
# Score: score from the model
# Start: Index of the first character of the answer in the context string
# End: Index of the character following the last character of the answer in the context string
# Answer: Plain text of the answer
for s, e, score in zip(starts, ends, scores):
token_to_orig_map = output["token_to_orig_map"]
answers.append(
{
"score": score.item(),
"start": np.where(char_to_word == token_to_orig_map[s])[0][0].item(),
"end": np.where(char_to_word == token_to_orig_map[e])[0][-1].item(),
"answer": " ".join(example.doc_tokens[token_to_orig_map[s] : token_to_orig_map[e] + 1]),
}
)
else:
# Convert the answer (tokens) back to the original text
# Score: score from the model
# Start: Index of the first character of the answer in the context string
# End: Index of the character following the last character of the answer in the context string
# Answer: Plain text of the answer
question_first = self.tokenizer.padding_side == "right"
enc = output["encoding"]
                # Encoding was *not* padded, input_ids *might* be.
# It doesn't make a difference unless we're padding on
# the left hand side, since now we have different offsets
# everywhere.
if self.tokenizer.padding_side == "left":
offset = (output["input_ids"] == self.tokenizer.pad_token_id).numpy().sum()
else:
offset = 0
# Sometimes the max probability token is in the middle of a word so:
# - we start by finding the right word containing the token with `token_to_word`
# - then we convert this word in a character span with `word_to_chars`
sequence_index = 1 if question_first else 0
for s, e, score in zip(starts, ends, scores):
s = s - offset
e = e - offset
start_index, end_index = self.get_indices(enc, s, e, sequence_index, align_to_words)
target_answer = example.context_text[start_index:end_index]
answer = self.get_answer(answers, target_answer)
if answer:
answer["score"] += score.item()
else:
answers.append(
{
"score": score.item(),
"start": start_index,
"end": end_index,
"answer": example.context_text[start_index:end_index],
}
)
if handle_impossible_answer:
answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})
answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k]
if len(answers) == 1:
return answers[0]
return answers
def get_answer(self, answers: list[dict], target: str) -> dict | None:
for answer in answers:
if answer["answer"].lower() == target.lower():
return answer
return None
def get_indices(
self, enc: "tokenizers.Encoding", s: int, e: int, sequence_index: int, align_to_words: bool
) -> tuple[int, int]:
if align_to_words:
try:
start_word = enc.token_to_word(s)
end_word = enc.token_to_word(e)
start_index = enc.word_to_chars(start_word, sequence_index=sequence_index)[0]
end_index = enc.word_to_chars(end_word, sequence_index=sequence_index)[1]
except Exception:
# Some tokenizers don't really handle words. Keep to offsets then.
start_index = enc.offsets[s][0]
end_index = enc.offsets[e][1]
else:
start_index = enc.offsets[s][0]
end_index = enc.offsets[e][1]
return start_index, end_index
def span_to_answer(self, text: str, start: int, end: int) -> dict[str, str | int]:
"""
        When decoding from token probabilities, this method maps token indexes to actual words in the initial context.
Args:
text (`str`): The actual context to extract the answer from.
start (`int`): The answer starting token index.
end (`int`): The answer end token index.
Returns:
Dictionary like `{'answer': str, 'start': int, 'end': int}`
"""
words = []
token_idx = char_start_idx = char_end_idx = chars_idx = 0
for word in text.split(" "):
token = self.tokenizer.tokenize(word)
# Append words if they are in the span
if start <= token_idx <= end:
if token_idx == start:
char_start_idx = chars_idx
if token_idx == end:
char_end_idx = chars_idx + len(word)
words += [word]
# Stop if we went over the end of the answer
if token_idx > end:
break
# Append the subtokenization length to the running index
token_idx += len(token)
chars_idx += len(word) + 1
# Join text with spaces
return {
"answer": " ".join(words),
"start": max(0, char_start_idx),
"end": min(len(text), char_end_idx),
}
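# Back-of-envelope sketch of the chunking defaults in preprocess() above: max_seq_len
# falls back to min(model_max_length, 384) and doc_stride to min(max_seq_len // 2, 128),
# so long contexts are split into overlapping spans (hypothetical model_max_length=512).
def _demo_chunking_defaults() -> None:
    model_max_length = 512
    max_seq_len = min(model_max_length, 384)
    doc_stride = min(max_seq_len // 2, 128)
    assert (max_seq_len, doc_stride) == (384, 128)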
|
QuestionAnsweringPipeline
|
python
|
uqfoundation__dill
|
dill/tests/test_classdef.py
|
{
"start": 5577,
"end": 8600
}
|
class ____(object):
__slots__ = 'y'
def __init__(self, y):
self.y = y
def test_slots():
assert dill.pickles(Y)
assert dill.pickles(y)
assert dill.pickles(Y.y)
assert dill.copy(y).y == value
assert dill.copy(Y2(value)).y == value
def test_origbases():
assert dill.copy(customIntList).__orig_bases__ == customIntList.__orig_bases__
def test_attr():
import attr
@attr.s
class A:
a = attr.ib()
v = A(1)
assert dill.copy(v) == v
def test_metaclass():
class metaclass_with_new(type):
def __new__(mcls, name, bases, ns, **kwds):
cls = super().__new__(mcls, name, bases, ns, **kwds)
assert mcls is not None
assert cls.method(mcls)
return cls
def method(cls, mcls):
return isinstance(cls, mcls)
l = locals()
exec("""class subclass_with_new(metaclass=metaclass_with_new):
def __new__(cls):
self = super().__new__(cls)
return self""", None, l)
subclass_with_new = l['subclass_with_new']
assert dill.copy(subclass_with_new())
def test_enummeta():
from http import HTTPStatus
import enum
assert dill.copy(HTTPStatus.OK) is HTTPStatus.OK
assert dill.copy(enum.EnumMeta) is enum.EnumMeta
def test_inherit(): #NOTE: see issue #612
class Foo:
w = 0
x = 1
y = 1.1
a = ()
b = (1,)
n = None
class Bar(Foo):
w = 2
x = 1
y = 1.1
z = 0.2
a = ()
b = (1,)
c = (2,)
n = None
Baz = dill.copy(Bar)
import platform
is_pypy = platform.python_implementation() == 'PyPy'
assert Bar.__dict__ == Baz.__dict__
# ints
assert 'w' in Bar.__dict__ and 'w' in Baz.__dict__
assert Bar.__dict__['w'] is Baz.__dict__['w']
assert 'x' in Bar.__dict__ and 'x' in Baz.__dict__
assert Bar.__dict__['x'] is Baz.__dict__['x']
# floats
assert 'y' in Bar.__dict__ and 'y' in Baz.__dict__
same = Bar.__dict__['y'] is Baz.__dict__['y']
assert same if is_pypy else not same
assert 'z' in Bar.__dict__ and 'z' in Baz.__dict__
same = Bar.__dict__['z'] is Baz.__dict__['z']
assert same if is_pypy else not same
# tuples
assert 'a' in Bar.__dict__ and 'a' in Baz.__dict__
assert Bar.__dict__['a'] is Baz.__dict__['a']
assert 'b' in Bar.__dict__ and 'b' in Baz.__dict__
assert Bar.__dict__['b'] is not Baz.__dict__['b']
assert 'c' in Bar.__dict__ and 'c' in Baz.__dict__
assert Bar.__dict__['c'] is not Baz.__dict__['c']
# None
assert 'n' in Bar.__dict__ and 'n' in Baz.__dict__
assert Bar.__dict__['n'] is Baz.__dict__['n']
if __name__ == '__main__':
test_class_instances()
test_class_objects()
test_specialtypes()
test_namedtuple()
test_dtype()
test_array_nested()
test_array_subclass()
test_method_decorator()
test_slots()
test_origbases()
test_metaclass()
test_enummeta()
test_inherit()
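# Illustrative note on the identity asserts in test_inherit() above (CPython only):
# small ints are cached, so equal values share identity, while floats are created
# fresh; this is why the int checks use `is` but the float checks differ on PyPy.
def _demo_interning() -> None:
    import platform
    if platform.python_implementation() != 'CPython':
        return
    a, b = int('1'), int('1')      # built at runtime to dodge constant folding
    assert a is b                  # small-int cache
    x, y = float('1.1'), float('1.1')
    assert x is not y              # fresh float objects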
|
Y2
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/runtime.py
|
{
"start": 2821,
"end": 2955
}
|
class ____(Exception):
"""Raised by operations on a Runtime instance that is stopped."""
@dataclass(frozen=True)
|
RuntimeStoppedError
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/deprecated7.py
|
{
"start": 261,
"end": 498
}
|
class ____: ...
# This should generate an error if reportDeprecated is enabled.
ClassA()
@todo
def func1() -> None:
pass
# This should generate an error if reportDeprecated is enabled.
func1()
def func2() -> None:
pass
|
ClassA
|
python
|
doocs__leetcode
|
solution/0900-0999/0984.String Without AAA or BBB/Solution.py
|
{
"start": 0,
"end": 502
}
|
class ____:
def strWithout3a3b(self, a: int, b: int) -> str:
ans = []
while a and b:
if a > b:
ans.append('aab')
a, b = a - 2, b - 1
elif a < b:
ans.append('bba')
a, b = a - 1, b - 2
else:
ans.append('ab')
a, b = a - 1, b - 1
if a:
ans.append('a' * a)
if b:
ans.append('b' * b)
return ''.join(ans)
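# Worked trace of the greedy rule above (comments only, since the class name is
# masked in this record): strWithout3a3b(4, 1)
#   a=4 > b=1 -> append 'aab' (a=2, b=0); loop ends; a=2 -> append 'aa'
#   result 'aabaa', which never contains three identical letters in a row.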
|
Solution
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/fastapi/update/tutorial002_py39.py
|
{
"start": 513,
"end": 2857
}
|
class ____(SQLModel):
name: Optional[str] = None
secret_name: Optional[str] = None
age: Optional[int] = None
password: Optional[str] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def hash_password(password: str) -> str:
# Use something like passlib here
return f"not really hashed {password} hehehe"
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate):
hashed_password = hash_password(hero.password)
with Session(engine) as session:
extra_data = {"hashed_password": hashed_password}
db_hero = Hero.model_validate(hero, update=extra_data)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=list[HeroPublic])
def read_heroes(offset: int = 0, limit: int = Query(default=100, le=100)):
with Session(engine) as session:
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroPublic)
def update_hero(hero_id: int, hero: HeroUpdate):
with Session(engine) as session:
db_hero = session.get(Hero, hero_id)
if not db_hero:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.model_dump(exclude_unset=True)
extra_data = {}
if "password" in hero_data:
password = hero_data["password"]
hashed_password = hash_password(password)
extra_data["hashed_password"] = hashed_password
db_hero.sqlmodel_update(hero_data, update=extra_data)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
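# Minimal sketch of the partial-update behavior relied on in update_hero() above
# (assuming the masked model is the HeroUpdate the endpoints reference):
# exclude_unset=True keeps only the fields the client actually sent.
def _demo_partial_update() -> None:
    partial = HeroUpdate(name="Deadpond")
    assert partial.model_dump(exclude_unset=True) == {"name": "Deadpond"}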
|
HeroUpdate
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol38.py
|
{
"start": 138,
"end": 342
}
|
class ____(Protocol):
def __neg__(self) -> "Negatable": ...
def func1(x: Negatable) -> None: ...
func1(0)
def func2(val: Literal[0, 1]):
func1(val)
T = TypeVar("T", covariant=True)
|
Negatable
|
python
|
donnemartin__system-design-primer
|
solutions/system_design/mint/mint_mapreduce.py
|
{
"start": 55,
"end": 1475
}
|
class ____(MRJob):
def __init__(self, categorizer):
self.categorizer = categorizer
...
def current_year_month(self):
"""Return the current year and month."""
...
def extract_year_month(self, timestamp):
"""Return the year and month portions of the timestamp."""
...
def handle_budget_notifications(self, key, total):
"""Call notification API if nearing or exceeded budget."""
...
def mapper(self, _, line):
"""Parse each log line, extract and transform relevant lines.
Emit key value pairs of the form:
(2016-01, shopping), 25
(2016-01, shopping), 100
(2016-01, gas), 50
"""
timestamp, category, amount = line.split('\t')
        period = self.extract_year_month(timestamp)
if period == self.current_year_month():
yield (period, category), amount
def reducer(self, key, values):
"""Sum values for each key.
(2016-01, shopping), 125
(2016-01, gas), 50
"""
        total = sum(values)
        self.handle_budget_notifications(key, total)
        # `values` is a generator and is consumed by the sum above, so reuse the total.
        yield key, total
def steps(self):
"""Run the map and reduce steps."""
return [
self.mr(mapper=self.mapper,
reducer=self.reducer)
]
if __name__ == '__main__':
SpendingByCategory.run()
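# Plain-Python sketch of the shuffle/reduce step the docstrings above describe
# (no mrjob involved; the sample pairs come straight from the mapper docstring):
def _demo_shuffle_reduce() -> None:
    from collections import defaultdict
    pairs = [(('2016-01', 'shopping'), 25), (('2016-01', 'shopping'), 100),
             (('2016-01', 'gas'), 50)]
    grouped = defaultdict(list)
    for key, amount in pairs:
        grouped[key].append(amount)
    totals = {key: sum(amounts) for key, amounts in grouped.items()}
    assert totals == {('2016-01', 'shopping'): 125, ('2016-01', 'gas'): 50}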
|
SpendingByCategory
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 264103,
"end": 264613
}
|
class ____(sgqlc.types.Input):
"""Ways in which lists of projects can be ordered upon return."""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(ProjectV2OrderField), graphql_name="field")
"""The field in which to order projects by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The direction in which to order projects by the specified field."""
|
ProjectV2Order
|
python
|
hynek__structlog
|
tests/processors/test_renderers.py
|
{
"start": 15347,
"end": 18368
}
|
class ____:
def test_custom_formatter(self):
"""
The exception formatter can be changed.
"""
formatter = ExceptionRenderer(lambda _: "There is no exception!")
try:
raise CustomError("test")
except CustomError as e:
exc = e
assert formatter(None, None, {"exc_info": exc}) == {
"exception": "There is no exception!"
}
@pytest.mark.parametrize("ei", [False, None, ""])
def test_nop(self, ei):
"""
If exc_info is falsey, only remove the key.
"""
assert {} == ExceptionRenderer()(None, None, {"exc_info": ei})
def test_nop_missing(self):
"""
If event dict doesn't contain exc_info, do nothing.
"""
assert {} == ExceptionRenderer()(None, None, {})
def test_formats_tuple(self):
"""
If exc_info is an arbitrary 3-tuple, it is not used.
"""
formatter = ExceptionRenderer(lambda exc_info: exc_info)
d = formatter(None, None, {"exc_info": (None, None, 42)})
assert {} == d
def test_gets_exc_info_on_bool(self):
"""
If exc_info is True, it is obtained using sys.exc_info().
"""
        # Monkeypatching sys.exc_info currently makes pytest return 1 on
        # success.
try:
raise ValueError("test")
except ValueError:
d = ExceptionRenderer()(None, None, {"exc_info": True})
assert "exc_info" not in d
assert 'raise ValueError("test")' in d["exception"]
assert "ValueError: test" in d["exception"]
def test_exception(self):
"""
Passing exceptions as exc_info is valid.
"""
formatter = ExceptionRenderer(lambda exc_info: exc_info)
try:
raise ValueError("test")
except ValueError as e:
exc = e
else:
pytest.fail("Exception not raised.")
assert {
"exception": (ValueError, exc, exc.__traceback__)
} == formatter(None, None, {"exc_info": exc})
def test_exception_without_traceback(self):
"""
If an Exception is missing a traceback, render it anyway.
"""
rv = ExceptionRenderer()(
None, None, {"exc_info": Exception("no traceback!")}
)
assert {"exception": "Exception: no traceback!"} == rv
def test_format_exception(self):
"""
"format_exception" is the "ExceptionRenderer" with default settings.
"""
try:
raise ValueError("test")
except ValueError as e:
a = format_exc_info(None, None, {"exc_info": e})
b = ExceptionRenderer()(None, None, {"exc_info": e})
assert a == b
@pytest.mark.parametrize("ei", [True, (None, None, None)])
def test_no_exception(self, ei):
"""
A missing exception does not blow up.
"""
assert {} == format_exc_info(None, None, {"exc_info": ei})
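# Minimal sketch of the processor contract the tests above exercise: the "exc_info"
# key is replaced by a rendered "exception" string, and other keys pass through.
def _demo_format_exc_info() -> None:
    try:
        1 / 0
    except ZeroDivisionError as e:
        event = format_exc_info(None, None, {"event": "boom", "exc_info": e})
    assert "exc_info" not in event
    assert "ZeroDivisionError" in event["exception"]
    assert event["event"] == "boom"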
|
TestFormatExcInfo
|
python
|
pytorch__pytorch
|
test/test_torch.py
|
{
"start": 290822,
"end": 473833
}
|
class ____(TestCase):
exact_dtype = True
def test_dir(self):
dir(torch)
def test_wildcard_import(self):
exec('from torch import *')
def test_newaxis_numpy_comparison(self):
def run_test(tensor, *idx):
npt = tensor.numpy()
self.assertEqual(tensor[idx], npt[idx])
# 1D Tensor Tests
x = torch.arange(0, 10)
cases = [
[None],
[None, None],
[Ellipsis, None],
[None, Ellipsis],
[2, None],
[None, 2],
[Ellipsis, None, 2],
[Ellipsis, 2, None],
[2, Ellipsis, None],
[2, None, Ellipsis],
[None, 2, Ellipsis],
[None, Ellipsis, 2],
]
for case in cases:
run_test(x, *case)
# 2D Tensor Tests
x = torch.arange(0, 12).view(3, 4)
cases = [
[None],
[None, None],
[None, None, None],
[Ellipsis, None],
[Ellipsis, None, None],
[None, Ellipsis],
[None, Ellipsis, None],
[None, None, Ellipsis],
[2, None],
[2, None, Ellipsis],
[2, Ellipsis, None],
[None, 2, Ellipsis],
[Ellipsis, 2, None],
[Ellipsis, None, 2],
[None, Ellipsis, 2],
[1, 2, None],
[1, 2, Ellipsis, None],
[1, Ellipsis, 2, None],
[Ellipsis, 1, None, 2],
[Ellipsis, 1, 2, None],
[1, None, 2, Ellipsis],
[None, 1, Ellipsis, 2],
[None, 1, 2, Ellipsis],
]
for case in cases:
run_test(x, *case)
def _consecutive(self, size, start=1):
sequence = torch.ones(torch.tensor(size).prod(0)).cumsum(0)
sequence.add_(start - 1)
return sequence.resize_(*size)
def test_newindex(self):
reference = self._consecutive((3, 3, 3))
# This relies on __index__() being correct - but we have separate tests for that
def checkPartialAssign(index):
reference = torch.zeros(3, 3, 3)
reference[index] = self._consecutive((3, 3, 3))[index]
self.assertEqual(reference[index], self._consecutive((3, 3, 3))[index], atol=0, rtol=0)
reference[index] = 0
self.assertEqual(reference, torch.zeros(3, 3, 3), atol=0, rtol=0)
checkPartialAssign(0)
checkPartialAssign(1)
checkPartialAssign(2)
checkPartialAssign((0, 1))
checkPartialAssign((1, 2))
checkPartialAssign((0, 2))
checkPartialAssign(torch.LongTensor((0, 2)))
with self.assertRaises(IndexError):
reference[1, 1, 1, 1] = 1
with self.assertRaises(IndexError):
reference[1, 1, 1, (1, 1)] = 1
with self.assertRaises(IndexError):
reference[3, 3, 3, 3, 3, 3, 3, 3] = 1
with self.assertRaises(IndexError):
reference[0.0] = 1
with self.assertRaises(TypeError):
reference[0.0:2.0] = 1
with self.assertRaises(IndexError):
reference[0.0, 0.0:2.0] = 1
with self.assertRaises(IndexError):
reference[0.0, :, 0.0:2.0] = 1
with self.assertRaises(IndexError):
reference[0.0, ..., 0.0:2.0] = 1
with self.assertRaises(IndexError):
reference[0.0, :, 0.0] = 1
# Test `torch._check*` functions
def test_check(self):
test_cases = [
# check function, expected error
(torch._check, RuntimeError),
(torch._check_index, IndexError),
(torch._check_value, ValueError),
(torch._check_type, TypeError),
(torch._check_not_implemented, NotImplementedError),
]
for check_fn, expected_error in test_cases:
# cond=True should not raise an error
check_fn(True)
# Test default failure message for cond=False
default_message = 'Expected cond to be True'
with self.assertRaisesRegex(expected_error, default_message):
check_fn(False)
# Test a simple failure message
message = 'message'
with self.assertRaisesRegex(expected_error, message):
check_fn(False, lambda: message)
# Test message with tensor
def message():
return torch.arange(4)
with self.assertRaisesRegex(expected_error, re.escape(str(message()))):
check_fn(False, message)
# Test format string message
def message():
return f"{'test'} {[1, 2, 'a', True]} {True} {100} {torch.arange(4)}"
with self.assertRaisesRegex(expected_error, re.escape(str(message()))):
check_fn(False, message)
# Test incorrect `cond` arg type
with self.assertRaisesRegex(TypeError, 'cond must be a bool'):
check_fn('wrong type')
with self.assertRaisesRegex(TypeError, 'cond must be a bool'):
check_fn(torch.tensor(True))
# FIXME: move to indexing test suite
def test_index_add(self):
for device in get_all_device_types():
for dest_contig, src_contig, index_contig in product([True, False], repeat=3):
for other_sizes in ((), (4, 5)):
for dtype in [torch.int, torch.long]:
num_copy, num_dest = 3, 3
dest = torch.randn(num_dest, *other_sizes, device=device)
if not dest_contig:
dest = make_tensor(dest.shape, device=device, dtype=dest.dtype, noncontiguous=True)
src = torch.randn(num_copy, *other_sizes, device=device)
if not src_contig:
src = noncontiguous_like(src)
idx = torch.randperm(num_dest, dtype=dtype, device=device).narrow(0, 0, num_copy)
if not index_contig:
idx = noncontiguous_like(idx)
# index_add_ without alpha argument
dest2 = dest.clone()
dest.index_add_(0, idx, src)
for i in range(idx.size(0)):
dest2[idx[i]] += src[i]
self.assertEqual(dest, dest2)
# index_add_ with alpha argument
dest2 = dest.clone()
dest.index_add_(0, idx, src, alpha=2)
for i in range(idx.size(0)):
dest2[idx[i]] += src[i] * 2
self.assertEqual(dest, dest2)
# FIXME: resolve comment below and move this to indexing test suite
# add coverage for issue with atomic add that appeared only for
# specific dtypes on cuda:
# https://github.com/pytorch/pytorch/issues/29153
def test_index_add_all_dtypes(self):
for device in get_all_device_types():
for dtype in get_all_math_dtypes(device):
for idx_dtype in [torch.int, torch.long]:
size = [5, 5]
if dtype.is_floating_point or dtype.is_complex:
tensor = torch.rand(size, dtype=dtype, device=device)
elif dtype.is_signed:
tensor = torch.randint(-5, 15, size, dtype=dtype, device=device)
else:
tensor = torch.randint(0, 10, size, dtype=dtype, device=device)
# index_add calls atomicAdd on cuda.
zeros = torch.zeros(size, dtype=dtype, device=device)
added = zeros.index_add(0, torch.arange(0, size[0], dtype=idx_dtype, device=device), tensor)
self.assertEqual(added, tensor)
added = zeros.index_add(0, torch.arange(0, size[0], dtype=idx_dtype, device=device), tensor, alpha=-1)
self.assertEqual(added, -tensor)
@unittest.mock.patch.object(torch._dynamo.config, "suppress_errors", False)
@set_default_dtype(torch.double)
def test_index_add_correctness(self):
# Check whether index_add can get correct result when
# alpha is 1, and dtype of index is torch.long,
# i.e., using scatter_add
def helper(dim, dtype, device, size_result, size_source):
tensor = torch.zeros(size_result, dtype=dtype, device=device)
index = torch.randint(0, size_result[dim], (size_source[dim],),
dtype=torch.long, device=device)
if dtype.is_floating_point or dtype.is_complex:
source = torch.rand(size_source, dtype=dtype, device=device)
elif dtype.is_signed:
source = torch.randint(-2, 5, size_source, dtype=dtype, device=device)
else:
source = torch.randint(0, 5, size_source, dtype=dtype, device=device)
ref_out = tensor.index_add(dim, index, source, alpha=2.) / 2.
ref_out = ref_out.to(dtype=dtype)
out = tensor.index_add(dim, index, source)
if device == 'cuda':
self.assertEqual(out, ref_out, atol=1e-2, rtol=1e-2)
else:
# scatter_add uses fp32 as accumulate type, while index_add doesn't.
self.assertEqual(out, ref_out.to(dtype=dtype), atol=1e-2, rtol=1e-2)
for dim in [-1, -2, -3]:
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16):
for device in get_all_device_types():
for size in [(2, 512, 256), (5, 256, 256)]:
helper(dim, dtype, device, size, size)
# Check bound
result = torch.zeros(1, 512, 256, dtype=dtype)
source = torch.ones(1, 512, 256, dtype=dtype)
index = torch.ones(257).to(dtype=torch.long)
self.assertRaises(RuntimeError, lambda: result.index_add_(dim, index, source))
index = (torch.ones(256) * 257).to(dtype=torch.long)
self.assertRaises(RuntimeError, lambda: result.index_add_(dim, index, source))
def test_index_add_cornercase(self):
for device in get_all_device_types():
dest = torch.randn((), device=device)
index = torch.tensor([0], device=device)
source = torch.randn(1, 1, 1, device=device)
with self.assertRaisesRegex(
RuntimeError,
r"source tensor shape must match self tensor shape, excluding the specified dimension",
):
dest.index_add(0, index, source)
def test_linspace_logspace(self):
        # Ensure the output does not require grad, regardless of whether the inputs require grad.
# The output of factory functions should not be part of any computational graph.
start = 0.0
end = 3.0
for step in [0, 1, 2]:
self.assertFalse(
torch.linspace(
torch.tensor(start, requires_grad=True),
torch.tensor(end, requires_grad=True), step
).requires_grad
)
self.assertFalse(torch.linspace(torch.tensor(start, requires_grad=True), end, step).requires_grad)
self.assertFalse(torch.linspace(start, torch.tensor(end, requires_grad=True), step).requires_grad)
self.assertFalse(
torch.logspace(
torch.tensor(start, requires_grad=True),
torch.tensor(end, requires_grad=True), step
).requires_grad
)
self.assertFalse(torch.logspace(torch.tensor(start, requires_grad=True), end, step).requires_grad)
self.assertFalse(torch.logspace(start, torch.tensor(end, requires_grad=True), step).requires_grad)
# FIXME: move to shape ops test suite
def test_unflatten(self):
# test args: tensor, int, sizes
self.assertEqual(torch.tensor([]).unflatten(0, (0, 1)), torch.empty(0, 1))
self.assertEqual(torch.tensor([1]).unflatten(0, (1, 1)), torch.tensor([[1]]))
self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, (2, 2)), torch.tensor([[1, 2], [3, 4]]))
self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, [2, 2]), torch.tensor([[1, 2], [3, 4]]))
self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, torch.Size([2, 2])), torch.tensor([[1, 2], [3, 4]]))
self.assertEqual(torch.ones(2, 10).unflatten(1, (5, 2)), torch.ones(2, 5, 2))
self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, (-1, 2)),
torch.tensor([[1, 2], [3, 4]]))
self.assertEqual(torch.ones(2, 10).unflatten(1, (5, -1)),
torch.ones(2, 5, 2))
self.assertEqual(torch.ones(2, 10).unflatten(1, (-1,)),
torch.ones(2, 10))
self.assertEqual(torch.ones(2, 3 * 4 * 5 * 6).unflatten(1, (3, 4, -1, 6)),
torch.ones(2, 3, 4, 5, 6))
self.assertEqual(torch.ones(2, 0, 2).unflatten(1, (3, -1, 4, 5)),
torch.ones(2, 3, 0, 4, 5, 2))
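        # How -1 inference works (illustrative): for a dim of size 360 and
        # sizes (3, 4, -1, 6) above, the missing factor is 360 // (3 * 4 * 6) == 5.
        # With a zero-sized dim, -1 is inferred as 0, unless the inference is
        # ambiguous (see the error cases below).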
# test invalid args: tensor, str, sizes
with self.assertRaisesRegex(TypeError, r"unflatten\(\): argument 'dim' \(position 1\) must be int, not str"):
torch.tensor([1]).unflatten('A', (1, 1))
# test invalid args: tensor, str, namedshape
with self.assertRaisesRegex(RuntimeError, r"Name 'A' not found in Tensor\[None\]."):
torch.ones(4).unflatten('A', (('A', 2), ('B', 2)))
# test other invalid arguments
with self.assertRaisesRegex(RuntimeError, r"sizes must be non-empty"):
torch.tensor([1]).unflatten(0, [])
with self.assertRaisesRegex(RuntimeError, r"Provided sizes \[2, 2\] don't multiply up to the size of dim 0 \(1\)"):
torch.tensor([1]).unflatten(0, [2, 2])
with self.assertRaisesRegex(RuntimeError, r".*Dimension specified as 0 but tensor has no dimensions.*"):
torch.tensor(1).unflatten(0, [0])
with self.assertRaisesRegex(RuntimeError, r"only one dimension can be inferred"):
torch.randn(5, 10).unflatten(1, (-1, -1))
with self.assertRaisesRegex(RuntimeError,
r"Provided sizes \[-1, 4\] don't multiply up to the size of dim 1 \(10\)"):
torch.randn(5, 10).unflatten(1, (-1, 4))
with self.assertRaisesRegex(RuntimeError,
r"the unspecified dimension size -1 can be any value and is ambiguous"):
torch.randn(2, 0).unflatten(1, (2, -1, 0))
# Test that warnings generated from C++ are translated to the correct type
def test_warn_types(self):
test_cases = [
# function, warning type, message
(torch._C._warn, UserWarning, r"Test message for TORCH_WARN"),
(torch._C._warn_deprecation, DeprecationWarning, r"Test message for TORCH_WARN_DEPRECATION"),
]
for fn, warning_type, message in test_cases:
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
warnings.filterwarnings('always', category=warning_type)
fn()
self.assertEqual(len(w), 1, msg=f'{warning_type} not raised')
warning = w[0].message
self.assertTrue(isinstance(warning, warning_type), msg=f'{warning_type} not raised')
self.assertTrue(re.search(
message,
str(warning)))
def test_structseq_repr(self):
a = torch.arange(250).reshape(5, 5, 10)
expected = """
torch.return_types.max(
values=tensor([[ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
[ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99],
[140, 141, 142, 143, 144, 145, 146, 147, 148, 149],
[190, 191, 192, 193, 194, 195, 196, 197, 198, 199],
[240, 241, 242, 243, 244, 245, 246, 247, 248, 249]]),
indices=tensor([[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4]]))"""
self.assertEqual(repr(a.max(1)), textwrap.dedent(expected).strip())
def test_is_same_size(self):
t1 = torch.empty(3, 4, 9, 10)
t2 = torch.empty(3, 4)
t3 = torch.empty(1, 9, 3, 3)
t4 = torch.empty(3, 4, 9, 10)
self.assertFalse(t1.is_same_size(t2))
self.assertFalse(t1.is_same_size(t3))
self.assertTrue(t1.is_same_size(t4))
nt1 = torch.nested.nested_tensor([torch.ones(2, 4), torch.ones(3, 4), torch.ones(5, 4)])
nt2 = torch.nested.nested_tensor([torch.ones(2, 4), torch.ones(2, 4), torch.ones(2, 4)])
nt3 = torch.nested.nested_tensor([torch.ones(2, 4, 5), torch.ones(2, 6, 5)])
nt4 = torch.nested.nested_tensor([torch.ones(2, 4), torch.ones(3, 4), torch.ones(5, 4)])
self.assertFalse(nt1.is_same_size(nt2))
self.assertFalse(nt1.is_same_size(nt3))
self.assertTrue(nt1.is_same_size(nt4))
with self.assertRaisesRegex(RuntimeError, "Expected both self and other to be nested tensors."):
t1.is_same_size(nt1)
with self.assertRaisesRegex(RuntimeError, "Expected both self and other to be nested tensors."):
nt1.is_same_size(t1)
def test_tensor_set(self):
t1 = torch.tensor([])
t2 = torch.empty(3, 4, 9, 10).uniform_()
t1.set_(t2)
self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
size = torch.Size([9, 3, 4, 10])
t1.set_(t2.storage(), 0, size)
self.assertEqual(t1.size(), size)
t1.set_(t2.storage(), 0, tuple(size))
self.assertEqual(t1.size(), size)
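        # For a contiguous (9, 3, 4, 10) tensor, the default strides are the
        # trailing size products: (3 * 4 * 10, 4 * 10, 10, 1) == (120, 40, 10, 1).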
self.assertEqual(t1.stride(), (120, 40, 10, 1))
stride = (10, 360, 90, 1)
t1.set_(t2.storage(), 0, size, stride)
self.assertEqual(t1.stride(), stride)
t1.set_(t2.storage(), 0, size=size, stride=stride)
self.assertEqual(t1.size(), size)
self.assertEqual(t1.stride(), stride)
# test argument names
t1 = torch.tensor([])
# 1. case when source is tensor
t1.set_(source=t2)
self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
# 2. case when source is storage
t1.set_(source=t2.storage())
self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
# 3. case when source is storage, and other args also specified
t1.set_(source=t2.storage(), storage_offset=0, size=size, stride=stride)
self.assertEqual(t1.size(), size)
self.assertEqual(t1.stride(), stride)
t1 = torch.tensor([True, True], dtype=torch.bool)
t2 = torch.tensor([False, False], dtype=torch.bool)
t1.set_(t2)
self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
def test_tensor_set_errors(self):
f_cpu = torch.randn((2, 3), dtype=torch.float32)
d_cpu = torch.randn((2, 3), dtype=torch.float64)
storage_offset = 0x41414141
with self.assertRaisesRegex(RuntimeError, "out of bounds for storage of size"):
t = torch.randn(1)
t.set_(t.untyped_storage(), storage_offset, t.size())
# if size changes, set_ will resize the storage inplace
t = torch.randn(1)
size = torch.Size([2, 3])
t.set_(t.untyped_storage(), storage_offset, size)
self.assertEqual(t.storage_offset(), storage_offset)
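        # float32 elements are 4 bytes each, so the resized storage must hold
        # storage_offset + 2 * 3 elements, i.e. (storage_offset + 6) * 4 bytes.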
self.assertEqual(t.untyped_storage().nbytes(), (storage_offset + size[0] * size[1]) * 4)
# change dtype
self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu.storage()))
self.assertRaises(RuntimeError,
lambda: f_cpu.set_(d_cpu.storage(), 0, d_cpu.size(), d_cpu.stride()))
self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu))
# change device
if torch.cuda.is_available():
f_cuda = torch.randn((2, 3), dtype=torch.float32, device='cuda')
# cpu -> cuda
self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda.storage()))
self.assertRaises(RuntimeError,
lambda: f_cpu.set_(f_cuda.storage(), 0, f_cuda.size(), f_cuda.stride()))
self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda))
# cuda -> cpu
self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu.storage()))
self.assertRaises(RuntimeError,
lambda: f_cuda.set_(f_cpu.storage(), 0, f_cpu.size(), f_cpu.stride()))
self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu))
# FIXME: move this test test_testing.py (along with allclose testing)
# NOTE: test_equal will be deprecated in favor of torch.testing.assert_close
# once torch.testing is out of beta
def test_equal(self):
for device in ["cpu", "cuda"]:
if device == "cuda" and not torch.cuda.is_available():
continue
# Contiguous, 1D
t1 = torch.tensor((3., 4., 9., 10.), device=device)
t2 = t1.contiguous()
t3 = torch.tensor((1., 9., 3., 10.), device=device)
t4 = torch.tensor((3., 4., 9.), device=device)
t5 = torch.tensor([], device=device)
self.assertTrue(t1.equal(t2))
self.assertFalse(t1.equal(t3))
self.assertFalse(t1.equal(t4))
self.assertFalse(t1.equal(t5))
self.assertTrue(torch.equal(t1, t2))
self.assertFalse(torch.equal(t1, t3))
self.assertFalse(torch.equal(t1, t4))
self.assertFalse(torch.equal(t1, t5))
# Non contiguous, 2D
s = torch.tensor(((1, 2, 3, 4), (5, 6, 7, 8)), device=device)
s1 = s[:, 1:3]
s2 = s1.clone()
s3 = torch.tensor(((2, 3), (6, 7)), device=device)
s4 = torch.tensor(((0, 0), (0, 0)), device=device)
self.assertFalse(s1.is_contiguous())
self.assertTrue(s1.equal(s2))
self.assertTrue(s1.equal(s3))
self.assertFalse(s1.equal(s4))
self.assertTrue(torch.equal(s1, s2))
self.assertTrue(torch.equal(s1, s3))
self.assertFalse(torch.equal(s1, s4))
# Different dtypes
x = torch.tensor((1, 2, 3), dtype=torch.float, device=device)
y = torch.tensor((1, 2, 3), dtype=torch.int, device=device)
z = torch.tensor((1, -1), dtype=torch.int, device=device)
self.assertTrue(torch.equal(x, y))
self.assertFalse(torch.equal(z, x))
# Fast path test: tensor flags, like neg and conj
neg_0 = torch.tensor((1, 2, 3), dtype=torch.float, device=device)
neg_1 = neg_0._neg_view()
self.assertTrue(neg_1.is_neg())
self.assertEqual(neg_0.data_ptr(), neg_1.data_ptr())
self.assertEqual(neg_0.storage_offset(), neg_1.storage_offset())
self.assertEqual(neg_0.stride(), neg_1.stride())
self.assertEqual(neg_0.size(), neg_1.size())
self.assertFalse(torch.equal(neg_0, neg_1))
# FIXME: Disable the following check due to the inductor failure
# See https://github.com/pytorch/pytorch/issues/100340 and
# https://github.com/pytorch/pytorch/issues/98175
if not TEST_WITH_TORCHINDUCTOR:
self.assertTrue(torch.equal(neg_0, neg_1._neg_view()))
conj_0 = torch.tensor([1.0 + 2.0j, 2.0 + 1.0j], device=device)
conj_1 = conj_0.conj()
self.assertTrue(conj_1.is_conj())
self.assertEqual(conj_0.data_ptr(), conj_1.data_ptr())
self.assertEqual(conj_0.storage_offset(), conj_1.storage_offset())
self.assertEqual(conj_0.stride(), conj_1.stride())
self.assertEqual(conj_0.size(), conj_1.size())
self.assertFalse(torch.equal(conj_0, conj_1))
# FIXME: Disable the following check due to the inductor failure
# See https://github.com/pytorch/pytorch/issues/100340 and
# https://github.com/pytorch/pytorch/issues/98175
if not TEST_WITH_TORCHINDUCTOR:
self.assertTrue(torch.equal(conj_0, conj_1.conj()))
# Fast path test: two tensors share the same storage, but different dtype
s_0 = torch.rand((2, 3), dtype=torch.float, device=device)
s_1 = s_0.view(dtype=torch.int32)
self.assertEqual(s_0.data_ptr(), s_1.data_ptr())
self.assertEqual(s_0.storage_offset(), s_1.storage_offset())
self.assertEqual(s_0.stride(), s_1.stride())
self.assertEqual(s_0.size(), s_1.size())
self.assertFalse(torch.equal(s_0, s_1))
# Fast path test: two tensors share the same storage, but different strides
t_0 = torch.rand((2, 3), dtype=torch.float, device=device)
t_1 = t_0.t()
self.assertEqual(t_0.data_ptr(), t_1.data_ptr())
self.assertEqual(t_0.storage_offset(), t_1.storage_offset())
self.assertNotEqual(t_0.stride(), t_1.stride())
self.assertNotEqual(t_0.size(), t_1.size())
self.assertFalse(torch.equal(t_0, t_1))
# Fast path: tensor containing `nan` is not equal to self
for dtype in floating_and_complex_types():
t = torch.tensor([1., float('nan')], dtype=dtype)
self.assertFalse(torch.equal(t, t))
def test_element_size(self):
byte = torch.ByteStorage().element_size()
char = torch.CharStorage().element_size()
short = torch.ShortStorage().element_size()
int = torch.IntStorage().element_size()
long = torch.LongStorage().element_size()
float = torch.FloatStorage().element_size()
double = torch.DoubleStorage().element_size()
bool = torch.BoolStorage().element_size()
bfloat16 = torch.BFloat16Storage().element_size()
complexfloat = torch.ComplexFloatStorage().element_size()
complexdouble = torch.ComplexDoubleStorage().element_size()
self.assertEqual(byte, torch.ByteTensor().element_size())
self.assertEqual(byte, torch.ByteTensor().itemsize)
self.assertEqual(char, torch.CharTensor().element_size())
self.assertEqual(char, torch.CharTensor().itemsize)
self.assertEqual(short, torch.ShortTensor().element_size())
self.assertEqual(short, torch.ShortTensor().itemsize)
self.assertEqual(int, torch.IntTensor().element_size())
self.assertEqual(int, torch.IntTensor().itemsize)
self.assertEqual(long, torch.LongTensor().element_size())
self.assertEqual(long, torch.LongTensor().itemsize)
self.assertEqual(float, torch.FloatTensor().element_size())
self.assertEqual(float, torch.FloatTensor().itemsize)
self.assertEqual(double, torch.DoubleTensor().element_size())
self.assertEqual(double, torch.DoubleTensor().itemsize)
self.assertEqual(bool, torch.BoolTensor().element_size())
self.assertEqual(bool, torch.BoolTensor().itemsize)
self.assertEqual(bfloat16, torch.tensor([], dtype=torch.bfloat16).element_size())
self.assertEqual(bfloat16, torch.tensor([], dtype=torch.bfloat16).itemsize)
self.assertEqual(complexfloat, torch.tensor([], dtype=torch.complex64).element_size())
self.assertEqual(complexfloat, torch.tensor([], dtype=torch.complex64).itemsize)
self.assertEqual(complexdouble, torch.tensor([], dtype=torch.complex128).element_size())
self.assertEqual(complexdouble, torch.tensor([], dtype=torch.complex128).itemsize)
self.assertGreater(byte, 0)
self.assertGreater(char, 0)
self.assertGreater(short, 0)
self.assertGreater(int, 0)
self.assertGreater(long, 0)
self.assertGreater(float, 0)
self.assertGreater(double, 0)
self.assertGreater(bool, 0)
self.assertGreater(bfloat16, 0)
self.assertGreater(complexfloat, 0)
self.assertGreater(complexdouble, 0)
        # These bounds are portable; they are not necessarily tight for any given system.
self.assertEqual(byte, 1)
self.assertEqual(char, 1)
self.assertEqual(bool, 1)
self.assertGreaterEqual(short, 2)
self.assertGreaterEqual(int, 2)
self.assertGreaterEqual(int, short)
self.assertGreaterEqual(long, 4)
self.assertGreaterEqual(long, int)
self.assertGreaterEqual(double, float)
def test_permute(self):
orig = [1, 2, 3, 4, 5, 6, 7]
perm = torch.randperm(7).tolist()
x = torch.empty(*orig).fill_(0)
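        # Dimension i of `orig` has size i + 1, so subtracting 1 from each
        # permuted size recovers the permutation itself.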
new = [i - 1 for i in x.permute(*perm).size()]
self.assertEqual(perm, new)
self.assertEqual(x.size(), orig)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_reversed(self):
val = torch.arange(0, 10)
self.assertEqual(reversed(val), torch.arange(9, -1, -1))
val = torch.arange(1, 10).view(3, 3)
self.assertEqual(reversed(val), torch.tensor([[7, 8, 9], [4, 5, 6], [1, 2, 3]]))
val = torch.tensor(42)
self.assertEqual(reversed(val), torch.tensor(42))
def test_contains(self):
x = torch.arange(0, 10)
self.assertEqual(4 in x, True)
self.assertEqual(12 in x, False)
x = torch.arange(1, 10).view(3, 3)
val = torch.arange(1, 4)
self.assertEqual(val in x, True)
val += 10
self.assertEqual(val in x, False)
self.assertRaisesRegex(
RuntimeError,
f"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {str}.",
lambda: "foo" in x)
self.assertRaisesRegex(
RuntimeError,
f"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {type([1, 2])}.",
lambda: [1, 2] in x)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_deepcopy_parameter(self):
from copy import deepcopy
l = torch.nn.Linear(10, 1)
s = l.state_dict(keep_vars=True)
self.assertEqual(torch.nn.Parameter, type(s['weight']))
self.assertEqual(torch.nn.Parameter, type(s['bias']))
s2 = deepcopy(s)
self.assertEqual(torch.nn.Parameter, type(s2['weight']))
self.assertEqual(torch.nn.Parameter, type(s2['bias']))
def test_pickle(self):
import pickle
a = torch.randn(5, 5)
serialized = pickle.dumps(a)
b = pickle.loads(serialized)
self.assertEqual(a, b)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_pickle_parameter(self):
import pickle
a = torch.nn.Parameter(torch.randn(5, 5))
serialized = pickle.dumps(a)
b = pickle.loads(serialized)
self.assertTrue(isinstance(b, torch.nn.Parameter))
self.assertEqual(a.requires_grad, b.requires_grad)
self.assertEqual(a, b)
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_pickle_parameter_no_requires_grad(self):
import pickle
a = torch.nn.Parameter(torch.randn(5, 5), requires_grad=False)
serialized = pickle.dumps(a)
b = pickle.loads(serialized)
self.assertTrue(isinstance(b, torch.nn.Parameter))
self.assertEqual(a.requires_grad, b.requires_grad)
self.assertEqual(a, b)
def test_pickle_dtype(self):
t = torch.float32
serialized = pickle.dumps(t)
b = pickle.loads(serialized)
self.assertTrue(isinstance(b, torch.dtype))
self.assertEqual(id(b), id(t))
def test_pickle_size(self):
a = torch.rand(10).size()
serialized = pickle.dumps(a)
b = pickle.loads(serialized)
self.assertTrue(isinstance(b, torch.Size))
self.assertEqual(a, b)
def test_pickle_function(self):
# https://github.com/pytorch/pytorch/issues/37703
a = torch.tanh
serialized = pickle.dumps(a)
b = pickle.loads(serialized)
self.assertEqual(a, b)
def test_generator_cpu(self):
# test default generators are equal
self.assertEqual(torch.default_generator, torch.default_generator)
# tests Generator API
# manual_seed, seed, initial_seed, get_state, set_state
g1 = torch.Generator()
g2 = torch.Generator()
g1.manual_seed(12345)
g2.manual_seed(12345)
self.assertEqual(g1.initial_seed(), g2.initial_seed())
g1.seed()
g2.seed()
self.assertNotEqual(g1.initial_seed(), g2.initial_seed())
g1 = torch.Generator()
g2_state = g2.get_state()
g2_randn = torch.randn(1, generator=g2)
g1.set_state(g2_state)
g1_randn = torch.randn(1, generator=g1)
self.assertEqual(g1_randn, g2_randn)
default_state = torch.default_generator.get_state()
q = torch.empty(100)
g1_normal = q.normal_()
g2 = torch.Generator()
g2.set_state(default_state)
g2_normal = q.normal_(generator=g2)
self.assertEqual(g1_normal, g2_normal)
def test_invalid_generator_raises(self):
self.assertRaises(RuntimeError, lambda: torch.Generator('opengl'))
def test_pickle_generator(self) -> None:
devices = ['cpu']
if torch.cuda.is_available():
devices += ['cuda']
for device in devices:
with self.subTest(device=device):
generator = torch.Generator(device=device).manual_seed(12345)
if device != "cpu":
generator.set_offset(100)
                torch.randn((100, 100), generator=generator, device=device)  # advance the RNG state
reserialized: torch.Generator = pickle.loads(pickle.dumps(generator))
self.assertEqual(generator.device, reserialized.device)
self.assertEqual(generator.initial_seed(), reserialized.initial_seed())
if device != "cpu":
self.assertEqual(generator.get_offset(), reserialized.get_offset())
torch.testing.assert_close(generator.get_state(), reserialized.get_state())
def _sobol_reference_samples(self, scramble: bool) -> torch.Tensor:
if not scramble:
# theoretical values from Joe Kuo 2010
return torch.tensor(
[
[0., 0.],
[0.5, 0.5],
[0.75, 0.25],
[0.25, 0.75],
[0.375, 0.375],
[0.875, 0.875],
[0.625, 0.125],
[0.125, 0.625],
],
)
else:
# theoretical values unknown: convergence properties checked
return torch.tensor(
[
[0.50860737, 0.29320504],
[0.07116939, 0.89594537],
[0.49354145, 0.11524881],
[0.93097717, 0.70244044],
[0.87266153, 0.23887917],
[0.31021884, 0.57600391],
[0.13687253, 0.42054182],
[0.69931293, 0.77336788],
],
)
def test_sobolengine_bounds(self, scramble: bool = False):
engine = torch.quasirandom.SobolEngine(100, scramble=scramble, seed=123456)
sample = engine.draw(512)
self.assertTrue(torch.all(sample >= 0))
self.assertTrue(torch.all(sample <= 1))
def test_sobolengine_bounds_scrambled(self):
self.test_sobolengine_bounds(scramble=True)
def test_sobolengine_draw(self, scramble: bool = False):
ref_sample = self._sobol_reference_samples(scramble=scramble)
engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
sample = engine.draw(n=len(ref_sample))
self.assertEqual(sample, ref_sample)
self.assertEqual(engine.num_generated, len(ref_sample))
def test_sobolengine_draw_scrambled(self):
self.test_sobolengine_draw(scramble=True)
def test_sobolengine_first_point(self):
for dtype in (torch.float, torch.double):
engine = torch.quasirandom.SobolEngine(2, scramble=False)
sample = engine.draw(1, dtype=dtype)
self.assertTrue(torch.all(sample == 0))
self.assertEqual(sample.dtype, dtype)
for dtype in (torch.float, torch.double):
engine = torch.quasirandom.SobolEngine(2, scramble=True, seed=123456)
sample = engine.draw(1, dtype=dtype)
self.assertTrue(torch.all(sample != 0))
self.assertEqual(sample.dtype, dtype)
def test_sobolengine_continuing(self, scramble: bool = False):
ref_sample = self._sobol_reference_samples(scramble=scramble)
engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
n_half = len(ref_sample) // 2
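        # The engine is stateful: two consecutive half-length draws must
        # reproduce the same points as a single full-length draw.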
_ = engine.draw(n=n_half)
sample = engine.draw(n=n_half)
torch.testing.assert_close(sample, ref_sample[n_half:])
def test_sobolengine_continuing_scrambled(self):
self.test_sobolengine_continuing(scramble=True)
def test_sobolengine_reset(self, scramble: bool = False):
ref_sample = self._sobol_reference_samples(scramble=scramble)
engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
_ = engine.draw(n=len(ref_sample) // 2)
engine.reset()
self.assertEqual(engine.num_generated, 0)
sample = engine.draw(n=len(ref_sample))
torch.testing.assert_close(sample, ref_sample)
def test_sobolengine_reset_scrambled(self):
self.test_sobolengine_reset(scramble=True)
def test_sobolengine_fast_forward(self, scramble: bool = False):
ref_sample = self._sobol_reference_samples(scramble=scramble)
engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
engine.fast_forward(4)
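        # fast_forward(4) consumes the first four points without returning
        # them, so the next draw must line up with ref_sample[4:].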
sample = engine.draw(n=4)
torch.testing.assert_close(sample, ref_sample[4:])
# alternate fast forwarding with sampling
engine.reset()
even_draws = []
for i in range(8):
if i % 2 == 0:
even_draws.append(engine.draw())
else:
engine.fast_forward(1)
torch.testing.assert_close(
ref_sample[[i for i in range(8) if i % 2 == 0]],
torch.from_numpy(np.concatenate(even_draws)),
)
def test_sobolengine_fast_forward_scrambled(self):
self.test_sobolengine_fast_forward(scramble=True)
def test_sobolengine_default_dtype(self):
engine = torch.quasirandom.SobolEngine(dimension=3, scramble=True, seed=123456)
# Check that default dtype is correctly handled
self.assertEqual(engine.draw(n=5).dtype, torch.float32)
with set_default_dtype(torch.float64):
engine = torch.quasirandom.SobolEngine(dimension=3, scramble=True, seed=123456)
# Check that default dtype is correctly handled (when set to float64)
self.assertEqual(engine.draw(n=5).dtype, torch.float64)
# Check that explicitly passed dtype is adhered to
self.assertEqual(engine.draw(n=5, dtype=torch.float32).dtype, torch.float32)
# Reinitialize the engine and check that first draw dtype is correctly handled
engine = torch.quasirandom.SobolEngine(dimension=3, scramble=True, seed=123456)
self.assertEqual(engine.draw(n=5, dtype=torch.float32).dtype, torch.float32)
@skipIfTorchDynamo("np.float64 restored as float32 after graph break.")
def test_sobolengine_distribution(self, scramble=False):
d = 50
engine = torch.quasirandom.SobolEngine(d, scramble=scramble, seed=123456)
sample = engine.draw(1024)
torch.testing.assert_close(
torch.mean(sample, dim=0), torch.full((d,), 0.5), atol=2, rtol=2
)
torch.testing.assert_close(
np.percentile(sample, 25, axis=0).astype(np.float64), np.repeat(0.25, d), atol=2, rtol=2
)
torch.testing.assert_close(
np.percentile(sample, 75, axis=0).astype(np.float64), np.repeat(0.75, d), atol=2, rtol=2
)
@skipIfTorchDynamo("np.float64 restored as float32 after graph break.")
def test_sobolengine_distribution_scrambled(self):
self.test_sobolengine_distribution(scramble=True)
def test_sobolengine_draw_base2(self, scramble=False):
ref_sample = self._sobol_reference_samples(scramble=scramble)
engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
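        # draw_base2(m) returns 2 ** m points, with the constraint that the
        # running total of generated points stays a power of two.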
sample = engine.draw_base2(2)
self.assertEqual(ref_sample[:4], sample)
        # a second draw is valid since the total count (4 + 4 = 8) is still 2**n
sample = engine.draw_base2(2)
self.assertEqual(ref_sample[4:8], sample)
def test_sobolengine_draw_base2_scrambled(self):
self.test_sobolengine_draw_base2(scramble=True)
def test_sobolengine_raise(self):
maxdim = torch.quasirandom.SobolEngine.MAXDIM
with self.assertRaises(ValueError):
torch.quasirandom.SobolEngine(maxdim + 1)
def test_sobolengine_high_dim(self):
engine = torch.quasirandom.SobolEngine(1111, scramble=False, seed=123456)
samples1 = engine.draw()
vals1, counts1 = torch.unique(samples1, return_counts=True)
samples2 = engine.draw()
vals2, counts2 = torch.unique(samples2, return_counts=True)
self.assertEqual(vals1.item(), 0.0)
self.assertEqual(counts1.item(), 1111)
self.assertEqual(vals2.item(), 0.5)
        self.assertEqual(counts2.item(), 1111)
def test_parsing_int64(self):
# accepts integer arguments
x = torch.cumsum(torch.ones(5, 5), 0)
self.assertEqual(x, torch.cumsum(torch.ones(5, 5), torch.tensor(0)))
# doesn't accept floating point variables
self.assertRaises(TypeError, lambda: torch.cumsum(torch.ones(5, 5), torch.tensor(0.)))
def test_parsing_double(self):
# accepts floating point and integer arguments
x = torch.randn(2, 3)
torch.isclose(x, x, 1, 1)
self.assertTrue(torch.isclose(x, x, 1, 1).all())
self.assertTrue(torch.isclose(x, x, 1.5, 1.).all())
# accepts floating point and integer tensors
self.assertTrue(torch.isclose(x, x, torch.tensor(1), torch.tensor(1)).all())
self.assertTrue(torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1.)).all())
# doesn't accept variables with requires_grad
self.assertRaises(TypeError,
lambda: torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1., requires_grad=True)).all())
def test_parsing_intlist(self):
# parse with integer variables
self.assertEqual(torch.Size([3, 4]), torch.ones((torch.tensor(3), torch.tensor(4))).shape)
self.assertEqual(torch.Size([3, 4]), torch.ones(torch.tensor(3), torch.tensor(4)).shape)
# parse with numpy integers
self.assertEqual(torch.Size([3, 4]), torch.ones((np.array(3), np.int64(4))).shape)
self.assertEqual(torch.Size([3, 4]), torch.ones(np.array(3), np.int64(4)).shape)
self.assertEqual(torch.Size([3, 4]), torch.ones((np.int64(3), np.array(4))).shape)
self.assertEqual(torch.Size([3, 4]), torch.ones(np.int64(3), np.array(4)).shape)
# fail parse with float variables
self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3.), torch.tensor(4))))
# fail parse with numpy floats
self.assertRaises(TypeError, lambda: torch.ones((3., torch.tensor(4))))
self.assertRaises(TypeError, lambda: torch.ones((np.array(3.), torch.tensor(4))))
# fail parse with > 1 element variables
        self.assertRaises(TypeError, lambda: torch.ones(torch.tensor(3, 3)))
        self.assertRaises(TypeError, lambda: torch.ones(np.array(3, 3)))
# fail parse with additional positional args after intlist arg
self.assertRaisesRegex(TypeError,
"received an invalid combination of arguments",
lambda: torch.LongTensor((6, 0), 1, 1, 0))
self.assertRaisesRegex(TypeError,
"missing 1 required positional arguments",
lambda: torch.tensor().new_zeros((5, 5), 0))
# ensure ones() throws an error when extra positional (non-keyword) arguments are given.
self.assertRaises(TypeError, lambda: torch.ones((3, 3), torch.float32))
def test_from_buffer(self):
a = bytearray([1, 2, 3, 4])
self.assertEqual(torch.ByteStorage.from_buffer(a).tolist(), [1, 2, 3, 4])
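        # Worked example for the checks below: big-endian int16 pairs of
        # [1, 2, 3, 4] give 0x0102 == 258 and 0x0304 == 772, while the
        # little-endian int32 reading gives 0x04030201 == 67305985.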
shorts = torch.ShortStorage.from_buffer(a, 'big')
self.assertEqual(shorts.size(), 2)
self.assertEqual(shorts.tolist(), [258, 772])
ints = torch.IntStorage.from_buffer(a, 'little')
self.assertEqual(ints.size(), 1)
self.assertEqual(ints[0], 67305985)
f = bytearray([0x40, 0x10, 0x00, 0x00])
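        # 0x40100000 as a big-endian IEEE-754 float32: sign 0, exponent 128
        # (unbiased 1), mantissa 1.125, hence 1.125 * 2 == 2.25.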
floats = torch.FloatStorage.from_buffer(f, 'big')
self.assertEqual(floats.size(), 1)
self.assertEqual(floats[0], 2.25)
f = bytearray([0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x40])
bools = torch.BoolStorage.from_buffer(f, 'big')
self.assertEqual(bools.size(), 8)
self.assertEqual(bools.tolist(), [False, True, True, True, True, True, True, True])
self.assertEqual(bools.type(), 'torch.BoolStorage')
self.assertTrue(isinstance(bools, torch.BoolStorage))
f = bytearray(b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9')
bools = torch.BoolStorage.from_buffer(f, 'big')
self.assertEqual(bools.size(), 19)
f = bytearray(b'\0x4A')
bools = torch.BoolStorage.from_buffer(f, 'big')
self.assertEqual(bools.size(), 4)
self.assertEqual(bools.tolist(), [False, True, True, True])
bytes = torch.ByteStorage.from_buffer(a)
self.assertEqual(bytes.nbytes(), 4)
self.assertEqual(bytes.tolist(), [1, 2, 3, 4])
self.assertTrue(isinstance(bytes, torch.ByteStorage))
def test_storage_error(self):
quantized_storages = [
torch.QInt32Storage,
torch.QInt8Storage,
torch.QUInt2x4Storage,
torch.QUInt4x2Storage,
torch.QUInt8Storage,
]
with self.assertRaisesRegex(RuntimeError, r"Only child classes of _LegacyStorage can be instantiated"):
torch.storage._LegacyStorage()
for storage_class in torch._storage_classes:
if storage_class in [torch.UntypedStorage, torch.TypedStorage]:
continue
device = 'cuda' if storage_class.__module__ == 'torch.cuda' else 'cpu'
dtype = storage_class.dtype
if device == 'cuda' and not torch.cuda.is_available():
continue
# Legacy <type>Storage constructor errors
with self.assertRaisesRegex(RuntimeError, r"'device' cannot be specified"):
storage_class(device='cpu')
with self.assertRaisesRegex(RuntimeError, r"'dtype' cannot be specified"):
storage_class(dtype=torch.float)
with self.assertRaisesRegex(TypeError, r"got an unexpected keyword"):
storage_class(sdlkjf=torch.float)
with self.assertRaisesRegex(RuntimeError, r"Too many positional arguments"):
storage_class(0, 0)
with self.assertRaisesRegex(TypeError, r"invalid data type"):
storage_class('string')
with self.assertRaisesRegex(TypeError, r"Argument type not recognized"):
storage_class(torch.tensor([]))
s = storage_class()
with self.assertRaisesRegex(RuntimeError, r"No positional arguments"):
storage_class(0, wrap_storage=s.untyped())
with self.assertRaisesRegex(TypeError, r"must be UntypedStorage"):
storage_class(wrap_storage=s)
if torch.cuda.is_available():
if storage_class in quantized_storages:
with self.assertRaisesRegex(RuntimeError, r"Cannot create CUDA storage with quantized dtype"):
s.cuda()
else:
if s.is_cuda:
s_other_device = s.cpu()
else:
s_other_device = s.cuda()
with self.assertRaisesRegex(RuntimeError, r"Device of 'wrap_storage' must be"):
storage_class(wrap_storage=s_other_device.untyped())
# TypedStorage constructor errors
with self.assertRaisesRegex(RuntimeError, r"No positional arguments"):
torch.TypedStorage(0, wrap_storage=s.untyped(), dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"Argument 'dtype' must be specified"):
torch.TypedStorage(wrap_storage=s.untyped())
with self.assertRaisesRegex(TypeError, r"Argument 'dtype' must be torch.dtype"):
torch.TypedStorage(wrap_storage=s.untyped(), dtype=0)
with self.assertRaisesRegex(RuntimeError, r"Argument 'device' should not be specified"):
torch.TypedStorage(wrap_storage=s.untyped(), dtype=dtype, device=device)
with self.assertRaisesRegex(TypeError, r"Argument 'wrap_storage' must be UntypedStorage"):
torch.TypedStorage(wrap_storage=s, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"Storage device not recognized"):
torch.TypedStorage(dtype=dtype, device='xla')
if torch.cuda.is_available():
if storage_class in quantized_storages:
with self.assertRaisesRegex(RuntimeError, r"Cannot create CUDA storage with quantized dtype"):
torch.TypedStorage(dtype=dtype, device='cuda')
with self.assertRaisesRegex(TypeError, r"Argument type not recognized"):
torch.TypedStorage(torch.tensor([]), dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, r"Too many positional arguments"):
torch.TypedStorage(0, 0, dtype=dtype, device=device)
if isinstance(s, torch.TypedStorage):
s_other = torch.TypedStorage([1, 2, 3, 4], device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r'cannot set item'):
s.fill_(s_other)
def test_storage_error_no_attribute(self):
storage_classes = [
torch.cuda.ByteStorage,
torch.cuda.FloatStorage,
]
for storage_class in storage_classes:
with self.assertRaisesRegex(RuntimeError, r'Not available for CUDA storage'):
storage_class.from_buffer()
with self.assertRaisesRegex(RuntimeError, r'Not available for CUDA storage'):
storage_class._new_with_weak_ptr()
with self.assertRaisesRegex(RuntimeError, r'Not available for CUDA storage'):
storage_class._new_shared_filename(0, 0, 0)
def test_storage_casts(self):
storage = torch.IntStorage([-1, 0, 1, 2, 3, 4])
self.assertEqual(storage.size(), 6)
self.assertEqual(storage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(storage.type(), 'torch.IntStorage')
self.assertIs(storage.dtype, torch.int32)
floatStorage = storage.float()
self.assertEqual(floatStorage.size(), 6)
self.assertEqual(floatStorage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(floatStorage.type(), 'torch.FloatStorage')
self.assertEqual(floatStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(floatStorage.dtype, torch.float32)
halfStorage = storage.half()
self.assertEqual(halfStorage.size(), 6)
self.assertEqual(halfStorage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(halfStorage.type(), 'torch.HalfStorage')
self.assertEqual(halfStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(halfStorage.dtype, torch.float16)
bfloat16Storage = storage.bfloat16()
self.assertEqual(bfloat16Storage.size(), 6)
self.assertEqual(bfloat16Storage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(bfloat16Storage.type(), 'torch.BFloat16Storage')
self.assertEqual(bfloat16Storage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(bfloat16Storage.dtype, torch.bfloat16)
longStorage = storage.long()
self.assertEqual(longStorage.size(), 6)
self.assertEqual(longStorage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(longStorage.type(), 'torch.LongStorage')
self.assertEqual(longStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(longStorage.dtype, torch.int64)
shortStorage = storage.short()
self.assertEqual(shortStorage.size(), 6)
self.assertEqual(shortStorage.tolist(), [-1, 0, 1, 2, 3, 4])
self.assertEqual(shortStorage.type(), 'torch.ShortStorage')
self.assertEqual(shortStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(shortStorage.dtype, torch.int16)
doubleStorage = storage.double()
self.assertEqual(doubleStorage.size(), 6)
self.assertEqual(doubleStorage.tolist(), [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
self.assertEqual(doubleStorage.type(), 'torch.DoubleStorage')
self.assertEqual(doubleStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(doubleStorage.dtype, torch.float64)
charStorage = storage.char()
self.assertEqual(charStorage.size(), 6)
self.assertEqual(charStorage.tolist(), [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
self.assertEqual(charStorage.type(), 'torch.CharStorage')
self.assertEqual(charStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
self.assertIs(charStorage.dtype, torch.int8)
byteStorage = storage.byte()
self.assertEqual(byteStorage.size(), 6)
self.assertEqual(byteStorage.tolist(), [255, 0, 1, 2, 3, 4])
self.assertEqual(byteStorage.type(), 'torch.ByteStorage')
self.assertEqual(byteStorage.int().tolist(), [255, 0, 1, 2, 3, 4])
self.assertIs(byteStorage.dtype, torch.uint8)
boolStorage = storage.bool()
self.assertEqual(boolStorage.size(), 6)
self.assertEqual(boolStorage.tolist(), [True, False, True, True, True, True])
self.assertEqual(boolStorage.type(), 'torch.BoolStorage')
self.assertEqual(boolStorage.int().tolist(), [1, 0, 1, 1, 1, 1])
self.assertIs(boolStorage.dtype, torch.bool)
complexfloat_storage = torch.ComplexFloatStorage([-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j])
self.assertEqual(complexfloat_storage.size(), 6)
self.assertEqual(complexfloat_storage.tolist(), [-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j])
self.assertEqual(complexfloat_storage.type(), 'torch.ComplexFloatStorage')
self.assertIs(complexfloat_storage.dtype, torch.complex64)
complexdouble_storage = complexfloat_storage.complex_double()
self.assertEqual(complexdouble_storage.size(), 6)
self.assertEqual(complexdouble_storage.tolist(), [-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j])
self.assertEqual(complexdouble_storage.type(), 'torch.ComplexDoubleStorage')
self.assertIs(complexdouble_storage.dtype, torch.complex128)
def test_storage_byteswap(self):
input = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
swapped_8bytes = [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]
swapped_4bytes = [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]
swapped_2bytes = [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]
swapped_1byte = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
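        # Each expected list reverses the input within aligned groups matching
        # the element width: 8 bytes for float64/int64/complex128, 4 bytes for
        # float32/int32/complex64, 2 bytes for the 16-bit types, and a no-op
        # for the 1-byte types.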
storage = torch.storage.TypedStorage(input, dtype=torch.uint8)._untyped_storage
storage_f64 = storage.__copy__()
storage_f64.byteswap(torch.float64)
self.assertEqual(storage_f64.tolist(), swapped_8bytes)
storage_f32 = storage.__copy__()
storage_f32.byteswap(torch.float32)
self.assertEqual(storage_f32.tolist(), swapped_4bytes)
storage_f16 = storage.__copy__()
storage_f16.byteswap(torch.float16)
self.assertEqual(storage_f16.tolist(), swapped_2bytes)
storage_bf16 = storage.__copy__()
storage_bf16.byteswap(torch.bfloat16)
self.assertEqual(storage_bf16.tolist(), swapped_2bytes)
storage_i64 = storage.__copy__()
storage_i64.byteswap(torch.int64)
self.assertEqual(storage_i64.tolist(), swapped_8bytes)
storage_i32 = storage.__copy__()
storage_i32.byteswap(torch.int32)
self.assertEqual(storage_i32.tolist(), swapped_4bytes)
storage_i16 = storage.__copy__()
storage_i16.byteswap(torch.int16)
self.assertEqual(storage_i16.tolist(), swapped_2bytes)
storage_i8 = storage.__copy__()
storage_i8.byteswap(torch.int8)
self.assertEqual(storage_i8.tolist(), swapped_1byte)
storage_ui8 = storage.__copy__()
storage_ui8.byteswap(torch.uint8)
self.assertEqual(storage_ui8.tolist(), swapped_1byte)
storage_bool = storage.__copy__()
storage_bool.byteswap(torch.bool)
self.assertEqual(storage_bool.tolist(), swapped_1byte)
storage_c128 = storage.__copy__()
storage_c128.byteswap(torch.complex128)
self.assertEqual(storage_c128.tolist(), swapped_8bytes)
storage_c64 = storage.__copy__()
storage_c64.byteswap(torch.complex64)
self.assertEqual(storage_c64.tolist(), swapped_4bytes)
# Test that internal versions of functions related to TypedStorage do not
# produce a deprecation warning
def test_typed_storage_internal_no_warning(self):
s0 = torch.FloatStorage(10)
s0_untyped = s0.untyped()
t0 = torch.randn(10)
funcs = [
lambda: torch.FloatStorage(_internal=True),
lambda: torch.TypedStorage(
dtype=torch.float,
device='cpu',
_internal=True),
lambda: torch.TypedStorage(
wrap_storage=s0_untyped,
dtype=s0.dtype,
_internal=True),
lambda: torch.FloatStorage._dtype,
lambda: s0._resize_(20),
lambda: s0._size(),
lambda: s0._untyped_storage,
lambda: s0._is_shared(),
lambda: s0._share_memory_(),
lambda: s0._pickle_storage_type(),
lambda: s0._setitem(slice(0, s0._size()), 1),
lambda: s0._element_size(),
lambda: s0._deepcopy({}),
lambda: s0._data_ptr(),
lambda: s0._nbytes(),
lambda: t0._typed_storage(),
]
if torch.cuda.is_available():
s1 = torch.cuda.FloatStorage(10)
s1_untyped = s1.untyped()
t1 = torch.randn(10, device='cuda')
funcs += [
lambda: torch.cuda.FloatStorage(_internal=True),
lambda: torch.TypedStorage(
dtype=torch.float,
device='cuda',
_internal=True),
lambda: torch.TypedStorage(
wrap_storage=s1_untyped,
dtype=s1.dtype,
_internal=True),
lambda: torch.cuda.FloatStorage._dtype,
lambda: s1._resize_(20),
lambda: s1._size(),
lambda: s1._untyped_storage,
lambda: s1._is_shared(),
lambda: s1._share_memory_(),
lambda: s1._pickle_storage_type(),
lambda: s1._setitem(slice(0, s1._size()), 1),
lambda: s1._element_size(),
lambda: s1._deepcopy({}),
lambda: s1._data_ptr(),
lambda: s1._nbytes(),
lambda: t1._typed_storage(),
]
# Check that each of the TypedStorage internal function calls do not
# produce a deprecation warning
for f in funcs:
with warnings.catch_warnings():
warnings.filterwarnings('error', "TypedStorage is deprecated")
f()
# Test that public functions related to TypedStorage produce a deprecation
# warning
@skipIfTorchInductor("FIXME")
def test_typed_storage_deprecation_warning(self):
s0 = torch.FloatStorage(10)
funcs = [
lambda: torch.FloatStorage(),
lambda: torch.FloatStorage.dtype,
lambda: s0.fill_(0),
lambda: s0.is_cuda,
lambda: s0.untyped(),
lambda: len(s0),
lambda: s0[0],
]
if torch.cuda.is_available():
s1 = torch.cuda.FloatStorage(10)
funcs += [
lambda: torch.cuda.FloatStorage(),
lambda: torch.cuda.FloatStorage.dtype,
lambda: s1.fill_(0),
lambda: s1.is_cuda,
lambda: s1.untyped(),
lambda: len(s1),
lambda: s1[0],
]
# Check that each of the TypedStorage function calls produce a warning
# if warnings are reset between each
for f in funcs:
with AlwaysWarnTypedStorageRemoval(True):
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
f()
self.assertEqual(len(w), 1, msg=str([str(a) for a in w]))
warning = w[0].message
                    # the deprecation message is emitted as a UserWarning
                    self.assertIsInstance(warning, UserWarning)
self.assertTrue(re.search(
'^TypedStorage is deprecated',
str(warning)))
# Test that only the first warning is raised by default
torch.storage._reset_warn_typed_storage_removal()
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
torch.FloatStorage()
torch.randn(10).storage()
self.assertEqual(len(w), 1, msg=str([str(a) for a in w]))
warning = w[0].message
self.assertTrue(re.search(
'^TypedStorage is deprecated',
str(warning)))
# Check the line of code from the warning's stack
with open(w[0].filename, encoding="utf-8") as f:
code_line = f.readlines()[w[0].lineno - 1]
self.assertTrue(re.search(re.escape('torch.FloatStorage()'), code_line))
        # Check that no warning is emitted if one was already raised earlier
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
torch.FloatStorage()
torch.randn(10).storage()
self.assertEqual(len(w), 0, msg=str([str(a) for a in w]))
def test_from_file(self):
def assert_with_filename(filename):
size = 10000
s1 = torch.FloatStorage.from_file(filename, True, size)
t1 = torch.FloatTensor(s1).copy_(torch.randn(size))
self.assertEqual(s1.data_ptr(), torch.FloatTensor(s1).data_ptr())
# check mapping
s2 = torch.FloatStorage.from_file(filename, True, size)
t2 = torch.FloatTensor(s2)
self.assertEqual(t1, t2, atol=0, rtol=0)
# check changes to t1 from t2
rnum = random.uniform(-1, 1)
t1.fill_(rnum)
self.assertEqual(t1, t2, atol=0, rtol=0)
# check changes to t2 from t1
rnum = random.uniform(-1, 1)
t2.fill_(rnum)
self.assertEqual(t1, t2, atol=0, rtol=0)
# release the tensors
del s1, t1, s2, t2
with TemporaryFileName() as fname:
assert_with_filename(fname)
if IS_FILESYSTEM_UTF8_ENCODING:
with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname, TemporaryFileName(dir=dname) as fname:
assert_with_filename(fname)
def test_torch_from_file(self):
def assert_with_filename(filename):
size = 10000
s1 = torch.from_file(filename, True, size, dtype=torch.float)
t1 = torch.FloatTensor(s1).copy_(torch.randn(size))
# check mapping
s2 = torch.from_file(filename, True, size, dtype=torch.float)
t2 = torch.FloatTensor(s2)
self.assertEqual(t1, t2, atol=0, rtol=0)
# check changes to t1 from t2
rnum = random.uniform(-1, 1)
t1.fill_(rnum)
self.assertEqual(t1, t2, atol=0, rtol=0)
# check changes to t2 from t1
rnum = random.uniform(-1, 1)
t2.fill_(rnum)
self.assertEqual(t1, t2, atol=0, rtol=0)
# release the tensors
del s1, t1, s2, t2
with TemporaryFileName() as fname:
assert_with_filename(fname)
if IS_FILESYSTEM_UTF8_ENCODING:
with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname, TemporaryFileName(dir=dname) as fname:
assert_with_filename(fname)
def test_print(self):
default_type = torch.tensor([]).type()
for t in torch._tensor_classes:
if t == torch.HalfTensor:
continue # HalfTensor does not support fill
if t.is_sparse:
continue
if t.is_cuda and not torch.cuda.is_available():
continue
obj = t(100, 100).fill_(1)
obj.__repr__()
str(obj)
# test half tensor
obj = torch.rand(100, 100, device='cpu').half()
obj.__repr__()
str(obj)
for t in torch._storage_classes:
if t == torch.BFloat16Storage:
continue # Fix once fill is enabled for bfloat16
if t.is_cuda and not torch.cuda.is_available():
continue
if t == torch.BoolStorage or t == torch.cuda.BoolStorage:
obj = t(100).fill_(True)
else:
obj = t(100).fill_(1)
obj.__repr__()
str(obj)
# test complex tensor
        # complex tensor printing uses two formatters, one for the real parts
        # and one for the imaginary parts; this is consistent with NumPy
x = torch.tensor([2.3 + 4j, 7 + 6j])
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([2.3000+4.j, 7.0000+6.j])''')
# test complex half tensor
x = torch.tensor([1.25 + 4j, -7. + 6j], dtype=torch.chalf)
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([ 1.2500+4.j, -7.0000+6.j], dtype=torch.complex32)''')
# test scientific notation for complex tensors
x = torch.tensor([1e28 + 2j , -1e-28j])
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([1.0000e+28+2.0000e+00j, -0.0000e+00-1.0000e-28j])''')
# test big integer
x = torch.tensor(2341234123412341)
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor(2341234123412341)''')
# test scientific notation
x = torch.tensor([1e28, 1e-28])
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([1.0000e+28, 1.0000e-28])''')
# test scientific notation using set_printoptions
x = torch.tensor([1e2, 1e-2])
torch.set_printoptions(sci_mode=True)
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([1.0000e+02, 1.0000e-02])''')
torch.set_printoptions(sci_mode=False)
self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([100.0000,   0.0100])''')
torch.set_printoptions(sci_mode=None) # reset to the default value
# test no leading space if all elements positive
x = torch.tensor([1, 2])
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([1, 2])''')
# test for leading space if there are negative elements
x = torch.tensor([1, -2])
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([ 1, -2])''')
# test inf and nan
x = torch.tensor([4, inf, 1.5, -inf, 0, nan, 1])
self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([4.0000,    inf, 1.5000,   -inf, 0.0000,    nan, 1.0000])''')
y = torch.tensor([4, inf, complex(1.5, inf), complex(-inf, 4), 0, complex(nan, inf), complex(3, nan)])
self.assertEqual(y.__repr__(), str(y))
expected_str = '''\
tensor([4.0000+0.j, inf+0.j, 1.5000+infj, -inf+4.j, 0.0000+0.j, nan+infj,
3.0000+nanj])'''
self.assertExpectedInline(str(y), expected_str)
# test dtype
with set_default_dtype(torch.float):
x = torch.tensor([1e-324, 1e-323, 1e-322, 1e307, 1e308, 1e309], dtype=torch.float64)
self.assertEqual(x.__repr__(), str(x))
expected_str = '''\
tensor([ 0.0000e+00, 9.8813e-324, 9.8813e-323, 1.0000e+307, 1.0000e+308,
inf], dtype=torch.float64)'''
self.assertExpectedInline(str(x), expected_str)
# test changing default dtype
with set_default_dtype(torch.float64):
self.assertEqual(x.__repr__(), str(x))
expected_str = '''\
tensor([ 0.0000e+00, 9.8813e-324, 9.8813e-323, 1.0000e+307, 1.0000e+308,
inf])'''
self.assertExpectedInline(str(x), expected_str)
# test summary
x = torch.zeros(10000)
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([0., 0., 0., ..., 0., 0., 0.])''')
# test internal summary function
x = torch.rand(1, 20, 5, 30)
summary = torch._tensor_str.get_summarized_data(x)
self.assertEqual(summary.shape, (1, 6, 5, 6))
first_and_last = [0, 1, 2, -3, -2, -1]
self.assertEqual(summary, x[:, first_and_last][..., first_and_last])
# test device
if torch.cuda.is_available():
x = torch.tensor([123], device='cuda:0')
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([123], device='cuda:0')''')
# test changing default to cuda
torch.set_default_tensor_type(torch.cuda.FloatTensor)
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([123])''')
# test printing a tensor on a different gpu than current one.
if torch.cuda.device_count() >= 2:
with torch.cuda.device(1):
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([123], device='cuda:0')''')
# test printing cpu tensor when default device is cuda
y = torch.tensor([123], device='cpu')
self.assertEqual(y.__repr__(), str(y))
self.assertExpectedInline(str(y), '''tensor([123], device='cpu')''')
torch.set_default_tensor_type(default_type)
# test integral floats and requires_grad
x = torch.tensor([123.], requires_grad=True)
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([123.], requires_grad=True)''')
# test non-contiguous print
# sliced tensor should have > PRINT_OPTS.threshold elements
x = torch.ones(100, 2, 2, 10)
y = x.as_strided(size=(100, 2, 10), stride=(2 * 2 * 10, 2 * 10, 1))
self.assertEqual(str(y), y.__repr__())
expected_str = '''\
tensor([[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]],
[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]],
[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]],
...,
[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]],
[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]],
[[1., 1., 1., ..., 1., 1., 1.],
[1., 1., 1., ..., 1., 1., 1.]]])\
'''
self.assertExpectedInline(str(y), expected_str)
x = torch.ones(100, 2, 2, 10) * (1 + 1j)
y = x.as_strided(size=(100, 2, 10), stride=(2 * 2 * 10, 2 * 10, 1))
self.assertEqual(str(y), y.__repr__())
expected_str = '''\
tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]],
[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]],
[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]],
...,
[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]],
[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]],
[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]]])\
'''
self.assertExpectedInline(str(y), expected_str)
        # test printing a 0-dim tensor; the formatting matches NumPy's arrayprint style for scalars
x = torch.tensor(0.00002)
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor(2.0000e-05)''')
# test print boolean tensor
x = torch.tensor([True])
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([True])''')
x = torch.tensor(True)
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor(True)''')
# [Numpy] test print float in sci_mode when min < 0.0001.
x = torch.tensor([0.00002])
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([2.0000e-05])''')
# [Numpy] test print complex in sci_mode when real_min < 0.0001 and (or) imag_min < 0.0001.
x = torch.tensor([0.00002]) * (1 + 1j)
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([2.0000e-05+2.0000e-05j])''')
# [Numpy] test print float in sci_mode when max > 1e8.
# TODO: Pytorch uses fixed precision to print, while Numpy uses dragon4_scientific
# to do automatic trimming and padding.
x = torch.tensor([123456789.])
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([1.2346e+08])''')
# [Numpy] test print float in sci_mode when max / min > 1000.
x = torch.tensor([0.01, 11])
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([1.0000e-02, 1.1000e+01])''')
# [Numpy] test print int max / min > 1000, no sci_mode
x = torch.tensor([1, 1010])
self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([   1, 1010])''')
# [Numpy] test print int > 1e8, no sci_mode
x = torch.tensor([1000000000]) # 1e9
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([1000000000])''')
# [Numpy] test printing float in int_mode
x = torch.tensor([1., 1000.])
self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([   1., 1000.])''')
# [Numpy] test printing float in int_mode in sci format when max / min > 1000.
x = torch.tensor([1., 1010.])
self.assertEqual(x.__repr__(), str(x))
self.assertExpectedInline(str(x), '''tensor([1.0000e+00, 1.0100e+03])''')
def test_sizeof(self) -> None:
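        # __sizeof__ reports a fixed per-storage overhead plus a linear
        # per-element cost; subtracting the empty storage's size isolates the
        # linear part, so 100 elements must take exactly 10x the bytes of 10.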
sizeof_empty = torch.randn(0).storage().__sizeof__()
sizeof_10 = torch.randn(10).storage().__sizeof__()
sizeof_100 = torch.randn(100).storage().__sizeof__()
self.assertEqual((sizeof_100 - sizeof_empty) // (sizeof_10 - sizeof_empty), 10)
self.assertEqual((sizeof_100 - sizeof_empty) % (sizeof_10 - sizeof_empty), 0)
sizeof_empty = torch.randn(0).to(torch.uint8).storage().__sizeof__()
sizeof_10 = torch.randn(10).to(torch.uint8).storage().__sizeof__()
sizeof_100 = torch.randn(100).to(torch.uint8).storage().__sizeof__()
self.assertEqual((sizeof_100 - sizeof_empty) // (sizeof_10 - sizeof_empty), 10)
self.assertEqual((sizeof_100 - sizeof_empty) % (sizeof_10 - sizeof_empty), 0)
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_resizable(self) -> None:
x = torch.randn(5)
self.assertTrue(x.storage().resizable())
x.numpy()
self.assertFalse(x.storage().resizable())
def test_iter(self) -> None:
x = torch.randn(5, 5)
for i, sub in enumerate(x):
self.assertEqual(sub, x[i]) # noqa: PLR1736
x = torch.tensor([])
self.assertEqual(list(x), [])
def test_new(self) -> None:
x = torch.autograd.Variable(torch.tensor([]))
y = torch.autograd.Variable(torch.randn(4, 4))
z = torch.autograd.Variable(torch.IntTensor([1, 2, 3]))
self.assertEqual(x.new().shape, [0])
self.assertEqual(x.new(), x)
self.assertEqual(x.new(1, 2).shape, [1, 2])
self.assertEqual(x.new(torch.Size([3, 4])).shape, [3, 4])
self.assertEqual(x.new([3, 4]).shape, [2])
self.assertEqual(x.new([3, 4]).tolist(), [3, 4])
self.assertEqual(x.new((3, 4)).tolist(), [3, 4])
self.assertEqual(x.new([np.int32(3), np.float64(4)]).tolist(), [3, 4])
self.assertEqual(x.new(np.array((3, 4))).tolist(), [3, 4])
self.assertEqual(x.new([z[2], z[0] + 3]).tolist(), [3, 4])
self.assertEqual(x.new(size=(3, 4)).shape, [3, 4])
self.assertEqual(x.new(()).shape, [0])
self.assertEqual(x.new(y.storage()).data_ptr(), y.data_ptr())
self.assertEqual(x.new(y).data_ptr(), y.data_ptr())
self.assertIsNot(x.new(y), y)
self.assertRaises(TypeError, lambda: x.new(z))
# TypeError would be better
self.assertRaises(RuntimeError, lambda: x.new(z.storage()))
@unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property")
def test_pin_memory(self):
x = torch.randn(3, 5)
self.assertFalse(x.is_pinned())
if torch.cuda.is_available():
pinned = x.pin_memory()
self.assertTrue(pinned.is_pinned())
self.assertEqual(pinned, x)
self.assertNotEqual(pinned.data_ptr(), x.data_ptr())
# test that pin_memory on already pinned tensor has no effect
self.assertIs(pinned, pinned.pin_memory())
self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr())
def test_error_msg_type_translation(self):
with self.assertRaisesRegex(
RuntimeError,
# message includes both Double and Long
'(?=.*Double)(?=.*Long)'):
# Calls model with a LongTensor input but DoubleTensor weights
input = torch.zeros(1, 1, 1, 6, dtype=torch.long)
weight = torch.nn.Parameter(torch.zeros(1, 1, 1, 3, dtype=torch.double))
model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False)
model.weight = weight
model(input)
def test_apply(self):
x = torch.arange(1, 6)
res = x.clone().apply_(lambda k: k + k)
self.assertEqual(res, x * 2)
self.assertRaises(TypeError, lambda: x.apply_(lambda k: "str"))
def test_map(self):
x = torch.autograd.Variable(torch.randn(3, 3))
y = torch.autograd.Variable(torch.randn(3))
res = x.clone()
res.map_(y, lambda a, b: a + b)
self.assertEqual(res, x + y)
self.assertRaisesRegex(TypeError, "not callable", lambda: res.map_(y, "str"))
def test_map2(self):
x = torch.autograd.Variable(torch.randn(3, 3))
y = torch.autograd.Variable(torch.randn(3))
z = torch.autograd.Variable(torch.randn(1, 3))
res = x.clone()
res.map2_(y, z, lambda a, b, c: a + b * c)
self.assertEqual(res, x + y * z)
z.requires_grad = True
self.assertRaisesRegex(
RuntimeError, "requires grad",
lambda: res.map2_(y, z, lambda a, b, c: a + b * c))
def test_Size(self):
# expects iterable of int, not Tensor
self.assertRaises(TypeError, lambda: torch.Size(torch.ones(3)))
# initialization
empty_size = torch.Size([])
size = torch.Size([1, 2, 3])
self.assertIsInstance(empty_size, tuple)
self.assertIsInstance(size, tuple)
# value check __len__
self.assertEqual(len(empty_size), 0)
self.assertEqual(len(size), 3)
# type check __getitem__[int]
self.assertIsInstance(size[0], int)
self.assertIsInstance(size[1], int)
self.assertIsInstance(size[2], int)
# value check __getitem__[int]
self.assertEqual(size[0], 1)
self.assertEqual(size[1], 2)
self.assertEqual(size[2], 3)
# type check __getitem__[slice]
self.assertIsInstance(size[:], torch.Size)
self.assertIsInstance(size[:-1], torch.Size)
self.assertIsInstance(size[0:0], torch.Size)
# value check __getitem__[slice]
self.assertEqual(size[:], (1, 2, 3))
self.assertEqual(size[:-1], (1, 2))
self.assertEqual(size[0:0], ())
# type check __add__
self.assertIsInstance(empty_size + (), torch.Size)
self.assertIsInstance(size + (), torch.Size)
self.assertIsInstance(size + (4, 5), torch.Size)
self.assertIsInstance(size + size, torch.Size)
# value check __add__
self.assertEqual(empty_size + (), ())
self.assertEqual(size + (), (1, 2, 3))
self.assertEqual(size + (4, 5), (1, 2, 3, 4, 5))
self.assertEqual(size + size, (1, 2, 3, 1, 2, 3))
# type check __radd__
self.assertIsInstance(() + empty_size, torch.Size)
self.assertIsInstance((4, 5) + size, torch.Size)
# value check __radd__
self.assertEqual(() + size, (1, 2, 3))
self.assertEqual((4, 5) + size, (4, 5, 1, 2, 3))
# type check __mul__
self.assertIsInstance(empty_size * 0, torch.Size)
self.assertIsInstance(size * 0, torch.Size)
self.assertIsInstance(size * 1, torch.Size)
self.assertIsInstance(size * 2, torch.Size)
# value check __mul__
self.assertEqual(empty_size * 0, ())
self.assertEqual(size * 0, ())
self.assertEqual(size * 1, (1, 2, 3))
self.assertEqual(size * 2, (1, 2, 3, 1, 2, 3))
# type check __rmul__
self.assertIsInstance(0 * empty_size, torch.Size)
self.assertIsInstance(0 * size, torch.Size)
self.assertIsInstance(1 * size, torch.Size)
self.assertIsInstance(2 * size, torch.Size)
# value check __rmul__
self.assertEqual(0 * empty_size, ())
self.assertEqual(0 * size, ())
self.assertEqual(1 * size, (1, 2, 3))
self.assertEqual(2 * size, (1, 2, 3, 1, 2, 3))
def test_Size_concat_non_tuple_sequence(self):
# check that TypeError gets raised on adding non-tuple sequences.
from collections.abc import Sequence
class DummySequence(Sequence):
vals = list(range(5))
def __len__(self): return len(self.vals)
def __getitem__(self, i): return self.vals[i]
def __iter__(self): return iter(self.vals)
size = torch.Size([1, 2, 3])
seq = DummySequence()
msg = r"can only concatenate tuple \(not \w+\) to torch.Size"
self.assertRaisesRegex(TypeError, msg, lambda: size + seq)
msg = r"unsupported operand type"
self.assertRaisesRegex(TypeError, msg, lambda: seq + size)
def test_Size_concat_wildcard(self):
# check that 3rd party classes can support addition with torch.Size
class Wildcard:
def __add__(self, other): return 42
def __radd__(self, other): return 42
size = torch.Size([1, 2, 3])
wildcard = Wildcard()
self.assertEqual(wildcard + size, 42)
self.assertEqual(size + wildcard, 42)
def test_Size_scalar(self):
three = torch.tensor(3)
two = torch.tensor(2)
x = torch.Size([0, 1, two, three, 4])
for i in range(1, 5):
self.assertEqual(x[i], i)
def test_Size_iter(self):
for sizes in [iter([1, 2, 3, 4, 5]), range(1, 6)]:
x = torch.Size(sizes)
for i in range(5):
self.assertEqual(x[i], i + 1)
def test_t_not_2d_error(self):
self.assertRaises(RuntimeError, lambda: torch.randn(2, 3, 4).t())
self.assertRaises(RuntimeError, lambda: torch.randn(2, 3, 4).t_())
# skip this test for now as it affects all tests
@unittest.skipIf(True, "flush_denormal not supported")
def test_set_flush_denormal(self):
tiny_float = 1e-42
tiny_double = 1e-320
float_tensor = torch.FloatTensor([1.0, tiny_float])
double_tensor = torch.DoubleTensor([1.0, tiny_float, tiny_double])
self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0)
self.assertEqual(float_tensor[1], tiny_float, atol=tiny_float / 16, rtol=0)
self.assertEqual(double_tensor[0], 1.0, atol=0.0, rtol=0)
self.assertEqual(double_tensor[1], tiny_float, atol=0.0, rtol=0)
self.assertEqual(double_tensor[2], tiny_double, atol=0.0, rtol=0)
torch.set_flush_denormal(True)
self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0)
self.assertEqual(float_tensor[1], 0.0, atol=0.0, rtol=0) # tiny_float to zero
self.assertEqual(double_tensor[0], 1.0, atol=0.0, rtol=0)
# tiny_float is not converted to zero in double type
self.assertEqual(double_tensor[1], tiny_float, atol=0.0, rtol=0)
self.assertEqual(double_tensor[2], 0.0, atol=0.0, rtol=0) # tiny_double to zero
torch.set_flush_denormal(False)
def test_show_config(self):
# We can't usefully test the output; just make sure this doesn't crash
torch.__config__.show()
@unittest.skipIf(IS_FBCODE, "CXX_FLAGS is only for OSS build.")
def test_cxx_flags(self):
torch.__config__._cxx_flags()
def test_parallel_info(self):
torch.__config__.parallel_info()
def test_get_cpu_capability(self):
# This method is primarily exposed for torchvision's resize
torch.backends.cpu.get_cpu_capability()
# We have to ensure that method is torchscriptable as torchvision's resize
# should be torchscriptable
torch.jit.script(torch.backends.cpu.get_cpu_capability)
@slowTest
def test_slow_test(self):
# Just a smoketest to make sure our slowTest decorator works.
pass
def test_is_nonzero(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch.tensor([]).is_nonzero()
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch.tensor([0, 0]).is_nonzero()
self.assertFalse(torch.tensor(0).is_nonzero())
self.assertTrue(torch.tensor(1).is_nonzero())
self.assertFalse(torch.tensor([0]).is_nonzero())
self.assertTrue(torch.tensor([1]).is_nonzero())
self.assertFalse(torch.tensor([[0]]).is_nonzero())
self.assertTrue(torch.tensor([[1]]).is_nonzero())
self.assertTrue(torch.tensor(0.1).is_nonzero())
self.assertTrue(torch.tensor(-0.1).is_nonzero())
self.assertFalse(torch.tensor(0.0).is_nonzero())
self.assertTrue(torch.tensor(True).is_nonzero())
self.assertFalse(torch.tensor(False).is_nonzero())
self.assertFalse(torch.tensor(0 + 0j).is_nonzero())
self.assertTrue(torch.tensor(0 + 0.1j).is_nonzero())
def test_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([]))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0]))
with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
torch._assert_async(torch.tensor(0))
torch._assert_async(torch.tensor(1))
torch._assert_async(torch.tensor(0.1))
torch._assert_async(torch.tensor(-0.1))
with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
torch._assert_async(torch.tensor(0.0))
torch._assert_async(torch.tensor(True))
with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
torch._assert_async(torch.tensor(False))
torch._assert_async(torch.tensor(0 + 0.1j))
with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
torch._assert_async(torch.tensor(0 + 0j))
# NB: we must not be built with CUDA; if we are built with CUDA but no CUDA
# is available, we get a different error.
@unittest.skipIf(torch.backends.cuda.is_built() or IS_SANDCASTLE, "CUDA is built, can't test CUDA not built error")
def test_cuda_not_built(self):
msg = "Torch not compiled with CUDA enabled"
self.assertRaisesRegex(AssertionError, msg, lambda: torch.cuda.current_device())
self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1], device="cuda"))
self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1]).cuda())
self.assertRaisesRegex(TypeError, msg, lambda: torch.cuda.FloatTensor())
self.assertRaisesRegex(TypeError, msg, lambda: torch.set_default_tensor_type(torch.cuda.FloatTensor))
self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1]).to(device="cuda"))
def test_has_internal_overlap(self):
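        # Return codes of torch._debug_has_internal_overlap: no overlap, definite
        # overlap, or "too hard" when overlap cannot be cheaply ruled out.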
OVERLAP_NO = 0
OVERLAP_YES = 1
OVERLAP_TOO_HARD = 2
# Check for contiguous tensors
a = torch.randn(3, 3)
self.assertEqual(torch._debug_has_internal_overlap(a), OVERLAP_NO)
# Checks for zero strides
b = torch.randn(1, 3)
b_expanded = b.expand(4, 3)
self.assertEqual(torch._debug_has_internal_overlap(b_expanded), OVERLAP_YES)
# Check for zero strided, size 1 axis, in non-contiguous storage (gh-33812)
c = torch.randn(10).as_strided([2, 1, 5], [1, 0, 2])
self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_NO)
c = torch.randn(2, 1, 10)[::2].as_strided((2, 1, 5), (10, 0, 2))
self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_TOO_HARD)
def test_allow_tensor_metadata_change(self):
torch.ones(2, 3)
# Metadata changes are allowed on view tensors that are created from detach().
def test_memory_format(self):
def test_helper(x, memory_format):
y = x.contiguous(memory_format=memory_format)
self.assertFalse(y.is_contiguous())
self.assertTrue(y.is_contiguous(memory_format=memory_format))
self.assertEqual(y, x)
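        # channels_last applies to 4d (NCHW) tensors, channels_last_3d to 5d (NCDHW)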
test_helper(torch.randn(4, 3, 8, 8), torch.channels_last)
test_helper(torch.randn(4, 3, 8, 8, 8), torch.channels_last_3d)
def test_memory_format_contiguous_returns_same_tensor_if_already_satisfies(self):
def test_helper(x, memory_format):
alias = x.contiguous(memory_format=memory_format)
alias.fill_(7)
self.assertEqual(x, alias)
test_helper(torch.randn(4, 8, 8, 3).permute(0, 3, 1, 2), torch.channels_last)
test_helper(torch.randn(4, 8, 8, 8, 3).permute(0, 4, 1, 2, 3), torch.channels_last_3d)
def test_memory_format_empty(self):
def test_helper(dim1, dim2, memory_format):
with self.assertRaises(RuntimeError):
x = torch.empty(dim1, memory_format=memory_format)
x = torch.empty(dim2, memory_format=memory_format)
self.assertTrue(x.is_contiguous(memory_format=memory_format))
test_helper((3, 3), (3, 3, 3, 3), torch.channels_last)
test_helper((3, 3, 3), (3, 3, 3, 3, 3), torch.channels_last_3d)
@skipIfCrossRef
def test_dim_order(self):
shape = (2, 3, 5, 7)
t = torch.empty(shape)
self.assertSequenceEqual(t.dim_order(), (0, 1, 2, 3), seq_type=tuple)
self.assertSequenceEqual(t.dim_order(ambiguity_check=True), (0, 1, 2, 3), seq_type=tuple)
        # transpose doesn't really change the underlying physical memory,
        # so we expect dim_order to change to reflect that (as strides do)
self.assertSequenceEqual(t.transpose(0, 1).dim_order(), (1, 0, 2, 3))
t = torch.empty(shape, memory_format=torch.channels_last)
self.assertSequenceEqual(t.dim_order(), (0, 2, 3, 1))
t = torch.empty((2, 3, 5, 7, 8), memory_format=torch.channels_last_3d)
self.assertSequenceEqual(t.dim_order(), (0, 2, 3, 4, 1))
for dim_order in itertools.permutations(range(4)):
self.assertSequenceEqual(
dim_order, torch.empty_permuted(shape, dim_order).dim_order()
)
target_shapes = [[2, 2, 1, 2], [1, 2, 2, 2], [2, 2, 2, 1], [1, 2, 2, 1], [1, 2, 1, 2]]
for shape in target_shapes:
for memory_format in (torch.contiguous_format, torch.channels_last):
t = torch.empty(shape).to(memory_format=memory_format)
with self.assertRaises(RuntimeError):
t.dim_order(ambiguity_check=True)
if memory_format == torch.contiguous_format:
dim_order_target = list(range(len(shape)))
elif memory_format == torch.channels_last:
dim_order_target = [0, *list(range(2, len(shape))), 1]
self.assertSequenceEqual(
dim_order_target, t.dim_order(ambiguity_check=[torch.contiguous_format, torch.channels_last])
)
ambiguous_shapes = [[2, 1, 2, 2], [2, 2, 1, 1], [1, 2, 1, 1], [2, 1, 1, 2], [2, 1, 2, 1],
[1, 1, 1, 2], [1, 1, 2, 2], [1, 1, 1, 1], [2, 1, 1, 1], [1, 1, 2, 1]]
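        # These shapes remain ambiguous even when restricted to the two formats
        # above, so only check that the restricted call itself does not raise.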
for shape in ambiguous_shapes:
for memory_format in (torch.contiguous_format, torch.channels_last):
t = torch.empty(shape).to(memory_format=memory_format)
with self.assertRaises(RuntimeError):
t.dim_order(ambiguity_check=True)
t.dim_order(ambiguity_check=[torch.contiguous_format, torch.channels_last])
with self.assertRaises(TypeError):
torch.empty((1, 2, 3, 4)).dim_order(ambiguity_check="ILLEGAL_STR")
        # sparse tensors do not support dim_order
with self.assertRaises(AttributeError):
indices = torch.tensor([[0, 1, 2], [0, 1, 2]]) # (row, column) indices
values = torch.tensor([1.0, 2.0, 3.0]) # values at those indices
sparse_tensor = torch.sparse_coo_tensor(indices, values, size=(3, 3))
sparse_tensor.dim_order()
def test_subclass_tensors(self):
# raise an error when trying to subclass FloatTensor
with self.assertRaisesRegex(TypeError, "type 'torch.FloatTensor' is not an acceptable base type"):
class Foo1(torch.FloatTensor):
pass
# but allow subclassing Tensor:
class Foo2(torch.Tensor):
def foo(self):
return 5
f = Foo2()
self.assertEqual(f.foo(), 5)
def test_ndim(self):
a = torch.randn(1, 2, 3)
self.assertEqual(3, a.ndim)
b = torch.randn(())
self.assertEqual(0, b.ndim)
c = torch.randn(1, 0)
self.assertEqual(2, c.ndim)
def test_nbytes(self):
a = torch.randn(1, 2, 3, dtype=torch.float64)
self.assertEqual(a.numel() * a.element_size(), a.nbytes)
b = torch.randn(())
self.assertEqual(b.numel() * b.element_size(), b.nbytes)
c = torch.randn(1, 0)
self.assertEqual(c.numel() * c.element_size(), c.nbytes)
def test_fill_diagonal(self):
a1 = torch.randn(7, 3)
a2 = a1.clone()
v = 1
for i in range(3):
a2[i][i] = v
a1.fill_diagonal_(v)
self.assertEqual(a1, a2)
b1 = torch.randn(7, 3)
b2 = b1.clone()
for i in range(3):
b2[i][i] = v
b2[i + 4][i] = v
b1.fill_diagonal_(v, wrap=True)
self.assertEqual(b1, b2)
c1 = torch.rand(3, 3, 3)
c2 = c1.clone()
for i in range(3):
c2[i][i][i] = v
c1.fill_diagonal_(v)
self.assertEqual(c1, c2)
# non-contiguous tensor
d1 = torch.rand(3, 3, 3)[:, 1, ...]
d2 = d1.clone()
for i in range(3):
d2[i][i] = v
d1.fill_diagonal_(v)
self.assertEqual(d1, d2)
e1 = torch.rand(7, 3, 3)[:, 1, ...]
e2 = e1.clone()
for i in range(3):
e2[i][i] = v
e2[i + 4][i] = v
e1.fill_diagonal_(v, wrap=True)
self.assertEqual(e1, e2)
def test_setting_real_imag_to_a_number(self):
x = torch.randn(4, dtype=torch.cfloat)
x.real = 0
x.imag = 0
zeros = torch.zeros(4)
self.assertEqual(x.real, zeros)
self.assertEqual(x.imag, zeros)
def test_batch_norm_cpu_inference(self):
# input nchw in (2,1,1,1), (2,2,2,2)
inputs = [
torch.tensor([[[[-0.5000]]], [[[0.5000]]]]),
torch.tensor([
[
[[-0.5000, 0.5000], [-1.0000, 1.0000]],
[[-0.2500, -0.5000], [0.2500, 0.5000]]
],
[
[[0.1000, 1.0000], [1.0000, 0.1000]],
[[1.0000, 0.5000], [1.5000, -1.5000]]
]])]
# output nchw in (2,1,1,1), (2,2,2,2)
outputs = [
torch.tensor([
[[[-0.499997496604919433593750000]]],
[[[0.499997496604919433593750000]]]]),
torch.tensor([
[[[-0.499997496604919433593750000, 0.499997496604919433593750000],
[-0.999994993209838867187500000, 0.999994993209838867187500000]],
[[-0.249998748302459716796875000, -0.499997496604919433593750000],
[0.249998748302459716796875000, 0.499997496604919433593750000]]],
[[[0.099999502301216125488281250, 0.999994993209838867187500000],
[0.999994993209838867187500000, 0.099999502301216125488281250]],
[[0.999994993209838867187500000, 0.499997496604919433593750000],
[1.499992489814758300781250000, -1.499992489814758300781250000]]]])]
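        # In eval mode with default running stats (mean=0, var=1) the expected
        # output is x / sqrt(1 + eps) with eps=1e-5, matching the constants above.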
for i in range(len(inputs)):
for affine in [False, True]:
m = torch.nn.BatchNorm2d(inputs[i].size()[1], 1e-05, 0.1, affine=affine)
m.eval()
# contiguous case
input1 = inputs[i].contiguous()
output1 = m(input1)
# non-contiguous case
input2 = input1.permute(0, 1, 3, 2)
output2 = m(input2).permute(0, 1, 3, 2)
# channels last case
input3 = input1.contiguous(memory_format=torch.channels_last)
output3 = m(input3)
self.assertEqual(output3, outputs[i])
self.assertEqual(output3, output1)
self.assertEqual(output3, output2)
# FIXME: move these meta tests to their own test suite/class or
# distribute them among the appropriate test suites for their ops
@skipIfTorchDynamo("Fails after Triton update, see https://github.com/pytorch/pytorch/issues/94687")
def test_empty_meta(self):
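        # 'meta' tensors carry only shape and dtype metadata, so these huge
        # allocations are free; .item() must fail because there is no data to read.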
x = torch.empty(2 ** 20, 2 ** 20, device='meta')
y = torch.empty(2 ** 20, device='meta')
z = x + y
self.assertEqual(z.size(), (2 ** 20, 2 ** 20))
self.assertRaises(RuntimeError, lambda: z[0][0].item())
@skipIfTorchDynamo("Fails after Triton update, see https://github.com/pytorch/pytorch/issues/94687")
def test_format_scalar_meta(self):
x = torch.empty((), device='meta')
self.assertEqual(format(x), repr(x))
def test_upsample_nearest1d_meta(self):
# TODO: this test should be triggered by test_nn.py but right
# now meta is not enabled (and even if it was, we are probably
# missing too many meta functions to get through the test unmolested)
# NB: Can't make the exponent too big, or it will overflow
# signed 64-bit integer
x = torch.empty(2 * 10 ** 8, 3, 2 * 10 ** 8, device='meta')
z = torch.nn.functional.interpolate(x, scale_factor=2)
self.assertEqual(z.size(), (2 * 10 ** 8, 3, 4 * 10 ** 8))
self.assertRaises(RuntimeError, lambda: z[0][0][0].item())
# TODO: the out tests cannot be triggered by test_nn.py because
# we don't actually do out= arguments for nn functions, so there
# is no public API by which to get the out version
# interpolate doesn't seem to support out=
# (not sure why passing None here doesn't work? How strange...)
z = torch.empty(0, device='meta')
torch._C._nn.upsample_nearest1d(x, (4 * 10 ** 8,), 2, out=z)
self.assertEqual(z.size(), (2 * 10 ** 8, 3, 4 * 10 ** 8))
self.assertRaises(RuntimeError, lambda: z[0][0][0].item())
def test_upsample_nearest2d_meta(self):
# TODO: the out tests cannot be triggered by test_nn.py because
# we don't actually do out= arguments for nn functions, so there
# is no public API by which to get the out version
# Make sure we don't clobber strides of out tensor. NB: this
# test must be done on 2d/3d, because 1d doesn't have any meaningful
# layout support
x = torch.empty(4, 3, 8, 8, device='meta')
out = torch.empty(4, 3, 16, 16, device='meta', memory_format=torch.channels_last)
torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last)
out = torch.empty(4, 3, 16, 16, device='meta')
torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
self.assertTrue(out.is_contiguous())
# But if resize occurs, do clobber
x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last)
out = torch.empty(0, device='meta')
torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
# Complain if out dtype mismatch
x = torch.empty(4, 3, 8, 8, device='meta', dtype=torch.float)
out = torch.empty(4, 3, 16, 16, device='meta', dtype=torch.double)
self.assertExpectedRaisesInline(
RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
"""Expected out tensor to have dtype torch.float32 but got torch.float64 instead"""
)
# Complain if out device mismatch
x = torch.empty(0, 3, 8, 8, device='meta')
out = torch.empty(0, 3, 16, 16, device='cpu')
# FIXME: compiling should properly error with a device mismatch.
if not TEST_WITH_TORCHINDUCTOR:
self.assertExpectedRaisesInline(
RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
"""Attempting to copy from device meta to device cpu, but cross-device copies are not allowed!"""
)
def test_add_meta_scalar(self):
# From https://github.com/pytorch/pytorch/issues/53815
x = torch.empty(2, device='meta')
y = x + 2
self.assertEqual(y.size(), x.size())
def test_normal_shape(self):
for device in get_all_device_types():
tensor1 = torch.rand(1, device=device)
tensor4 = torch.rand(4, device=device)
tensor120 = torch.rand(120, device=device)
tensor2145 = torch.rand(2, 1, 4, 5, device=device)
tensor2345 = torch.rand(2, 3, 4, 5, device=device)
tensor2345_non_contiguous = torch.rand(2, 4, 3, 5, device=device).permute(0, 2, 1, 3)
tensor2345_channels_last = tensor2345.contiguous(memory_format=torch.channels_last)
output2345 = torch.zeros(2, 3, 4, 5, device=device)
output345 = torch.zeros(3, 4, 5, device=device)
# inputs have same size
self.assertEqual(torch.normal(tensor2345, tensor2345).size(), (2, 3, 4, 5))
self.assertEqual(torch.normal(tensor2345_non_contiguous, tensor2345).size(), (2, 3, 4, 5))
self.assertEqual(torch.normal(tensor2345, tensor2345_channels_last).size(), (2, 3, 4, 5))
self.assertEqual(torch.normal(tensor2345_non_contiguous, tensor2345_channels_last).size(), (2, 3, 4, 5))
# scalar case
self.assertEqual(torch.normal(tensor2345, 2).size(), (2, 3, 4, 5))
self.assertEqual(torch.normal(2, tensor2345).size(), (2, 3, 4, 5))
# inputs are expandable tensors
self.assertEqual(torch.normal(tensor2345, tensor1).size(), (2, 3, 4, 5))
self.assertEqual(torch.normal(tensor2145, tensor2345).size(), (2, 3, 4, 5))
# inputs are non-expandable tensors, but they have same number of elements
with self.assertRaisesRegex(
RuntimeError,
r"The size of tensor a \(120\) must match the size of "
r"tensor b \(5\) at non-singleton dimension 3"):
self.assertEqual(torch.normal(tensor120, tensor2345).size(), (120,))
with self.assertRaisesRegex(
RuntimeError,
r"The size of tensor a \(5\) must match the size of "
r"tensor b \(120\) at non-singleton dimension 3"):
self.assertEqual(torch.normal(tensor2345, tensor120).size(), (2, 3, 4, 5))
# inputs are non-expandable tensors and they don't have same number of elements
with self.assertRaisesRegex(
RuntimeError,
r"The size of tensor a \(5\) must match the size of "
r"tensor b \(4\) at non-singleton dimension 3"):
torch.normal(tensor2345, tensor4)
# output and inputs are size compatible
self.assertEqual(torch.normal(tensor2345, tensor2345, out=output2345).size(), (2, 3, 4, 5))
# output and inputs are not size compatible
with self.assertWarnsRegex(
UserWarning,
"This behavior is deprecated, and in a future PyTorch "
"release outputs will not be resized unless they have "
"zero elements"):
self.assertEqual(torch.normal(tensor2345, tensor2145, out=output345).size(), (2, 3, 4, 5))
with self.assertRaisesRegex(
RuntimeError,
r"The size of tensor a \(5\) must match the size of "
r"tensor b \(120\) at non-singleton dimension 3"):
# inputs are not expandable, output size is not the same as mean
torch.normal(tensor2345, tensor120, out=output345)
def test_tensoriterator_output_setup(self):
# Test whether the output's memory layout is correct
def test_memory_layout(x, y, scale, zero_point, out):
self.assertEqual(x.dim(), 4)
self.assertEqual(x.size(), y.size())
self.assertEqual(y.size(), out.size())
shape = x.size()
for n in range(shape[0]):
for c in range(shape[1]):
for h in range(shape[2]):
for w in range(shape[3]):
if scale is not None and zero_point is not None:
self.assertEqual(
out[n][c][h][w],
torch.ops.quantized.add(x[n][c][h][w], y[n][c][h][w], scale, zero_point))
else:
self.assertEqual(out[n][c][h][w], x[n][c][h][w] + y[n][c][h][w])
xraw = torch.rand(2, 3, 4, 4)
yraw = torch.rand(2, 3, 4, 4)
qxraw = torch.quantize_per_tensor(xraw, 0.1, 5, torch.quint8)
qyraw = torch.quantize_per_tensor(yraw, 0.1, 5, torch.quint8)
# contiguous case fast setup
test_memory_layout(xraw, yraw, None, None, xraw + yraw)
test_memory_layout(qxraw, qyraw, 0.1, 5, torch.ops.quantized.add(qxraw, qyraw, 0.1, 5))
# channels last case fast setup
x = xraw.contiguous(memory_format=torch.channels_last)
y = yraw.contiguous(memory_format=torch.channels_last)
test_memory_layout(x, y, None, None, x + y)
qx = qxraw.contiguous(memory_format=torch.channels_last)
qy = qyraw.contiguous(memory_format=torch.channels_last)
test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5))
# non contiguous case fast setup (dense, non-overlapping, same shape and strides)
x = xraw.permute(0, 2, 3, 1)
y = yraw.permute(0, 2, 3, 1)
test_memory_layout(x, y, None, None, x + y)
qx = qxraw.permute(0, 2, 3, 1)
qy = qyraw.permute(0, 2, 3, 1)
test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5))
# non contiguous case fast setup (dense, non-overlapping)
# input tensors have same shape and strides
        # output tensor has the same shape as the input tensors but different strides
# output tensor should preserve its strides in this case
x = xraw.permute(0, 2, 3, 1)
y = yraw.permute(0, 2, 3, 1)
out = torch.empty_like(xraw)
out = out.permute(0, 3, 2, 1)
expected_stride = out.stride()
test_memory_layout(x, y, None, None, torch.add(x, y, out=out))
self.assertEqual(expected_stride, out.stride())
# non contiguous case non fast setup
x = xraw.permute(0, 2, 3, 1)
y = yraw.permute(0, 3, 2, 1)
test_memory_layout(x, y, None, None, x + y)
qx = qxraw.permute(0, 2, 3, 1)
qy = qyraw.permute(0, 3, 2, 1)
test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5))
def test_conj_physical_meta_stride(self):
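        # conj_physical should preserve the strides of the FFT output, which may
        # be non-contiguous, rather than returning a contiguous result.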
a = torch.zeros((5, 3, 6), dtype=torch.complex128, device='meta')
b = torch._fft_c2c(a, [1], 1, True)
c = torch.conj_physical(b)
self.assertEqual(b.stride(), c.stride())
# Tests to make sure we still handle .data properly until it is removed
def test_dot_data_use(self):
        # .data allows changing a Tensor's type in place; check that we still
        # raise a helpful error.
with self.assertRaisesRegex(
RuntimeError,
# message includes both Double and ComplexFloat
'(?=.*Double)(?=.*ComplexFloat)'):
# Calls model with a LongTensor input but DoubleTensor weights
input = torch.randn(1, 1, 1, 6, dtype=torch.double)
weight = torch.zeros(1, 1, 1, 3, dtype=torch.complex64)
model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False)
model.weight.data = weight
model(input)
def test_empty_storage_view(self):
# we should be able to "modify" slices of a 0-element
# array without an error being raised due to
# trying to resize its storage
t = torch.from_numpy(np.empty((0, 4)))
t[:, 1::2] *= 1
def test_has_storage(self):
self.assertIsNotNone(torch.tensor([]).storage())
self.assertIsNotNone(torch.empty(0).storage())
self.assertIsNotNone(torch.tensor([]).clone().storage())
self.assertIsNotNone(torch.tensor([0, 0, 0]).nonzero().storage())
self.assertIsNotNone(torch.tensor([]).new().storage())
# FIXME: Extend this test and put in a TensorProperties test class
def test_numel(self):
b = torch.ByteTensor(3, 100, 100)
self.assertEqual(b.nelement(), 3 * 100 * 100)
self.assertEqual(b.numel(), 3 * 100 * 100)
# Verifies that (deep)copies of dtypes are the same objects
def test_copy_dtypes(self):
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
copied_dtype = copy.deepcopy(dtype)
self.assertIs(dtype, copied_dtype)
def test_dtype_is_signed(self):
        for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
self.assertEqual(dtype.is_signed, torch.is_signed(torch.tensor(0, dtype=dtype)))
self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.quint8.is_signed)
self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.qint8.is_signed)
self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.qint32.is_signed)
# FIXME: Put the following random tests into their own test class or test suite
@skipIfTorchDynamo("requires https://github.com/pytorch/torchdynamo/pull/1098")
def test_RNGState(self):
state = torch.get_rng_state()
stateCloned = state.clone()
before = torch.rand(1000)
self.assertEqual(state.ne(stateCloned).long().sum(), 0, atol=0, rtol=0)
torch.set_rng_state(state)
after = torch.rand(1000)
self.assertEqual(before, after, atol=0, rtol=0)
@skipIfTorchDynamo("requires https://github.com/pytorch/torchdynamo/pull/1098")
def test_RNGStateAliasing(self):
# Fork the random number stream at this point
gen = torch.Generator()
gen.set_state(torch.get_rng_state())
self.assertEqual(gen.get_state(), torch.get_rng_state())
target_value = torch.rand(1000)
# Dramatically alter the internal state of the main generator
_ = torch.rand(100000)
forked_value = torch.rand(1000, generator=gen)
self.assertEqual(target_value, forked_value, atol=0, rtol=0, msg="RNG has not forked correctly.")
@skipIfTorchDynamo("requires https://github.com/pytorch/torchdynamo/pull/1098")
def test_RNG_after_pickle(self):
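        # Pickling a tensor (here via ForkingPickler) must not advance the global RNG state.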
torch.random.manual_seed(100)
before = torch.rand(10)
torch.random.manual_seed(100)
buf = io.BytesIO()
tensor = torch.tensor([1, 2, 3])
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(tensor)
after = torch.rand(10)
self.assertEqual(before, after, atol=0, rtol=0)
@skipIfTorchDynamo("requires https://github.com/pytorch/torchdynamo/pull/1098")
def test_boxMullerState(self):
torch.manual_seed(123)
odd_number = 101
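        # Box-Muller generates normal samples in pairs; drawing an odd count leaves
        # a cached sample that get_rng_state/set_rng_state must round-trip.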
seeded = torch.randn(odd_number)
state = torch.get_rng_state()
midstream = torch.randn(odd_number)
torch.set_rng_state(state)
repeat_midstream = torch.randn(odd_number)
torch.manual_seed(123)
reseeded = torch.randn(odd_number)
self.assertEqual(midstream, repeat_midstream, atol=0, rtol=0,
msg='get_rng_state/set_rng_state not generating same sequence of normally distributed numbers')
self.assertEqual(seeded, reseeded, atol=0, rtol=0,
msg='repeated calls to manual_seed not generating same sequence of normally distributed numbers')
@skipIfTorchDynamo("requires https://github.com/pytorch/torchdynamo/pull/1098")
def test_manual_seed(self):
rng_state = torch.get_rng_state()
torch.manual_seed(2)
x = torch.randn(100)
self.assertEqual(torch.initial_seed(), 2)
torch.manual_seed(2)
y = torch.randn(100)
self.assertEqual(x, y)
max_int64 = 0x7fff_ffff_ffff_ffff
min_int64 = -max_int64 - 1
max_uint64 = 0xffff_ffff_ffff_ffff
# Check all boundary cases of valid seed value inputs
test_cases = [
# (seed, expected_initial_seed)
# Positive seeds should be unchanged
(max_int64, max_int64),
(max_int64 + 1, max_int64 + 1),
(max_uint64, max_uint64),
(0, 0),
# Negative seeds wrap around starting from the largest seed value
(-1, max_uint64),
(min_int64, max_int64 + 1)
]
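        # Seeds are reduced modulo 2**64, e.g. manual_seed(-1) stores 2**64 - 1.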
for seed, expected_initial_seed in test_cases:
torch.manual_seed(seed)
actual_initial_seed = torch.initial_seed()
msg = (f"expected initial_seed() = {expected_initial_seed:x} "
f"after calling manual_seed({seed:x}), but got {actual_initial_seed:x} instead")
self.assertEqual(expected_initial_seed, actual_initial_seed, msg=msg)
for invalid_seed in [min_int64 - 1, max_uint64 + 1]:
with self.assertRaisesRegex(ValueError, r'Overflow when unpacking long long'):
torch.manual_seed(invalid_seed)
torch.set_rng_state(rng_state)
# FIXME: Describe this test and port to the generic device framework in a more
# appropriate test suite for the copy operation
def test_copy_transpose(self):
x = torch.arange(100 * 100, dtype=torch.float).reshape(100, 100).t()
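        # x is a transposed view: x[i, j] == j * 100 + i, so column j of the copy
        # should contain the values j*100 .. j*100 + 99.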
y = torch.empty(100, 100, dtype=torch.float)
y.copy_(x)
self.assertEqual(y[:, 0], range(100))
self.assertEqual(y[:, 40], range(4000, 4100))
y = torch.empty(100, 100, dtype=torch.double)
y.copy_(x)
self.assertEqual(y[:, 0], range(100))
self.assertEqual(y[:, 40], range(4000, 4100))
# Validates regression reported in https://github.com/pytorch/pytorch/issues/45269
x = torch.arange(100 * 100).reshape(100, 100).to(dtype=torch.cfloat).t()
y = torch.empty(100, 100, dtype=torch.cfloat)
y.copy_(x)
self.assertEqual(y[:, 0], range(100))
self.assertEqual(y[:, 40], range(4000, 4100))
x = torch.arange(100 * 100).reshape(100, 100).to(dtype=torch.complex32).t()
y = torch.empty(100, 100, dtype=torch.complex32)
y.copy_(x)
self.assertEqual(y[:, 0], range(100))
self.assertEqual(y[:, 40], range(4000, 4100))
# FIXME: Port to a more appropriate test suite
def test_copy_broadcast(self):
torch.zeros(5, 6).copy_(torch.zeros(6))
self.assertRaises(RuntimeError, lambda: torch.zeros(5, 6).copy_(torch.zeros(30)))
# FIXME: Port to a more appropriate test suite
# Fails with inductor (and aot_eager) because functionalization replaces copy_ with copy,
# which doesn't properly error on bad inputs.
def test_copy_many_to_one(self):
        # Test that an in-place copy that attempts to write from many memory
        # locations to a single storage location raises a RuntimeError
self.assertRaises(RuntimeError, lambda: torch.zeros(1, 6).expand(5, 6).copy_(torch.zeros(5, 6)))
def test_copy_float16(self):
# Check that fbgemm code no longer reads memory out of bounds, see
# copy_impl and fbgemm::Float16ToFloat_ref.
# https://github.com/pytorch/pytorch/issues/88543
# Types to test different code paths in copy_impl.
dtypes = (
# out_dtype, src_dtype
(torch.float32, torch.float16), # fbgemm
(torch.float16, torch.float32), # fbgemm
(torch.float32, torch.float32), # TensorIterator
)
cases = (
# out_shape, src_shape, is_ok
# These cases used to crash with fbgemm, make sure these also raise
# exceptions with TensorIterator.
((1, 2, 3), (0, 2, 3), False), # same strides, not allowed by TI
((1, 5, 6), (4, 5, 6), False), # same strides, not allowed by TI
(1, (0, 2, 3), False), # different strides
((4, 5, 6), (0, 2, 3), False), # different strides
((4, 5, 6), (1, 2, 3), False), # different strides
((4, 5, 6), (6, 5, 4), False), # same numel
# These cases should pass with fbgemm and TensorIterator.
((4, 5, 6), (1, 5, 6), True), # same strides
((4, 5, 6), (4, 5, 6), True), # same strides
((0, 2, 3), 1, True), # different strides, allowed by TI
((4, 5, 6), (4, 5, 1), True), # different strides, allowed by TI
)
for (out_shape, src_shape, is_ok), (out_dtype, src_dtype) in itertools.product(cases, dtypes):
out = torch.zeros(out_shape, dtype=out_dtype, device=torch.device('cpu'))
src = torch.ones(src_shape, dtype=src_dtype, device=torch.device('cpu'))
if is_ok:
if torch.cuda.is_available():
out_cuda = out.cuda()
src_cuda = src.cuda()
res = out.copy_(src)
if torch.cuda.is_available():
res_cuda = out_cuda.copy_(src_cuda)
self.assertEqual(res, res_cuda)
else:
self.assertRaises(RuntimeError, lambda: out.copy_(src))
# FIXME: Port to a more appropriate test suite
def _test_to_with_layout(self, layout):
def test_copy_behavior(t, non_blocking=False):
self.assertIs(t, t.to(t, non_blocking=non_blocking))
self.assertIs(t, t.to(t.dtype, non_blocking=non_blocking))
self.assertIs(t, t.to(torch.empty_like(t), non_blocking=non_blocking))
self.assertIsNot(t, t.to(t, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(t.dtype, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(torch.empty_like(t), non_blocking=non_blocking, copy=True))
devices = [t.device]
if t.device.type == 'cuda':
if t.device.index == -1:
devices.append(f'cuda:{torch.cuda.current_device()}')
elif t.device.index == torch.cuda.current_device():
devices.append('cuda')
for device in devices:
self.assertIs(t, t.to(device, non_blocking=non_blocking))
self.assertIs(t, t.to(device, t.dtype, non_blocking=non_blocking))
self.assertIsNot(t, t.to(device, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(device, t.dtype, non_blocking=non_blocking, copy=True))
a = torch.tensor(5)
if layout == torch.sparse_csr:
a = torch.tensor([[0, 1, 2], [2, 0, 3]]).to_sparse_csr()
test_copy_behavior(a)
self.assertEqual(a.device, a.to('cpu').device)
self.assertEqual(a.device, a.to('cpu', dtype=torch.float32).device)
self.assertIs(torch.float32, a.to('cpu', dtype=torch.float32).dtype)
self.assertEqual(a.device, a.to(torch.float32).device)
self.assertIs(torch.float32, a.to(dtype=torch.float32).dtype)
def test_data_ptr(getter):
self.assertEqual(getter(a), getter(a.to('cpu')))
self.assertEqual(getter(a), getter(a.to(dtype=a.dtype, device=a.device, copy=False)))
self.assertEqual(getter(a), getter(a.to('cpu', copy=False)))
self.assertNotEqual(getter(a), getter(a.to('cpu', copy=True)))
if layout == torch.sparse_csr:
            # TODO: compressed sparse tensors currently don't support data_ptr.
            # Exercising the failure lets us widen coverage of this test once they do.
with self.assertRaisesRegex(RuntimeError, "Cannot access data pointer of Tensor that doesn't have storage"):
a.data_ptr()
            # While compressed sparse tensors don't have a concept of data_ptr,
            # the underlying tensors do. The implementation of to() appropriately
            # forwards the call to the components, which is what we test here.
test_data_ptr(lambda a: a.values().data_ptr())
test_data_ptr(lambda a: a.crow_indices().data_ptr())
test_data_ptr(lambda a: a.col_indices().data_ptr())
else:
test_data_ptr(lambda a: a.data_ptr())
if torch.cuda.is_available():
for non_blocking in [True, False]:
for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
b = torch.tensor(5., device=cuda)
test_copy_behavior(b, non_blocking)
self.assertEqual(b.device, b.to(cuda, non_blocking=non_blocking).device)
self.assertEqual(a.device, b.to('cpu', non_blocking=non_blocking).device)
self.assertEqual(b.device, a.to(cuda, non_blocking=non_blocking).device)
self.assertIs(torch.int32, b.to('cpu', dtype=torch.int32, non_blocking=non_blocking).dtype)
self.assertEqual(a.device, b.to('cpu', dtype=torch.int32, non_blocking=non_blocking).device)
self.assertIs(torch.int32, b.to(dtype=torch.int32).dtype)
self.assertEqual(b.device, b.to(dtype=torch.int32).device)
def test_to(self):
self._test_to_with_layout(torch.strided)
if torch.version.cuda is not None:
self._test_to_with_layout(torch.sparse_csr)
# FIXME: describe this test
def test_as_subclass(self):
class SubTensor(torch.Tensor):
member_var = object()
t0 = torch.tensor(0)
t1 = torch.tensor([1, 2])
t2 = torch.tensor([[3, 4], [5, 6]])
s0 = t0.as_subclass(SubTensor)
s1 = t1.as_subclass(SubTensor)
s2 = t2.as_subclass(SubTensor)
# Check that the correct type is returned.
self.assertTrue(type(s0) is SubTensor)
self.assertTrue(type(s1) is SubTensor)
self.assertTrue(type(s2) is SubTensor)
# Check that the data is equal.
self.assertEqual(t0, s0)
self.assertEqual(t1, s1)
self.assertEqual(t2, s2)
t0[()] = 1
t1[1] = 3
t2[1, 1] = 7
# Check that the data is equal even after modification.
self.assertEqual(t0, s0)
self.assertEqual(t1, s1)
self.assertEqual(t2, s2)
# Check that member variables are passed through.
self.assertTrue(s0.member_var is SubTensor.member_var)
self.assertTrue(s1.member_var is SubTensor.member_var)
self.assertTrue(s2.member_var is SubTensor.member_var)
# Test that autograd is propagated.
t = torch.tensor(5, dtype=torch.float32, requires_grad=True)
# Run a calculation on the tensor.
exp_t = torch.exp(t)
# Cast exp_t to a subclass.
exp_s = exp_t.as_subclass(SubTensor)
# Make sure that t.grad was initially None
self.assertTrue(t.grad is None)
# Run the autograd calculation.
exp_s.backward()
# Make sure autograd was propagated to the original tensor
# declared with requires_grad.
self.assertTrue(t.grad is not None)
# Make sure invalid subclasses raise nice errors
class BadSubTensor:
member_var = object()
err_msg = "Creating a Tensor subclass from a class that does not inherit from Tensor"
with self.assertRaisesRegex(TypeError, err_msg):
s0 = t0.as_subclass(BadSubTensor)
# FIXME: Port to a test suite that better fits slicing
def test_slice(self):
empty = torch.empty(0, 4)
x = torch.arange(0., 16).view(4, 4)
self.assertEqual(x[:], x)
self.assertEqual(x[:4], x)
# start and stop are clamped to the size of dim
self.assertEqual(x[:5], x)
# if start >= stop then the result is empty
self.assertEqual(x[2:1], empty)
self.assertEqual(x[2:2], empty)
# out of bounds is also empty
self.assertEqual(x[10:12], empty)
# additional correctness checks
self.assertEqual(x[:1].tolist(), [[0, 1, 2, 3]])
self.assertEqual(x[:-3].tolist(), [[0, 1, 2, 3]])
self.assertEqual(x[:, -2:3].tolist(), [[2], [6], [10], [14]])
self.assertEqual(x[0:-1:2].tolist(), [[0, 1, 2, 3], [8, 9, 10, 11]])
def test_split_with_sizes_copy_out(self):
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
shape = (30, 40, 50)
x = torch.rand(*shape, device=device)
cases = [
(0, [3, 7, 8, 12]),
(1, [3, 7, 10, 20]),
(-2, [3, 7, 10, 20]),
(2, [3, 7, 10, 12, 18]),
(-1, [3, 7, 10, 12, 18]),
(2, [3, 7, 10, 0, 30]),
]
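        # Cases cover positive and negative dims as well as a zero-sized split.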
for dim, split_sizes in cases:
views = x.split_with_sizes(split_sizes, dim=dim)
expects = [v.clone() for v in views]
out = [torch.zeros_like(v) for v in views]
for expect, t in zip(expects, out):
if expect.numel() != 0:
self.assertFalse(expect.eq(t).all().item())
torch.split_with_sizes_copy(x, split_sizes, dim=dim, out=out)
for expect, t in zip(expects, out):
self.assertTrue(expect.eq(t).all().item())
if not torch.cuda.is_available():
continue
# Test with cuda graph
out = [torch.zeros_like(v) for v in views]
for expect, t in zip(expects, out):
if expect.numel() != 0:
self.assertFalse(expect.eq(t).all().item())
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
torch.split_with_sizes_copy(x, split_sizes, dim=dim, out=out)
g.replay()
for expect, t in zip(expects, out):
self.assertTrue(expect.eq(t).all().item())
def test_type(self):
x = torch.randn(3, 3).double()
self.assertEqual(x.type('torch.FloatTensor').dtype, torch.float32)
self.assertEqual(x.type(torch.FloatTensor).dtype, torch.float32)
self.assertEqual(x.int().type(torch.Tensor).dtype, torch.get_default_dtype())
self.assertEqual(x.type(torch.int32).dtype, torch.int32)
# FIXME: port to a quantization test suite
@xfailIfS390X
def test_qengine(self):
qengines = torch.backends.quantized.supported_engines
original_qe = torch.backends.quantized.engine
for qe in qengines:
torch.backends.quantized.engine = qe
assert torch.backends.quantized.engine == qe, 'qengine not set successfully'
torch.backends.quantized.engine = original_qe
def test_terminate_handler_on_crash(self):
cmd = [sys.executable, '-c', "import os; os.environ[\"TORCH_CUSTOM_TERMINATE\"] ='1'; \
import torch; import torch._C; torch._C._abort()"]
with self.assertRaises(subprocess.CalledProcessError) as cm:
subprocess.check_output(cmd, shell=False)
e = cm.exception
output = e.stdout.decode("utf-8")
self.assertNotEqual(e.returncode, 0)
self.assertNotEqual(output, None)
self.assertIn('Unhandled exception caught in c10/util/AbortHandler.h', output)
# FIXME: port to a distributed test suite
@slowTest
def test_multinomial_invalid_probs(self):
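        # Each check runs in a spawned subprocess so that a failure is isolated
        # from the main test process.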
def _spawn_method(self, method, arg):
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
with mp.Pool(1) as pool:
out = pool.map(method, [arg])
self.assertTrue(out[0])
def _test_multinomial_invalid_probs(probs):
try:
# n_sample = 1 is a special case, test n_sample=2 which is more general
torch.multinomial(probs.to('cpu'), 2)
return False # Should not be reached
except RuntimeError as e:
return 'probability tensor contains either `inf`, `nan` or element < 0' in str(e)
_spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., -1., 1.]))
_spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., inf, 1.]))
_spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., -inf, 1.]))
_spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., 1., nan]))
# FIXME: port to more appropriate test suite
def test_to_with_tensor(self):
a = torch.tensor(5)
self.assertEqual(a.device, a.to(a).device)
if torch.cuda.is_available():
for non_blocking in [True, False]:
for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
b = torch.tensor(5., device=cuda)
self.assertEqual(b.device, b.to(b, non_blocking=non_blocking).device)
self.assertEqual(a.device, b.to(a, non_blocking=non_blocking).device)
self.assertEqual(b.device, a.to(b, non_blocking=non_blocking).device)
def test_device(self):
cpu = torch.device('cpu')
self.assertEqual('cpu', str(cpu))
self.assertEqual('cpu', cpu.type)
self.assertEqual(None, cpu.index)
cpu0 = torch.device('cpu:0')
self.assertEqual('cpu:0', str(cpu0))
self.assertEqual('cpu', cpu0.type)
self.assertEqual(0, cpu0.index)
cpu0 = torch.device('cpu', 0)
self.assertEqual('cpu:0', str(cpu0))
self.assertEqual('cpu', cpu0.type)
self.assertEqual(0, cpu0.index)
cuda = torch.device('cuda')
self.assertEqual('cuda', str(cuda))
self.assertEqual('cuda', cuda.type)
self.assertEqual(None, cuda.index)
cuda1 = torch.device('cuda:1')
self.assertEqual('cuda:1', str(cuda1))
self.assertEqual('cuda', cuda1.type)
self.assertEqual(1, cuda1.index)
cuda1 = torch.device('cuda', 1)
self.assertEqual('cuda:1', str(cuda1))
self.assertEqual('cuda', cuda1.type)
self.assertEqual(1, cuda1.index)
cuda90 = torch.device('cuda', 90)
self.assertEqual('cuda:90', str(cuda90))
self.assertEqual('cuda', cuda90.type)
self.assertEqual(90, cuda90.index)
self.assertRaises(RuntimeError, lambda: torch.device('cpu:-1'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:-1'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 '))
self.assertRaises(RuntimeError, lambda: torch.device('cuda: 2'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 2'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2.'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2?'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:?2'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2.232'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 cuda:3'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2+cuda:3'))
self.assertRaises(RuntimeError, lambda: torch.device('cuda:2cuda:3'))
self.assertRaises(RuntimeError, lambda: torch.device(-1))
self.assertRaises(RuntimeError, lambda: torch.device('other'))
self.assertRaises(RuntimeError, lambda: torch.device('other:0'))
device_set = {'cpu', 'cpu:0', 'cuda', 'cuda:0', 'cuda:1', 'cuda:10', 'cuda:100'}
device_hash_set = set()
device_hash_set.update(hash(torch.device(device)) for device in device_set)
self.assertEqual(len(device_set), len(device_hash_set))
def get_expected_device_repr(device):
if device.index is not None:
return f"device(type='{device.type}', index={device.index})"
return f"device(type='{device.type}')"
for device in device_set:
dev = torch.device(device)
self.assertEqual(repr(dev), get_expected_device_repr(dev))
# Tests that the use_deterministic_flag can be set as expected
@wrapDeterministicFlagAPITest
def test_deterministic_flag(self):
for deterministic, warn_only in product([True, False], [True, False]):
torch.use_deterministic_algorithms(deterministic, warn_only=warn_only)
self.assertEqual(deterministic, torch.are_deterministic_algorithms_enabled())
self.assertEqual(warn_only, torch.is_deterministic_algorithms_warn_only_enabled())
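            # The debug mode encodes the (deterministic, warn_only) pair:
            # 0 = default, 1 = warn, 2 = error.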
if deterministic:
if warn_only:
debug_mode = 1
else:
debug_mode = 2
else:
debug_mode = 0
self.assertEqual(debug_mode, torch.get_deterministic_debug_mode())
for debug_mode in [0, 1, 2]:
torch.set_deterministic_debug_mode(debug_mode)
self.assertEqual(debug_mode, torch.get_deterministic_debug_mode())
deterministic = debug_mode in [1, 2]
warn_only = debug_mode == 1
self.assertEqual(deterministic, torch.are_deterministic_algorithms_enabled())
self.assertEqual(warn_only, torch.is_deterministic_algorithms_warn_only_enabled())
for debug_mode, debug_mode_str in [(0, 'default'), (1, 'warn'), (2, 'error')]:
torch.set_deterministic_debug_mode(debug_mode_str)
self.assertEqual(debug_mode, torch.get_deterministic_debug_mode())
with self.assertRaisesRegex(
TypeError,
r"_set_deterministic_algorithms\(\): argument 'mode' \(position 1\) must be bool, not int"):
torch.use_deterministic_algorithms(1)
with self.assertRaisesRegex(
TypeError,
r"_set_deterministic_algorithms\(\): argument 'warn_only' must be bool, not int"):
torch.use_deterministic_algorithms(False, warn_only=1)
# Tests that torch.utils.deterministic.fill_uninitialized_memory can be set as expected
def test_deterministic_fill_uninitialized_memory(self):
with DeterministicGuard(True, fill_uninitialized_memory=False):
self.assertFalse(torch.utils.deterministic.fill_uninitialized_memory)
self.assertFalse(torch._C._get_deterministic_fill_uninitialized_memory())
with DeterministicGuard(True, fill_uninitialized_memory=True):
self.assertTrue(torch.utils.deterministic.fill_uninitialized_memory)
self.assertTrue(torch._C._get_deterministic_fill_uninitialized_memory())
self.assertFalse(torch.utils.deterministic.fill_uninitialized_memory)
self.assertFalse(torch._C._get_deterministic_fill_uninitialized_memory())
torch.utils.deterministic.fill_uninitialized_memory = False
self.assertFalse(torch.utils.deterministic.fill_uninitialized_memory)
self.assertFalse(torch._C._get_deterministic_fill_uninitialized_memory())
torch.utils.deterministic.fill_uninitialized_memory = True
self.assertTrue(torch.utils.deterministic.fill_uninitialized_memory)
self.assertTrue(torch._C._get_deterministic_fill_uninitialized_memory())
torch._C._set_deterministic_fill_uninitialized_memory(False)
self.assertFalse(torch.utils.deterministic.fill_uninitialized_memory)
self.assertFalse(torch._C._get_deterministic_fill_uninitialized_memory())
torch._C._set_deterministic_fill_uninitialized_memory(True)
self.assertTrue(torch.utils.deterministic.fill_uninitialized_memory)
self.assertTrue(torch._C._get_deterministic_fill_uninitialized_memory())
with self.assertRaisesRegex(RuntimeError, r"expected a bool, but got int"):
torch.utils.deterministic.fill_uninitialized_memory = 1
def test_type_conversion_via_dtype_name(self):
x = torch.tensor([1])
self.assertEqual(x.byte().dtype, torch.uint8)
self.assertEqual(x.bool().dtype, torch.bool)
self.assertEqual(x.char().dtype, torch.int8)
self.assertEqual(x.double().dtype, torch.float64)
self.assertEqual(x.float().dtype, torch.float32)
self.assertEqual(x.half().dtype, torch.float16)
self.assertEqual(x.int().dtype, torch.int32)
self.assertEqual(x.bfloat16().dtype, torch.bfloat16)
cfloat = x.cfloat()
self.assertEqual(cfloat.dtype, torch.complex64)
self.assertEqual(cfloat.real, x.float())
self.assertEqual(cfloat.imag, torch.zeros_like(cfloat.imag))
cdouble = x.cdouble()
self.assertEqual(cdouble.dtype, torch.complex128)
self.assertEqual(cdouble.real, x.double())
self.assertEqual(cdouble.imag, torch.zeros_like(cdouble.imag))
chalf = x.chalf()
self.assertEqual(chalf.dtype, torch.complex32)
self.assertEqual(chalf.real, x.half())
self.assertEqual(chalf.imag, torch.zeros_like(chalf.imag))
def test_type_alias(self):
type_alias_map = {torch.float64: torch.double,
torch.float32: torch.float,
torch.int32: torch.int,
torch.int64: torch.long,
torch.int16: torch.short,
torch.float16: torch.half,
torch.complex32: torch.chalf,
torch.complex64: torch.cfloat}
for dtype, alias in type_alias_map.items():
self.assertIs(alias, dtype)
def test_doc_template(self) -> None:
"""
Test that all public API doc strings use the same standard template for
all common arguments such as tensor or dim
"""
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
with open(doc_file, encoding="utf-8") as f:
doc_strs = f.read()
matches = re.findall(
r'add_docstr\(([^,]+?),[^"\']*?(?:"""|\'\'\')(.*?)(?:"""|\'\'\')(?:\.|,?[^,\)]*?\))',
doc_strs,
re.MULTILINE | re.DOTALL,
)
self.assertTrue(matches)
for m in matches:
func = m[0].strip()
desc = m[1].strip()
for common_args in [multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args]:
for k, v in common_args.items():
self.assertNotIn(v, desc, f'The argument description "{v}" in {func} can be '
f'replaced by {{{k}}}')
def test_doc(self):
checked_types = (types.MethodType, types.FunctionType,
types.BuiltinFunctionType, types.BuiltinMethodType)
def _test_namespace(ns, *skips):
if isinstance(ns, object):
ns_name = ns.__class__.__name__
else:
ns_name = ns.__name__
skip_regexes = []
for r in skips:
if isinstance(r, str):
skip_regexes.append(re.compile(f'^{re.escape(r)}$'))
else:
skip_regexes.append(r)
for name in dir(ns):
if name.startswith('_'):
continue
if name in ['real', 'imag']:
y = torch.randn(1, dtype=torch.cfloat)
var = getattr(y, name)
elif name in ["H", "mT", "mH"]:
y = torch.randn(1, 1)
var = getattr(y, name)
else:
var = getattr(ns, name)
if not isinstance(var, checked_types):
continue
doc = var.__doc__
has_doc = doc is not None and len(doc.strip()) > 0
full_name = ns_name + '.' + name
if any(r.match(name) for r in skip_regexes):
self.assertFalse(has_doc,
f'New docs have been added for {full_name}, please remove '
'it from the skipped list in TestTorch.test_doc')
else:
self.assertTrue(has_doc, f'{full_name} is missing documentation')
        # FIXME: All of the following should be marked as expected failures
        # so that it is easier to tell when missing docs have been added.
        # FIXME: fix all the skipped ones below!
        _test_namespace(torch.randn(1),
'as_strided_',
re.compile('^clamp_(min|max)_?$'),
'is_distributed',
'is_nonzero',
'is_same_size',
'log_softmax',
'map2_',
'new',
'reinforce',
'relu',
'relu_',
'prelu',
'resize',
'resize_as',
'softmax',
'split_with_sizes',
'unsafe_split_with_sizes',
'_autocast_to_fp16',
'_autocast_to_fp32',
)
        _test_namespace(torch.nn)
        _test_namespace(torch.nn.functional, 'assert_int_or_pair')
# TODO: add torch.* tests when we have proper namespacing on ATen functions
# test_namespace(torch)
# FIXME: deprecate torch.Tensor constructor
def test_tensor_ctor_scalar(self):
x = torch.Tensor(torch.tensor(1.0))
self.assertEqual(x, torch.tensor(1.0))
def test_deepcopy_gradient(self):
from copy import deepcopy
a = torch.zeros(10)
a.grad = torch.ones(10)
self.assertEqual(a.grad, deepcopy(a).grad)
s = torch.zeros(10).to_sparse()
s.grad = torch.ones(10).to_sparse()
self.assertEqual(s.grad, deepcopy(s).grad)
# ensure sharing is not broken
c = deepcopy([a, a.grad])
self.assertTrue(c[0].grad is c[1])
def test_tensor_base_init(self):
# Direct construction not OK
self.assertRaises(RuntimeError, lambda: torch._C.TensorBase())
        # Subclassing it directly is not OK
with self.assertRaisesRegex(RuntimeError, "Cannot subclass"):
class Tfail(torch._C.TensorBase):
pass
# Doing so with Tensor is ok though
class T(torch.Tensor):
pass
T()
def test_storage_base_init(self):
# Direct construction not OK
self.assertRaises(RuntimeError, lambda: torch._C.StorageBase())
# But construction of subclass is OK
class T(torch._C.StorageBase):
pass
T()
def test_tensor_base_new(self):
# OK to call super().__new__, see
# https://github.com/pytorch/pytorch/issues/57421
class TestTensor(torch.Tensor):
@staticmethod
def __new__(cls, x, *args, **kwargs):
return super().__new__(cls, x, *args, **kwargs)
x = torch.ones(5)
TestTensor(x)
def test_storage_base_new(self):
# OK to call super().__new__, see
# https://github.com/pytorch/pytorch/issues/57421
class TestStorage(torch._C.StorageBase):
@staticmethod
def __new__(cls, x, *args, **kwargs):
return super().__new__(cls, x, *args, **kwargs)
x = torch.UntypedStorage(5)
TestStorage(x)
def test_pyobj_preserved(self):
x = torch.empty(2)
x.foo = 2 # put something on __dict__
y = torch.empty(2)
y.grad = x
del x # x is dead in Python
self.assertEqual(y.grad.foo, 2)
z = y.grad # it's live
del z # it's dead again
self.assertEqual(y.grad.foo, 2)
def test_subclass_preserved(self):
class MyTensor(torch.Tensor):
pass
x = MyTensor(torch.empty(2))
y = torch.empty(2)
y.grad = x
del x # x is dead in Python
self.assertEqual(type(y.grad), MyTensor)
z = y.grad # it's live
del z # it's dead again
self.assertEqual(type(y.grad), MyTensor)
@skipIfTorchDynamo("Tracker hook does not work in TorchDynamo")
def test_storage_dealloc(self):
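        # Tracker.make() returns (flag, token): flag[0] flips to True once the
        # token has been finalized, i.e. once its owner has been deallocated.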
m, t = Tracker.make()
s0 = torch.UntypedStorage(10)
s1 = s0
s0._tracker = t
del t
self.assertFalse(m[0])
del s0
self.assertFalse(m[0])
del s1
self.assertTrue(m[0])
@skipIfTorchDynamo("Tracker hook does not work in TorchDynamo")
def test_storage_from_tensor_dealloc(self):
m, t = Tracker.make()
a = torch.randn(10)
s0 = a.untyped_storage()
s0._tracker = t
del t
s1 = a.untyped_storage()
self.assertTrue(s0 is s1)
self.assertTrue(hasattr(s1, '_tracker'))
del a
self.assertFalse(m[0])
del s0
self.assertFalse(m[0])
del s1
self.assertTrue(m[0])
@skipIfTorchDynamo("Tracker hook does not work in TorchDynamo")
def test_storage_from_tensor_dealloc_zombie(self):
m, t = Tracker.make()
a = torch.randn(10)
s0 = a.untyped_storage()
s0._tracker = t
del t
s1 = a.untyped_storage()
self.assertTrue(s0 is s1)
self.assertTrue(hasattr(s1, '_tracker'))
self.assertFalse(m[0])
del s0
self.assertFalse(m[0])
del s1
self.assertFalse(m[0])
del a
self.assertTrue(m[0])
@skipIfTorchDynamo("Tracker hook does not work in TorchDynamo")
def test_storage_from_tensor_dealloc_resurrected(self):
m, t = Tracker.make()
a = torch.randn(10)
s0 = a.untyped_storage()
s0._tracker = t
del t
s1 = a.untyped_storage()
self.assertTrue(s0 is s1)
self.assertTrue(hasattr(s1, '_tracker'))
self.assertFalse(m[0])
del s0
self.assertFalse(m[0])
del s1
self.assertFalse(m[0])
s0 = a.untyped_storage()
self.assertTrue(isinstance(s0, torch.UntypedStorage))
del a
self.assertFalse(m[0])
del s0
self.assertTrue(m[0])
@skipIfTorchDynamo("Tracker hook does not work in TorchDynamo")
def test_storage_dealloc_resurrected(self):
m, t = Tracker.make()
s = torch.UntypedStorage(10)
s._tracker = t
del t
a = torch.tensor(s)
self.assertFalse(m[0])
del s
self.assertFalse(m[0])
s = a.untyped_storage()
self.assertTrue(isinstance(s, torch.UntypedStorage))
del a
self.assertFalse(m[0])
del s
self.assertTrue(m[0])
@skipIfTorchDynamo("Tracker hook does not work in TorchDynamo")
def test_storage_dealloc_subclass_zombie(self):
class MyStorage(torch.UntypedStorage):
finalized_count = 0
def __del__(self):
MyStorage.finalized_count += 1
m, t = Tracker.make()
s = MyStorage(10)
s._tracker = t
del t
a = torch.tensor(s)
self.assertFalse(m[0])
del s
self.assertEqual(MyStorage.finalized_count, 0)
self.assertFalse(m[0])
del a
self.assertEqual(MyStorage.finalized_count, 1)
self.assertTrue(m[0])
@skipIfTorchDynamo("Tracker hook does not work in TorchDynamo")
def test_storage_dealloc_subclass_resurrected(self):
class MyStorage(torch.UntypedStorage):
finalized_count = 0
def __del__(self):
MyStorage.finalized_count += 1
m, t = Tracker.make()
s = MyStorage(10)
s._tracker = t
del t
a = torch.tensor(s)
self.assertFalse(m[0])
del s
self.assertEqual(MyStorage.finalized_count, 0)
self.assertFalse(m[0])
s = a.untyped_storage()
del a
self.assertFalse(m[0])
self.assertEqual(MyStorage.finalized_count, 0)
self.assertTrue(isinstance(s, MyStorage))
del s
self.assertEqual(MyStorage.finalized_count, 1)
self.assertTrue(m[0])
    def test_tensor_resurrecting_clear(self):
# Regression test for https://github.com/pytorch/pytorch/issues/136358
# A Tensor with custom __dict__
        # Autograd here is for the C++ reference later
t = torch.rand(2, requires_grad=True).clone()
t.foo = 2
# that is part of a cycle
l = []
l.append(l)
l.append(t)
        # Keep the Tensor alive from C++
        # (using the autograd graph here; any other means would work)
t2 = t ** 2
self.assertIs(t2.grad_fn._saved_self, t)
        # Clear all Python references and trigger the GC
del t, l
gc.collect()
        # We used to lose the dict!
self.assertTrue(hasattr(t2.grad_fn._saved_self, "foo"))
def test_tensor_slot_dealloc(self):
class SlotTensor1(torch.Tensor):
__slots__ = ['slot1']
class SlotTensor2(SlotTensor1):
__slots__ = ['slot2']
m1, t1 = Tracker.make()
m2, t2 = Tracker.make()
slot_tensor = SlotTensor2(torch.empty(2))
slot_tensor.slot1 = t1
slot_tensor.slot2 = t2
del t1
del t2
self.assertFalse(m1[0])
self.assertFalse(m2[0])
del slot_tensor
self.assertTrue(m1[0])
self.assertTrue(m2[0])
def test_storage_slot_dealloc(self):
class SlotStorage1(torch._C.StorageBase):
__slots__ = ['slot1']
class SlotStorage2(SlotStorage1):
__slots__ = ['slot2']
m1, t1 = Tracker.make()
m2, t2 = Tracker.make()
slot_storage = SlotStorage2(torch.UntypedStorage(2))
slot_storage.slot1 = t1
slot_storage.slot2 = t2
del t1
del t2
self.assertFalse(m1[0])
self.assertFalse(m2[0])
del slot_storage
self.assertTrue(m1[0])
self.assertTrue(m2[0])
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_tensor_dict_dealloc(self):
m, t = Tracker.make()
x = torch.empty(2)
x.arf = t
del t
self.assertFalse(m[0])
del x
self.assertTrue(m[0])
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_storage_dict_dealloc(self):
m, t = Tracker.make()
x = torch.UntypedStorage(2)
x.arf = t
del t
self.assertFalse(m[0])
del x
self.assertTrue(m[0])
def test_tensor_finalizer_dealloc(self):
m = [False]
class FinalizerTensor(torch.Tensor):
def __del__(self):
m[0] = True
fin_tensor = FinalizerTensor(torch.empty(2))
self.assertFalse(m[0])
del fin_tensor
self.assertTrue(m[0])
def test_storage_finalizer_dealloc(self):
m = [False]
class FinalizerStorage(torch._C.StorageBase):
def __del__(self):
m[0] = True
fin_storage = FinalizerStorage(torch.UntypedStorage(2))
self.assertFalse(m[0])
del fin_storage
self.assertTrue(m[0])
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1993")
def test_tensor_weakref_dealloc(self):
x = torch.empty(2)
m = [False]
def cb(r):
m[0] = True
wref = weakref.ref(x, cb)
del x
self.assertTrue(m[0])
self.assertEqual(wref(), None)
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1993")
def test_storage_weakref_dealloc(self):
x = torch.UntypedStorage(2)
m = [False]
def cb(r):
m[0] = True
wref = weakref.ref(x, cb)
del x
self.assertTrue(m[0])
self.assertEqual(wref(), None)
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_tensor_cycle_via_dict(self):
m1, t1 = Tracker.make()
x = torch.empty(2)
x._tracker = t1
del t1
m2, t2 = Tracker.make()
y = torch.empty(2)
y._tracker = t2
del t2
x._loop = y
y._loop = x
# C++ reference should keep the cycle live!
        # This exercises THPVariable_subtype_traverse
# NB: Because z.grad is a reference done entirely in C++, cycles
# involving it directly are NOT broken by Python GC; you've
# set up a good old C++ reference cycle which we cannot safely
# break (because C++ references are allowed to be accessed
# multithreaded-ly) (TODO: except maybe if you can prove that
# only Python has access to the C++ object, in which case you can
# also prove that no multithreaded access occurs)
z = torch.empty(2)
z.grad = x
del x
del y
gc.collect()
self.assertFalse(m1[0])
self.assertFalse(m2[0])
with disable_gc():
del z
self.assertFalse(m1[0])
self.assertFalse(m2[0])
gc.collect()
self.assertTrue(m1[0])
self.assertTrue(m2[0])
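    # `disable_gc` is a context manager from the shared test utilities; a
    # minimal sketch of its assumed behavior (not the real implementation):
    #
    #   @contextlib.contextmanager
    #   def disable_gc():
    #       if gc.isenabled():
    #           try:
    #               gc.disable()
    #               yield
    #           finally:
    #               gc.enable()
    #       else:
    #           yield
    #
    # It lets the tests above and below delete objects without the cyclic
    # garbage collector running in between.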
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_storage_cycle_via_dict(self):
m1, t1 = Tracker.make()
x = torch.UntypedStorage(2)
x._tracker = t1
del t1
m2, t2 = Tracker.make()
y = torch.UntypedStorage(2)
y._tracker = t2
del t2
x._loop = y
y._loop = x
# C++ reference should keep the cycle live!
        # This exercises THPVariable_subtype_traverse
# NB: Because z.grad is a reference done entirely in C++, cycles
# involving it directly are NOT broken by Python GC; you've
# set up a good old C++ reference cycle which we cannot safely
# break (because C++ references are allowed to be accessed
# multithreaded-ly) (TODO: except maybe if you can prove that
# only Python has access to the C++ object, in which case you can
# also prove that no multithreaded access occurs)
z = torch.UntypedStorage(2)
z.grad = x
del x
del y
gc.collect()
self.assertFalse(m1[0])
self.assertFalse(m2[0])
with disable_gc():
del z
self.assertFalse(m1[0])
self.assertFalse(m2[0])
gc.collect()
self.assertTrue(m1[0])
self.assertTrue(m2[0])
def test_tensor_cycle_via_slots(self):
m1 = [False]
m2 = [False]
class SlotTensor1(torch.Tensor):
__slots__ = ['slot1']
def __del__(self):
m1[0] = True
class SlotTensor2(SlotTensor1):
__slots__ = ['slot2']
def __del__(self):
m2[0] = True
x = SlotTensor1(torch.empty(2))
x_ref = weakref.ref(x)
y = SlotTensor2(torch.empty(2))
x.slot1 = y
y.slot2 = x
del x
with disable_gc():
del y
self.assertFalse(m1[0])
self.assertFalse(m2[0])
gc.collect()
self.assertTrue(m1[0])
self.assertTrue(m2[0])
self.assertIsNone(x_ref())
# At this point, we know the finalizer ran and the weakref
# was cleared. But is the object really gone?
self.assertFalse(any(isinstance(o, SlotTensor1) for o in gc.get_objects()))
def test_storage_cycle_via_slots(self):
m1 = [False]
m2 = [False]
class SlotStorage1(torch._C.StorageBase):
__slots__ = ['slot1']
def __del__(self):
m1[0] = True
class SlotStorage2(SlotStorage1):
__slots__ = ['slot2']
def __del__(self):
m2[0] = True
x = SlotStorage1(torch.UntypedStorage(2))
y = SlotStorage2(torch.UntypedStorage(2))
x.slot1 = y
y.slot2 = x
del x
with disable_gc():
del y
self.assertFalse(m1[0])
self.assertFalse(m2[0])
gc.collect()
self.assertTrue(m1[0])
self.assertTrue(m2[0])
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_storage_preserve_nonhermetic_in_hermetic_context(self):
from torch.library import Library, impl
global _my_storage
my_lib = Library("my_lib", "DEF") # noqa: TOR901
my_lib.define('my_func() -> None')
a = torch.tensor([1.])
_my_storage = a.untyped_storage()
m, t = Tracker.make()
_my_storage._tracker = t
del t
@impl(my_lib, 'my_func', '')
def my_func():
global _my_storage
del _my_storage
self.assertFalse(m[0])
torch.ops.my_lib.my_func()
self.assertFalse(m[0])
s = a.untyped_storage()
del a
del s
self.assertTrue(m[0])
# FIXME: move to test_autograd?
@skipIfTorchDynamo("TorchDynamo does not work well with hooks")
def test_backward_hooks_traverse(self):
m1, t1 = Tracker.make()
m2, t2 = Tracker.make()
x = torch.empty(2, requires_grad=True)
x._tracker = t1
y = torch.empty(2, requires_grad=True)
y._tracker = t2
del t1
del t2
# this hits a special setter, it's not just a __dict__ entry
x._backward_hooks = y
y._backward_hooks = x
del x
with disable_gc():
del y
self.assertFalse(m1[0])
self.assertFalse(m2[0])
gc.collect()
self.assertTrue(m1[0])
self.assertTrue(m2[0])
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1993")
def test_tensor_dead_weak_ref(self):
x = torch.ones(2)
w_x = weakref.ref(x)
y = torch.ones(2)
y.grad = x
del x
x = w_x()
# x should keep the tensor live. This didn't happen in earlier PyTorch
# versions.
del y
self.assertEqual(2, x.sum())
del x
self.assertIsNone(w_x())
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1993")
def test_storage_dead_weak_ref(self):
x = torch.UntypedStorage(2)
w_x = weakref.ref(x)
y = torch.tensor(x)
del x
self.assertIsNotNone(w_x())
del y
self.assertIsNone(w_x())
def test_tensor_resurrected_weak_ref(self):
x = torch.empty(2)
w_x = weakref.ref(x)
y = torch.empty(2)
y.grad = x
del x
x = w_x()
# Use this to manually fix weak references after dereferencing them
x._fix_weakref()
del y
x.sigmoid()
def test_storage_resurrected_weak_ref(self):
x = torch.UntypedStorage(2)
w_x = weakref.ref(x)
y = torch.tensor(x)
del x
x = w_x()
        # Use this to manually fix weak references after dereferencing them
x._fix_weakref()
del y
x.float()
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1993")
def test_tensor_fix_weakref_no_leak(self):
import weakref
called = False
a = torch.randn(1)
def callback(w):
nonlocal called
called = True
_wa = weakref.ref(a, callback)
a._fix_weakref()
del a
self.assertTrue(called)
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1993")
def test_storage_fix_weakref_no_leak(self):
import weakref
called = False
a = torch.UntypedStorage(1)
def callback(w):
nonlocal called
called = True
_wa = weakref.ref(a, callback)
a._fix_weakref()
del a
self.assertTrue(called)
def test_storage_thread_safety(self):
import threading
from concurrent.futures import ThreadPoolExecutor
NUM_ITERS = 10
NUM_THREADS = 4
# Concurrent calls to tensor.untyped_storage()
def access_untyped_storage(tensor, barrier):
barrier.wait()
return weakref.ref(tensor.untyped_storage())
for i in range(NUM_ITERS):
tensor = torch.tensor([1.0, 2.0, 3.0])
barrier = threading.Barrier(NUM_THREADS)
with ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
futures = [
executor.submit(access_untyped_storage, tensor, barrier)
for _ in range(NUM_THREADS)
]
# Check that all the storages returned were the same
for future in futures:
self.assertEqual(future.result()(), tensor.untyped_storage())
# FIXME: move to test_linalg
@torch.inference_mode()
def test_bmm_multithreaded(self):
device = 'cpu'
num_threads = torch.get_num_threads()
torch.set_num_threads(4)
batch_sizes = [1, 10]
M, N, O = 23, 8, 12
dtype = torch.float32
numpy_dtype = dtype
def invert_perm(p):
d = {x: i for i, x in enumerate(p)}
return (d[0], d[1], d[2])
def generate_inputs(num_batches):
# transposed tensors
for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
yield b1, b2
# broadcasting tensors
for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):
shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)
shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)
b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, M, N)
b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, N, O)
yield b1, b2
# zero-sized tensors
for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
b1 = torch.randn(shape1, dtype=dtype, device=device)
b2 = torch.randn(shape2, dtype=dtype, device=device)
yield b1, b2
try:
for num_batches in batch_sizes:
for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))):
res1 = torch.bmm(b1, b2)
res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \
.permute(perm3).contiguous().permute(invert_perm(perm3))
torch.bmm(b1, b2, out=res2)
expect = torch.from_numpy(
b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
self.assertEqual(expect, res1)
self.assertEqual(expect, res2)
finally:
torch.set_num_threads(num_threads)
def test_conj_neg_tolist(self):
x = torch.randn(2, dtype=torch.cfloat)
y1 = x.conj()
y1_expect = x.conj_physical()
y2 = y1.imag
self.assertEqual(y1, y1_expect.tolist())
self.assertEqual(y2, y1_expect.imag.tolist())
@unittest.skipIf(torch.backends.cuda.is_built(), "Skipped for cuda-enabled build")
def test_no_cuda_monkeypatch(self):
        # Note that this is not in test_cuda.py, as that whole file is skipped
        # when CUDA is not available.
with self.assertRaisesRegex(RuntimeError, "torch.cuda.Stream requires CUDA support"):
torch.cuda.Stream()
with self.assertRaisesRegex(RuntimeError, "Tried to instantiate dummy base class Event"):
torch.cuda.Event()
with self.assertRaisesRegex(RuntimeError, "Tried to instantiate dummy base class CUDAGraph"):
torch.cuda.graphs.CUDAGraph()
def test_tensor_where_scalar(self):
a = torch.arange(4.0)
not_zero = 0.001
        # b is generated through the torch.where function with not_zero as a scalar parameter
b = torch.where(a != 0, a, not_zero)
        # c is generated through the Tensor.where method with not_zero as a scalar parameter
c = a.where(a != 0, not_zero)
self.assertEqual(b, c)
def test_data_ptr_of_empty_tensor_with_storage(self):
t = torch.empty((2, 2))
self.assertNotEqual(t.data_ptr(), 0)
t.resize_((0, 2))
self.assertEqual(t.data_ptr(), 0)
def test_data_ptr_of_empty_view_with_storage(self):
t = torch.empty((2, 2))
self.assertNotEqual(t.data_ptr(), 0)
t2 = t[0:0].view(0, 1)
self.assertEqual(t2.data_ptr(), 0)
def test_size_stride(self) -> None:
t = torch.rand(2, 3, dtype=torch.float32)
self.assertEqual(t.size(0), 2)
self.assertEqual(t.size(dim=None), torch.Size([2, 3]))
self.assertEqual(t.stride(dim=None), torch.Size([3, 1]))
self.assertEqual(t.t().stride(), torch.Size([1, 3]))
def test_invalid_arg_error_handling(self) -> None:
""" Tests that errors from old TH functions are propagated back """
for invalid_val in [-1, 2**65]:
self.assertRaises((ValueError, RuntimeError), lambda: torch.set_num_threads(invalid_val))
self.assertRaises((ValueError, RuntimeError), lambda: torch.set_num_interop_threads(invalid_val))
def _get_tensor_prop(self, t):
preserved = (
id(t),
# Refcount values get modified by Dynamo resume frames
0 if TEST_WITH_TORCHDYNAMO else sys.getrefcount(t),
)
slotnames = copyreg._slotnames(t.__class__)
moved = (
slotnames,
id(t.__dict__),
tuple(t.__dict__.keys()),
[getattr(t, name, None) for name in slotnames]
)
return preserved, moved
def _checked_swap(self, t1, t2):
t1_pres, t1_moved = self._get_tensor_prop(t1)
t2_pres, t2_moved = self._get_tensor_prop(t2)
torch.utils.swap_tensors(t1, t2)
new_t1_pres, new_t1_moved = self._get_tensor_prop(t1)
new_t2_pres, new_t2_moved = self._get_tensor_prop(t2)
self.assertEqual(t1_pres, new_t1_pres)
self.assertEqual(t2_pres, new_t2_pres)
self.assertEqual(t1_moved, new_t2_moved)
self.assertEqual(t2_moved, new_t1_moved)
        # tests that PyObject slots on TensorImpl are correctly swapped by
        # checking that when a function applied to a swapped tensor returns
        # without changing the TensorImpl, the returned value (obtained
        # through the reference to the PyObject held in the TensorImpl's
        # PyObjectSlot) is still correct
self.assertEqual(id(t1.fill_(0.5)), id(t1))
self.assertEqual(id(t2.fill_(0.5)), id(t2))
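        # For example: after torch.utils.swap_tensors(t1, t2) the Python
        # objects keep their identities (id() and refcount unchanged, the
        # "preserved" properties above), while their payloads (__dict__,
        # slots, and the underlying TensorImpl -- the "moved" properties)
        # are exchanged, so t1 now behaves like the old t2 and vice versa.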
@unittest.skipIf(TEST_WITH_TORCHDYNAMO, "Dynamo adds weakrefs")
def test_swap_basic(self):
ts = [
torch.rand(2),
torch.rand(3, 3),
torch.empty(3, dtype=torch.int),
TwoTensor(torch.rand(4), torch.rand(4))
]
for t1, t2 in itertools.combinations(ts, 2):
t1 = t1.clone()
t2 = t2.clone()
t2.foo = "bar"
holder = []
holder.append(t1)
self._checked_swap(t1, t2)
self.assertIs(holder[0], t1)
self.assertEqual(t1.foo, "bar")
if t1.is_floating_point():
t3 = t1.detach().clone().requires_grad_(True)
out = t3 * 2
torch.utils.swap_tensors(t3, t2)
with self.assertRaisesRegex(RuntimeError, "AccumulateGrad node that was poisoned by swap_tensors"):
out.sum().backward()
_wr = weakref.ref(t1)
with self.assertRaisesRegex(RuntimeError, "has weakref"):
torch.utils.swap_tensors(t1, t2)
@unittest.skipIf(TEST_WITH_TORCHDYNAMO, "Dynamo adds weakrefs")
def test_swap_fail_slots(self):
class MyTwoTensor(TwoTensor):
__slots__ = ("a", "b")
class MyTwoTensor2(TwoTensor):
__slots__ = ("b", "a")
class MyTwoTensor3(TwoTensor):
__slots__ = ("a", "b", "c", "d")
class MyTwoTensor4(TwoTensor):
__slots__ = ("a", "c")
t1 = torch.rand(4)
t2 = TwoTensor(torch.rand(4), torch.rand(4))
t3 = MyTwoTensor(torch.rand(4), torch.rand(4))
t4 = MyTwoTensor(torch.rand(4), torch.rand(4))
t5 = MyTwoTensor2(torch.rand(4), torch.rand(4))
t6 = MyTwoTensor3(torch.rand(4), torch.rand(4))
t7 = MyTwoTensor3(torch.rand(4), torch.rand(4))
t8 = MyTwoTensor4(torch.rand(4), torch.rand(4))
self._checked_swap(t1, t2)
with self.assertRaisesRegex(RuntimeError, "Cannot swap t1 and t2 if they have different slots"):
torch.utils.swap_tensors(t1, t3)
with self.assertRaisesRegex(RuntimeError, "Cannot swap t1 and t2 if they have different slots"):
torch.utils.swap_tensors(t2, t3)
with self.assertRaisesRegex(RuntimeError, "Cannot swap t1 and t2 if they have different slots"):
torch.utils.swap_tensors(t2, t8)
self._checked_swap(t3, t4)
self._checked_swap(t3, t5)
with self.assertRaisesRegex(RuntimeError, "Cannot swap t1 and t2 if they have different slots"):
torch.utils.swap_tensors(t3, t6)
t3.c = "foo"
t4.d = "bar"
self._checked_swap(t3, t4)
self.assertEqual(t4.c, "foo")
self.assertEqual(t3.d, "bar")
t6.c = "cat"
t7.d = "dog"
self._checked_swap(t6, t7)
@unittest.skipIf(torch.cuda.is_available(), "Test specific for CPU")
def test_bf16_supported_on_cpu(self):
self.assertFalse(torch.cuda.is_bf16_supported())
def test_tensor_with_grad_to_scalar_warning(self) -> None:
with (warnings.catch_warnings(record=True) as w,
set_warn_always_context(True)):
warnings.simplefilter("always")
x = torch.tensor(2.0, requires_grad=True)
math.pow(x, 3) # calling this results in a warning
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, UserWarning))
self.assertIn(
"Converting a tensor with requires_grad=True to a scalar may lead to unexpected behavior.",
str(w[0].message)
)
def test_tensor_item_no_warning(self):
with (warnings.catch_warnings(record=True) as w,
set_warn_always_context(True)):
warnings.simplefilter("always")
x = torch.tensor(2.0, requires_grad=True)
max(x, 3) # No warning
x.item() # No warning
self.assertEqual(len(w), 0)
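    # `set_warn_always_context` above is assumed to be a context manager from
    # the shared test utilities; a minimal sketch of its likely shape:
    #
    #   @contextlib.contextmanager
    #   def set_warn_always_context(new_val: bool):
    #       old_val = torch.is_warn_always_enabled()
    #       torch.set_warn_always(new_val)
    #       try:
    #           yield
    #       finally:
    #           torch.set_warn_always(old_val)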
# The following block extends TestTorch with negative dim wrapping tests
# FIXME: replace these with OpInfo sample inputs or systematic OpInfo tests
# Functions to test negative dimension wrapping
METHOD = 1
INPLACE_METHOD = 2
FUNCTIONAL = 4
DIM_ARG: None = None
def make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim=0):
def neg_dim_test(self):
if isinstance(tensor_arg, list):
assert METHOD not in types and INPLACE_METHOD not in types
x = [torch.randn(arg) for arg in tensor_arg]
ndim = len(tensor_arg[-1])
else:
x = torch.randn(*tensor_arg)
ndim = len(tensor_arg)
ndim += extra_dim
n_dim_to_test = sum(e is DIM_ARG for e in arg_constr())
for dims_val in combinations(range(ndim), n_dim_to_test):
arg = arg_constr()
arg_neg = copy.deepcopy(arg)
idx = 0
for i, v in enumerate(arg):
if v is DIM_ARG:
arg[i] = dims_val[idx]
arg_neg[i] = dims_val[idx] - ndim
idx += 1
if METHOD in types:
a = getattr(x, name)(*arg)
b = getattr(x, name)(*arg_neg)
self.assertEqual(a, b)
if INPLACE_METHOD in types:
a = x.clone()
getattr(a, name + '_')(*arg)
b = x.clone()
getattr(b, name + '_')(*arg_neg)
self.assertEqual(a, b)
if FUNCTIONAL in types:
a = getattr(torch, name)(x, *arg)
b = getattr(torch, name)(x, *arg_neg)
self.assertEqual(a, b)
return neg_dim_test
def idx_tensor(size, max_val):
return torch.LongTensor(*size).random_(0, max_val - 1)
def add_neg_dim_tests():
neg_dim_tests = [
('narrow', (10, 20, 30), lambda: [DIM_ARG, 0, 5], [METHOD]),
('transpose', (10, 20, 30), lambda: [DIM_ARG, DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
('size', (10, 20, 30), lambda: [DIM_ARG], [METHOD]),
('cat', [(2, 3, 4), (2, 3, 4)], lambda: [DIM_ARG], [FUNCTIONAL]),
('chunk', (10, 20, 30), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
('gather', (10, 20), lambda: [DIM_ARG, idx_tensor((10, 20), 10)], [METHOD, FUNCTIONAL]),
('index_select', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10)], [METHOD, FUNCTIONAL]),
('split', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
('squeeze', (10, 1, 20, 1), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
('unbind', (2, 3, 4), lambda: [DIM_ARG], [FUNCTIONAL]),
('unsqueeze', (10, 20), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL], 1),
('logcumsumexp', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('cumprod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('cumsum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('cummax', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('cummin', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('mean', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('median', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('nanmedian', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('mode', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('norm', (10, 20), lambda: [2, DIM_ARG], [METHOD, FUNCTIONAL]),
('prod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('std', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('sum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('var', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('kthvalue', (10, 20), lambda: [3, DIM_ARG], [METHOD, FUNCTIONAL]),
('max', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('min', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('sort', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
('topk', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
('renorm', (10, 20), lambda: [2, DIM_ARG, 1], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
('index_add', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
('index_copy', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
('index_fill', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), 12], [INPLACE_METHOD]),
('scatter', (10, 10), lambda: [DIM_ARG, idx_tensor((10, 10), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
('select', (10, 20), lambda: [DIM_ARG, 3], [METHOD]),
('unfold', (10, 20), lambda: [DIM_ARG, 5, 2], [METHOD]),
]
for decl in neg_dim_tests:
if len(decl) == 4:
name, tensor_arg, arg_constr, types = decl
extra_dim = 0
elif len(decl) == 5:
name, tensor_arg, arg_constr, types, extra_dim = decl
test_name = 'test_' + name + '_neg_dim'
assert not hasattr(TestTorch, test_name), "Duplicated test name: " + test_name
setattr(TestTorch, test_name, make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim))
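# For illustration: after add_neg_dim_tests() runs, a generated test such as
# TestTorch.test_transpose_neg_dim checks, for every valid pair of dims on a
# 3-d tensor, that e.g. x.transpose(0, 2) matches x.transpose(-3, -1), and it
# does so for the method, in-place method, and functional variants alike.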
# TODO: these empty classes are temporarily instantiated for XLA compatibility;
# once XLA updates its test suite, they should be removed
|
TestTorch
|
python
|
mlflow__mlflow
|
mlflow/types/llm.py
|
{
"start": 13250,
"end": 13802
}
|
class ____(_BaseDataclass):
"""
Definition for tools that can be called by the model.
Args:
function (:py:class:`FunctionToolDefinition`): The definition of a function tool.
type (str): The type of the tool. Currently only "function" is supported.
"""
function: FunctionToolDefinition
type: Literal["function"] = "function"
def __post_init__(self):
self._validate_literal("type", ["function"], True)
self._convert_dataclass("function", FunctionToolDefinition, True)
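    # For illustration, a serialized tool definition has roughly this shape
    # (the exact fields come from FunctionToolDefinition):
    #   {"type": "function", "function": {"name": ..., "parameters": ...}}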
@dataclass
|
ToolDefinition
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-sling/dagster_sling_tests/test_sling_replication_collection_component.py
|
{
"start": 7798,
"end": 11968
}
|
class ____(TestTranslation):
def test_translation(
self,
attributes: Mapping[str, Any],
assertion: Callable[[AssetSpec], bool],
key_modifier: Optional[Callable[[AssetKey], AssetKey]],
) -> None:
defs = build_component_defs_for_test(
SlingReplicationCollectionComponent,
{
"connections": {},
"replications": [{"path": str(REPLICATION_PATH), "translation": attributes}],
},
)
key = AssetKey("input_duckdb")
if key_modifier:
key = key_modifier(key)
assets_def = defs.resolve_assets_def(key)
assert assertion(assets_def.get_asset_spec(key)), (
f"Asset spec {assets_def.get_asset_spec(key)} does not match assertion {assertion}"
)
def test_scaffold_sling():
with create_defs_folder_sandbox() as sandbox:
defs_path = sandbox.scaffold_component(component_cls=SlingReplicationCollectionComponent)
assert (defs_path / "defs.yaml").exists()
assert (defs_path / "replication.yaml").exists()
def test_spec_is_available_in_scope() -> None:
with temp_sling_component_instance(
[
{
"path": "./replication.yaml",
"translation": {"metadata": {"asset_key": "{{ spec.key.path }}"}},
}
]
) as (_, defs):
assets_def: AssetsDefinition = defs.resolve_assets_def("input_duckdb")
assert assets_def.get_asset_spec(AssetKey("input_duckdb")).metadata["asset_key"] == [
"input_duckdb"
]
def test_subclass_override_get_asset_spec() -> None:
"""Test that subclasses of SlingReplicationCollectionComponent can override get_asset_spec method."""
class CustomSlingReplicationComponent(SlingReplicationCollectionComponent):
def get_asset_spec(self, stream_definition: Mapping[str, Any]) -> AssetSpec:
# Override to add custom metadata and tags
base_spec = super().get_asset_spec(stream_definition)
return base_spec.replace_attributes(
metadata={"custom_override": "test_value"}, tags={"custom_tag": "override_test"}
)
defs = build_component_defs_for_test(
CustomSlingReplicationComponent,
{
"connections": {},
"replications": [{"path": str(REPLICATION_PATH)}],
},
)
# Verify that the custom get_asset_spec method is being used
assets_def: AssetsDefinition = defs.resolve_assets_def("input_duckdb")
asset_spec = assets_def.get_asset_spec(AssetKey("input_duckdb"))
# Check that our custom metadata and tags are present
assert asset_spec.metadata["custom_override"] == "test_value"
assert asset_spec.tags["custom_tag"] == "override_test"
# Verify that the asset keys are still correct
assert defs.resolve_asset_graph().get_all_asset_keys() == {
AssetKey("input_csv"),
AssetKey("input_duckdb"),
}
def map_spec(spec: AssetSpec) -> AssetSpec:
return spec.replace_attributes(tags={"is_custom_spec": "yes"})
def map_spec_to_attributes(spec: AssetSpec):
return AssetAttributesModel(tags={"is_custom_spec": "yes"})
def map_spec_to_attributes_dict(spec: AssetSpec) -> dict[str, Any]:
return {"tags": {"is_custom_spec": "yes"}}
@pytest.mark.parametrize("map_fn", [map_spec, map_spec_to_attributes, map_spec_to_attributes_dict])
def test_udf_map_spec(map_fn: Callable[[AssetSpec], Any]) -> None:
class DebugSlingReplicationComponent(SlingReplicationCollectionComponent):
@classmethod
def get_additional_scope(cls) -> Mapping[str, Any]:
return {"map_spec": map_fn}
defs = build_component_defs_for_test(
DebugSlingReplicationComponent,
{
"connections": {},
"replications": [
{"path": str(REPLICATION_PATH), "translation": "{{ map_spec(spec) }}"}
],
},
)
assets_def: AssetsDefinition = defs.resolve_assets_def(AssetKey("input_duckdb"))
assert assets_def.get_asset_spec(AssetKey("input_duckdb")).tags["is_custom_spec"] == "yes"
|
TestSlingTranslation
|
python
|
getsentry__sentry
|
src/sentry/pipeline/types.py
|
{
"start": 472,
"end": 612
}
|
class ____:
"""Attributes to describe a pipeline in analytics records."""
event_type: str
pipeline_type: str
|
PipelineAnalyticsEntry
|
python
|
django__django
|
tests/postgres_tests/test_constraints.py
|
{
"start": 1169,
"end": 11723
}
|
class ____(PostgreSQLTestCase):
get_opclass_query = """
SELECT opcname, c.relname FROM pg_opclass AS oc
JOIN pg_index as i on oc.oid = ANY(i.indclass)
JOIN pg_class as c on c.oid = i.indexrelid
WHERE c.relname = %s
"""
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_check_constraint_range_value(self):
constraint_name = "ints_between"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
condition=Q(ints__contained_by=NumericRange(10, 30)),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(20, 50))
RangesModel.objects.create(ints=(10, 30))
def test_check_constraint_array_contains(self):
constraint = CheckConstraint(
condition=Q(field__contains=[1]),
name="array_contains",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(IntegerArrayModel, IntegerArrayModel())
constraint.validate(IntegerArrayModel, IntegerArrayModel(field=[1]))
def test_check_constraint_array_length(self):
constraint = CheckConstraint(
condition=Q(field__len=1),
name="array_length",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(IntegerArrayModel, IntegerArrayModel())
constraint.validate(IntegerArrayModel, IntegerArrayModel(field=[1]))
def test_check_constraint_daterange_contains(self):
constraint_name = "dates_contains"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
condition=Q(dates__contains=F("dates_inner")),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
date_1 = datetime.date(2016, 1, 1)
date_2 = datetime.date(2016, 1, 4)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2.replace(day=5)),
)
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2),
)
def test_check_constraint_datetimerange_contains(self):
constraint_name = "timestamps_contains"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
condition=Q(timestamps__contains=F("timestamps_inner")),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
datetime_1 = datetime.datetime(2016, 1, 1)
datetime_2 = datetime.datetime(2016, 1, 2, 12)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2.replace(hour=13)),
)
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2),
)
def test_check_constraint_range_contains(self):
constraint = CheckConstraint(
condition=Q(ints__contains=(1, 5)),
name="ints_contains",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(6, 10)))
def test_check_constraint_range_lower_upper(self):
constraint = CheckConstraint(
condition=Q(ints__startswith__gte=0) & Q(ints__endswith__lte=99),
name="ints_range_lower_upper",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(-1, 20)))
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(0, 100)))
constraint.validate(RangesModel, RangesModel(ints=(0, 99)))
def test_check_constraint_range_lower_with_nulls(self):
constraint = CheckConstraint(
condition=Q(ints__isnull=True) | Q(ints__startswith__gte=0),
name="ints_optional_positive_range",
)
constraint.validate(RangesModel, RangesModel())
constraint = CheckConstraint(
condition=Q(ints__startswith__gte=0),
name="ints_positive_range",
)
constraint.validate(RangesModel, RangesModel())
def test_opclass(self):
constraint = UniqueConstraint(
name="test_opclass",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint.name, self.get_constraints(Scene._meta.db_table))
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
def test_opclass_multiple_columns(self):
constraint = UniqueConstraint(
name="test_opclass_multiple",
fields=["scene", "setting"],
opclasses=["varchar_pattern_ops", "text_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
expected_opclasses = (
("varchar_pattern_ops", constraint.name),
("text_pattern_ops", constraint.name),
)
self.assertCountEqual(cursor.fetchall(), expected_opclasses)
def test_opclass_partial(self):
constraint = UniqueConstraint(
name="test_opclass_partial",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
condition=Q(setting__contains="Sir Bedemir's Castle"),
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
@skipUnlessDBFeature("supports_covering_indexes")
def test_opclass_include(self):
constraint = UniqueConstraint(
name="test_opclass_include",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
include=["setting"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
@skipUnlessDBFeature("supports_expression_indexes")
def test_opclass_func(self):
constraint = UniqueConstraint(
OpClass(Lower("scene"), name="text_pattern_ops"),
name="test_opclass_func",
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
constraints = self.get_constraints(Scene._meta.db_table)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertIn(constraint.name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("text_pattern_ops", constraint.name)],
)
Scene.objects.create(scene="Scene 10", setting="The dark forest of Ewing")
with self.assertRaises(IntegrityError), transaction.atomic():
Scene.objects.create(scene="ScEnE 10", setting="Sir Bedemir's Castle")
Scene.objects.create(scene="Scene 5", setting="Sir Bedemir's Castle")
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
Scene.objects.create(scene="ScEnE 10", setting="Sir Bedemir's Castle")
def test_opclass_func_validate_constraints(self):
constraint_name = "test_opclass_func_validate_constraints"
constraint = UniqueConstraint(
OpClass(Lower("scene"), name="text_pattern_ops"),
name="test_opclass_func_validate_constraints",
)
Scene.objects.create(scene="First scene")
# Non-unique scene.
msg = f"Constraint “{constraint_name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(Scene, Scene(scene="first Scene"))
constraint.validate(Scene, Scene(scene="second Scene"))
|
SchemaTests
|
python
|
keras-team__keras
|
keras/src/layers/preprocessing/image_preprocessing/random_sharpness_test.py
|
{
"start": 177,
"end": 2143
}
|
class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomSharpness,
init_kwargs={
"factor": 0.75,
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_random_sharpness_value_range(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
layer = layers.RandomSharpness(0.2)
adjusted_image = layer(image)
self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
self.assertTrue(keras.ops.numpy.all(adjusted_image <= 1))
def test_random_sharpness_no_op(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
inputs = np.random.random((2, 8, 8, 3))
else:
inputs = np.random.random((2, 3, 8, 8))
layer = layers.RandomSharpness((0.5, 0.5))
output = layer(inputs, training=False)
self.assertAllClose(inputs, output, atol=1e-3, rtol=1e-5)
def test_random_sharpness_randomness(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)[:5]
layer = layers.RandomSharpness(0.2)
adjusted_images = layer(image)
self.assertNotAllClose(adjusted_images, image)
def test_tf_data_compatibility(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((2, 8, 8, 3))
else:
input_data = np.random.random((2, 3, 8, 8))
layer = layers.RandomSharpness(
factor=0.5, data_format=data_format, seed=1337
)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
|
RandomSharpnessTest
|
python
|
PyCQA__isort
|
isort/output.py
|
{
"start": 27663,
"end": 28660
}
|
class ____(str):
comments: list[str]
def __new__(
cls: type["_LineWithComments"], value: Any, comments: list[str]
) -> "_LineWithComments":
instance = super().__new__(cls, value)
instance.comments = comments
return instance
def _ensure_newline_before_comment(output: list[str]) -> list[str]:
new_output: list[str] = []
def is_comment(line: str | None) -> bool:
return line.startswith("#") if line else False
for line, prev_line in zip(output, [None, *output], strict=False):
if is_comment(line) and prev_line != "" and not is_comment(prev_line):
new_output.append("")
new_output.append(line)
return new_output
def _with_star_comments(parsed: parse.ParsedContent, module: str, comments: list[str]) -> list[str]:
star_comment = parsed.categorized_comments["nested"].get(module, {}).pop("*", None)
if star_comment:
return [*comments, star_comment]
return comments
|
_LineWithComments
|
python
|
getsentry__sentry
|
src/sentry/analytics/base.py
|
{
"start": 218,
"end": 883
}
|
class ____(Service, abc.ABC):
__all__ = ("record", "validate")
event_manager = default_manager
def record(self, event: Event) -> None:
"""
Record an event. Must be an instance of a subclass of `Event`.
>>> analytics.record(
... MyEvent(
... some_id=123,
... some_prop="abc"
... )
... )
"""
self.record_event_envelope(EventEnvelope(event=event))
def record_event_envelope(self, envelope: EventEnvelope) -> None:
pass
def setup(self) -> None:
# Load default event types
import sentry.analytics.events # NOQA
|
Analytics
|
python
|
sympy__sympy
|
sympy/logic/boolalg.py
|
{
"start": 23252,
"end": 26836
}
|
class ____(LatticeOp, BooleanFunction):
"""
Logical OR function
It evaluates its arguments in order, returning true immediately
when an argument is true, and false if they are all false.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import Or
>>> x | y
x | y
Notes
=====
The ``|`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
or. Hence, ``Or(a, b)`` and ``a | b`` will return different things if
``a`` and ``b`` are integers.
>>> Or(x, y).subs(x, 0)
y
"""
zero = true
identity = false
if TYPE_CHECKING:
def __new__(cls, *args: Boolean | bool, evaluate: bool | None = None) -> Boolean: # type: ignore
...
@property
def args(self) -> tuple[Boolean, ...]:
...
@classmethod
def _from_args(cls, args, is_commutative=None):
return super(AssocOp, cls).__new__(cls, *args)
@classmethod
def _new_args_filter(cls, args):
newargs = []
rel = []
args = BooleanFunction.binary_check_and_simplify(*args)
for x in args:
if x.is_Relational:
c = x.canonical
if c in rel:
continue
nc = c.negated.canonical
if any(r == nc for r in rel):
return [true]
rel.append(c)
newargs.append(x)
return LatticeOp._new_args_filter(newargs, Or)
def _eval_subs(self, old, new):
args = []
bad = None
for i in self.args:
try:
i = i.subs(old, new)
except TypeError:
# store TypeError
if bad is None:
bad = i
continue
if i == True:
return true
elif i != False:
args.append(i)
if bad is not None:
# let it raise
bad.subs(old, new)
# If old is Or, replace the parts of the arguments with new if all
# are there
if isinstance(old, Or):
old_set = set(old.args)
if old_set.issubset(args):
args = set(args) - old_set
args.add(new)
return self.func(*args)
def _eval_as_set(self):
from sympy.sets.sets import Union
return Union(*[arg.as_set() for arg in self.args])
def _eval_rewrite_as_Nand(self, *args, **kwargs):
return Nand(*[Not(arg) for arg in self.args])
def _eval_simplify(self, **kwargs):
from sympy.core.relational import Le, Ge, Eq
lege = self.atoms(Le, Ge)
if lege:
reps = {i: self.func(
Eq(i.lhs, i.rhs), i.strict) for i in lege}
return self.xreplace(reps)._eval_simplify(**kwargs)
# standard simplify
rv = super()._eval_simplify(**kwargs)
if not isinstance(rv, Or):
return rv
patterns = _simplify_patterns_or()
return _apply_patternbased_simplification(rv, patterns,
kwargs['measure'], true)
def to_anf(self, deep=True):
args = range(1, len(self.args) + 1)
args = (combinations(self.args, j) for j in args)
args = chain.from_iterable(args) # powerset
args = (And(*arg) for arg in args)
args = (to_anf(x, deep=deep) if deep else x for x in args)
return Xor(*list(args), remove_true=False)
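    # For illustration: Or(x, y).to_anf() returns Xor(x, y, And(x, y)),
    # i.e. x | y == x ^ y ^ (x & y); the powerset construction above
    # generalizes this inclusion-exclusion identity to any number of args.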
|
Or
|
python
|
keon__algorithms
|
tests/test_strings.py
|
{
"start": 11049,
"end": 12502
}
|
class ____(unittest.TestCase):
"""[summary]
Test for the file roman_to_int.py
Arguments:
unittest {[type]} -- [description]
"""
def test_roman_to_int(self):
self.assertEqual(621, roman_to_int("DCXXI"))
self.assertEqual(1, roman_to_int("I"))
self.assertEqual(3999, roman_to_int("MMMCMXCIX"))
# class TestStripUrlParams(unittest.TestCase):
# """[summary]
# Test for the file strip_urls_params.py
# Arguments:
# unittest {[type]} -- [description]
# """
# def test_strip_url_params1(self):
# self.assertEqual(strip_url_params1("www.saadbenn.com?a=1&b=2&a=2"),
# "www.saadbenn.com?a=1&b=2")
# self.assertEqual(strip_url_params1("www.saadbenn.com?a=1&b=2",
# ['b']), "www.saadbenn.com?a=1")
# def test_strip_url_params2(self):
# self.assertEqual(strip_url_params2("www.saadbenn.com?a=1&b=2&a=2"),
# "www.saadbenn.com?a=1&b=2")
# self.assertEqual(strip_url_params2("www.saadbenn.com?a=1&b=2",
    # ['b']), "www.saadbenn.com?a=1")
# def test_strip_url_params3(self):
# self.assertEqual(strip_url_params3("www.saadbenn.com?a=1&b=2&a=2"),
# "www.saadbenn.com?a=1&b=2")
# self.assertEqual(strip_url_params3("www.saadbenn.com?a=1&b=2",
# ['b']), "www.saadbenn.com?a=1")
|
TestRomanToInt
|
python
|
neetcode-gh__leetcode
|
python/0136-single-number.py
|
{
"start": 0,
"end": 152
}
|
class ____:
def singleNumber(self, nums: List[int]) -> int:
res = 0
for n in nums:
res = n ^ res
return res
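    # Why XOR works: n ^ n == 0 and XOR is commutative and associative, so
    # every value that appears twice cancels out and only the single number
    # is left in `res`.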
|
Solution
|
python
|
ray-project__ray
|
python/ray/llm/_internal/serve/engines/vllm/vllm_engine.py
|
{
"start": 4464,
"end": 20989
}
|
class ____(LLMEngine):
def __init__(
self,
llm_config: LLMConfig,
):
"""Create a vLLM Engine class
Args:
llm_config: The llm configuration for this engine
"""
super().__init__(llm_config)
self.llm_config = llm_config
if vllm is None:
raise ImportError(
"vLLM is not installed. Please install it with `pip install ray[llm]`."
)
from vllm import envs as vllm_envs
if hasattr(vllm_envs, "VLLM_USE_V1") and not vllm_envs.VLLM_USE_V1:
logger.error(
"vLLM v0 is fully deprecated. As a result in Ray Serve LLM only v1 is supported."
)
self.llm_config.setup_engine_backend()
self._running = False
# vLLM Integration points. Will be set through .start()
self._engine_client = None
self._oai_models: Optional["OpenAIServingModels"] = None
self._oai_serving_chat: Optional["OpenAIServingChat"] = None
self._oai_serving_completion: Optional["OpenAIServingCompletion"] = None
self._oai_serving_embedding: Optional["OpenAIServingEmbedding"] = None
self._oai_serving_transcription: Optional["OpenAIServingTranscription"] = None
self._oai_serving_scores: Optional["ServingScores"] = None
async def start(self) -> None:
"""Start the vLLM engine.
If the engine is already running, do nothing.
"""
if self._running:
# The engine is already running!
logger.info("Skipping engine restart because the engine is already running")
return
from vllm.entrypoints.openai.api_server import init_app_state
callback = self.llm_config.get_or_create_callback()
await callback.run_callback("on_before_node_init")
if callback.ctx.run_init_node:
await initialize_node(self.llm_config)
await callback.run_callback("on_after_node_init")
(
vllm_engine_args,
vllm_frontend_args,
vllm_engine_config,
) = self._prepare_engine_config(callback.ctx)
# Apply checkpoint info to the llm_config.
# This is needed for capturing model capabilities
# (e.g. supports vision, etc.) on the llm_config.
config = self.llm_config.get_engine_config()
self.llm_config.apply_checkpoint_info(
vllm_engine_config.model_config.model,
trust_remote_code=config.trust_remote_code,
)
self._engine_client = self._start_async_llm_engine(
vllm_engine_args,
vllm_engine_config,
callback.ctx.placement_group,
)
state = State()
        # TODO (Kourosh): There might be some variables that need protection.
args = argparse.Namespace(
**vllm_frontend_args.__dict__,
**vllm_engine_args.__dict__,
)
await init_app_state(
engine_client=self._engine_client,
# TODO (ahao): remove vllm_config for vllm v1.12
vllm_config=vllm_engine_config,
state=state,
args=args,
)
self._oai_models = state.openai_serving_models
self._oai_serving_chat = state.openai_serving_chat
self._oai_serving_completion = state.openai_serving_completion
self._oai_serving_embedding = state.openai_serving_embedding
self._oai_serving_transcription = state.openai_serving_transcription
self._oai_serving_scores = state.openai_serving_scores
self._validate_openai_serving_models()
self._validate_engine_client()
self._running = True
logger.info("Started vLLM engine.")
def _validate_openai_serving_models(self):
assert self._oai_models is not None, "oai_models is not initialized"
assert hasattr(
self._oai_models, "lora_requests"
), "oai_models must have a lora_requests attribute"
assert hasattr(
self._oai_models, "load_lora_adapter"
), "oai_models must have a load_lora_adapter attribute"
def _validate_openai_serving_chat(self):
assert hasattr(
self._oai_serving_chat, "create_chat_completion"
), "oai_serving_chat must have a create_chat_completion attribute"
def _validate_openai_serving_completion(self):
assert hasattr(
self._oai_serving_completion, "create_completion"
), "oai_serving_completion must have a create_completion attribute"
def _validate_openai_serving_embedding(self):
assert hasattr(
self._oai_serving_embedding, "create_embedding"
), "oai_serving_embedding must have a create_embedding attribute"
def _validate_openai_serving_transcription(self):
assert hasattr(
self._oai_serving_transcription, "create_transcription"
), "oai_serving_transcription must have a create_transcription attribute"
def _validate_openai_serving_scores(self):
assert hasattr(
self._oai_serving_scores, "create_score"
), "oai_serving_scores must have a create_score attribute"
def _validate_engine_client(self):
assert hasattr(
self._engine_client, "check_health"
), "engine_client must have a check_health attribute"
def _prepare_engine_config(
self, callback_ctx: CallbackCtx
) -> Tuple["AsyncEngineArgs", "FrontendArgs", "VllmConfig"]:
"""Prepare the engine config to start the engine.
Args:
callback_ctx: The callback context.
Returns:
A tuple of:
                engine_args: vLLM's internal engine arguments (flattened).
                frontend_args: vLLM's internal frontend arguments (flattened).
                engine_config: vLLM's internal engine config (nested).
"""
engine_config: VLLMEngineConfig = self.llm_config.get_engine_config()
if engine_config.use_gpu:
# Create engine config on a task with access to GPU,
# as GPU capability may be queried.
ref = (
ray.remote(
num_cpus=0,
num_gpus=0.001,
accelerator_type=self.llm_config.accelerator_type,
)(_get_vllm_engine_config)
.options(
runtime_env=callback_ctx.runtime_env,
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=callback_ctx.placement_group,
),
)
.remote(self.llm_config)
)
vllm_engine_args, vllm_engine_config = ray.get(ref)
else:
vllm_engine_args, vllm_engine_config = _get_vllm_engine_config(
self.llm_config
)
vllm_frontend_args = FrontendArgs(**engine_config.frontend_kwargs)
return vllm_engine_args, vllm_frontend_args, vllm_engine_config
def _start_async_llm_engine(
self,
vllm_engine_args: "AsyncEngineArgs",
vllm_engine_config: "VllmConfig",
placement_group: PlacementGroup,
) -> "EngineClient":
"""Creates an async LLM engine from the engine arguments."""
from vllm.v1.engine.async_llm import AsyncLLM
from vllm.v1.executor.abstract import Executor
vllm_engine_config.parallel_config.placement_group = placement_group
_clear_current_platform_cache()
custom_stat_loggers = None
if self.llm_config.log_engine_metrics:
from vllm.v1.metrics.ray_wrappers import RayPrometheusStatLogger
# V1 AsyncLLM does not yet support add_logger: https://github.com/vllm-project/vllm/issues/17702
# Use `disable_log_stats: False` and `log_engine_metrics: False` as
# a workaround to enable PrometheusStatLogger instead.
custom_stat_loggers = [RayPrometheusStatLogger]
executor_class = Executor.get_class(vllm_engine_config)
logger.info(f"Using executor class: {executor_class}")
engine_client = AsyncLLM(
vllm_config=vllm_engine_config,
executor_class=executor_class,
log_stats=not vllm_engine_args.disable_log_stats,
stat_loggers=custom_stat_loggers,
)
return engine_client
async def resolve_lora(self, disk_lora_model: DiskMultiplexConfig):
from vllm.entrypoints.openai.protocol import LoadLoRAAdapterRequest
self._validate_openai_serving_models()
if disk_lora_model.model_id in self._oai_models.lora_requests:
# Lora is already loaded, return
return
lora_request = await self._oai_models.load_lora_adapter( # type: ignore[attr-defined]
request=LoadLoRAAdapterRequest(
lora_name=disk_lora_model.model_id,
lora_path=disk_lora_model.local_path,
)
)
if isinstance(lora_request, VLLMErrorResponse):
raise ValueError(f"Failed to load lora model: {lora_request.error.message}")
def _create_raw_request(
self,
request: Union[
CompletionRequest,
ChatCompletionRequest,
EmbeddingRequest,
TranscriptionRequest,
ScoreRequest,
],
path: str,
) -> Request:
scope = {
"type": "http",
"method": "POST",
"path": path,
"headers": [(b"x-request-id", getattr(request, "request_id", "").encode())],
"query_string": b"",
}
return Request(scope)
async def chat(
self, request: ChatCompletionRequest
) -> AsyncGenerator[Union[str, ChatCompletionResponse, ErrorResponse], None]:
self._validate_openai_serving_chat()
# TODO (Kourosh): Remove when we upstream request_id attribute to vLLM.
# PR: https://github.com/vllm-project/vllm/pull/21009
# Create a fake starlette.Request object with the x-request-id header
# so that the create_chat_completion API can assign the request_id properly.
raw_request = self._create_raw_request(request, "/chat/completions")
chat_response = await self._oai_serving_chat.create_chat_completion( # type: ignore[attr-defined]
request, raw_request=raw_request
)
if isinstance(chat_response, AsyncGenerator):
async for response in chat_response:
if not isinstance(response, str):
raise ValueError(
f"Expected create_chat_completion to return a stream of strings, got an item with type {type(response)}"
)
yield response
else:
if isinstance(chat_response, VLLMErrorResponse):
yield ErrorResponse(error=ErrorInfo(**chat_response.error.model_dump()))
else:
yield ChatCompletionResponse(**chat_response.model_dump())
async def completions(
self, request: CompletionRequest
) -> AsyncGenerator[Union[str, CompletionResponse, ErrorResponse], None]:
self._validate_openai_serving_completion()
# TODO (Kourosh): Remove when we upstream request_id attribute to vLLM.
# PR: https://github.com/vllm-project/vllm/pull/21009
# Create a fake starlette.Request object with the x-request-id header
# so that the create_completion API can assign the request_id properly.
raw_request = self._create_raw_request(request, "/completions")
completion_response = await self._oai_serving_completion.create_completion( # type: ignore[attr-defined]
request,
raw_request=raw_request,
)
if isinstance(completion_response, AsyncGenerator):
async for response in completion_response:
if not isinstance(response, str):
raise ValueError(
f"Expected create_completion to return a stream of strings, got an item with type {type(response)}"
)
yield response
else:
if isinstance(completion_response, VLLMErrorResponse):
yield ErrorResponse(
error=ErrorInfo(**completion_response.error.model_dump())
)
else:
yield CompletionResponse(**completion_response.model_dump())
async def embeddings(
self, request: EmbeddingRequest
) -> AsyncGenerator[Union[EmbeddingResponse, ErrorResponse], None]:
self._validate_openai_serving_embedding()
# TODO (Kourosh): Remove when upstream is fixed to accept req_id.
# Create a fake starlette.Request object with the x-request-id header
# so that the create_embedding API can assign the request_id properly.
raw_request = self._create_raw_request(request, "/embeddings")
embedding_response = await self._oai_serving_embedding.create_embedding( # type: ignore[attr-defined]
request, raw_request=raw_request
)
if isinstance(embedding_response, VLLMErrorResponse):
yield ErrorResponse(
error=ErrorInfo(**embedding_response.error.model_dump())
)
else:
yield EmbeddingResponse(**embedding_response.model_dump())
async def transcriptions(
self, request: TranscriptionRequest
) -> AsyncGenerator[Union[str, TranscriptionResponse, ErrorResponse], None]:
self._validate_openai_serving_transcription()
# TODO (Kourosh): Remove when we upstream request_id attribute to vLLM.
# PR: https://github.com/vllm-project/vllm/pull/21009
# Create a fake starlette.Request object with the x-request-id header
# so that the create_transcription API can assign the request_id properly.
raw_request = self._create_raw_request(request, "/audio/transcriptions")
# Extract audio data from the request file
audio_data = await request.file.read()
transcription_response = await self._oai_serving_transcription.create_transcription( # type: ignore[attr-defined]
audio_data,
request,
raw_request=raw_request,
)
if isinstance(transcription_response, AsyncGenerator):
async for response in transcription_response:
if not isinstance(response, str):
raise ValueError(
f"Expected create_transcription to return a stream of strings, got an item with type {type(response)}"
)
yield response
else:
if isinstance(transcription_response, VLLMErrorResponse):
yield ErrorResponse(
error=ErrorInfo(**transcription_response.error.model_dump())
)
else:
yield TranscriptionResponse(**transcription_response.model_dump())
async def score(
self, request: ScoreRequest
) -> AsyncGenerator[Union[ScoreResponse, ErrorResponse], None]:
self._validate_openai_serving_scores()
raw_request = self._create_raw_request(request, "/score")
score_response = await self._oai_serving_scores.create_score(
request, raw_request=raw_request
)
if isinstance(score_response, VLLMErrorResponse):
yield ErrorResponse(**score_response.model_dump())
else:
yield ScoreResponse(**score_response.model_dump())
async def check_health(self) -> None:
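        # Delegate the health check to the underlying engine client; failures
        # are logged and re-raised so that the replica gets restarted.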
assert self._engine_client is not None, "engine_client is not initialized"
try:
await self._engine_client.check_health()
except BaseException as e:
logger.error("Healthcheck failed. The replica will be restarted")
raise e from None
async def reset_prefix_cache(self) -> None:
assert self._engine_client is not None, "engine_client is not initialized"
await self._engine_client.reset_prefix_cache()
async def start_profile(self) -> None:
assert self._engine_client is not None, "engine_client is not initialized"
await self._engine_client.start_profile()
async def stop_profile(self) -> None:
assert self._engine_client is not None, "engine_client is not initialized"
await self._engine_client.stop_profile()
|
VLLMEngine
|
python
|
chardet__chardet
|
chardet/charsetgroupprober.py
|
{
"start": 1248,
"end": 3849
}
|
class ____(CharSetProber):
def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
super().__init__(lang_filter=lang_filter)
self._active_num = 0
self.probers: List[CharSetProber] = []
self._best_guess_prober: Optional[CharSetProber] = None
def reset(self) -> None:
super().reset()
self._active_num = 0
for prober in self.probers:
prober.reset()
prober.active = True
self._active_num += 1
self._best_guess_prober = None
@property
def charset_name(self) -> Optional[str]:
if not self._best_guess_prober:
self.get_confidence()
if not self._best_guess_prober:
return None
return self._best_guess_prober.charset_name
@property
def language(self) -> Optional[str]:
if not self._best_guess_prober:
self.get_confidence()
if not self._best_guess_prober:
return None
return self._best_guess_prober.language
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
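        # Fan the input out to every still-active child prober: the first
        # prober reporting FOUND_IT decides the result, probers answering
        # NOT_ME are deactivated, and the group gives up once none are left.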
for prober in self.probers:
if not prober.active:
continue
state = prober.feed(byte_str)
if not state:
continue
if state == ProbingState.FOUND_IT:
self._best_guess_prober = prober
self._state = ProbingState.FOUND_IT
return self.state
if state == ProbingState.NOT_ME:
prober.active = False
self._active_num -= 1
if self._active_num <= 0:
self._state = ProbingState.NOT_ME
return self.state
return self.state
def get_confidence(self) -> float:
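        # Without a definitive answer, report the best confidence among the
        # active probers and remember which prober produced it so that
        # charset_name and language can be resolved from it later.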
state = self.state
if state == ProbingState.FOUND_IT:
return 0.99
if state == ProbingState.NOT_ME:
return 0.01
best_conf = 0.0
self._best_guess_prober = None
for prober in self.probers:
if not prober.active:
self.logger.debug("%s not active", prober.charset_name)
continue
conf = prober.get_confidence()
self.logger.debug(
"%s %s confidence = %s", prober.charset_name, prober.language, conf
)
if best_conf < conf:
best_conf = conf
self._best_guess_prober = prober
if not self._best_guess_prober:
return 0.0
return best_conf
|
CharSetGroupProber
|
python
|
scikit-learn__scikit-learn
|
sklearn/preprocessing/_data.py
|
{
"start": 69537,
"end": 76334
}
|
class ____(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1, l2 or inf) equals one.
    This transformer works with both dense numpy arrays and scipy.sparse
    matrices (use CSR format if you want to avoid the burden of a
    copy / conversion).
    Scaling inputs to unit norms is a common operation for text
    classification or clustering. For instance, the dot product of two
    l2-normalized TF-IDF vectors is the cosine similarity of the vectors,
    which is the base similarity metric for the Vector Space Model commonly
    used by the Information Retrieval community.
For an example visualization, refer to :ref:`Compare Normalizer with other
scalers <plot_all_scaling_normalizer_section>`.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : {'l1', 'l2', 'max'}, default='l2'
The norm to use to normalize each non zero sample. If norm='max'
is used, values will be rescaled by the maximum of the absolute
values.
copy : bool, default=True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
normalize : Equivalent function without the estimator API.
Notes
-----
This estimator is :term:`stateless` and does not need to be fitted.
    However, we recommend calling :meth:`fit_transform` instead of
    :meth:`transform`, as parameter validation is only performed in
    :meth:`fit`.
Examples
--------
>>> from sklearn.preprocessing import Normalizer
>>> X = [[4, 1, 2, 2],
... [1, 3, 9, 3],
... [5, 7, 5, 1]]
>>> transformer = Normalizer().fit(X) # fit does nothing.
>>> transformer
Normalizer()
>>> transformer.transform(X)
array([[0.8, 0.2, 0.4, 0.4],
[0.1, 0.3, 0.9, 0.3],
[0.5, 0.7, 0.5, 0.1]])
"""
_parameter_constraints: dict = {
"norm": [StrOptions({"l1", "l2", "max"})],
"copy": ["boolean"],
}
def __init__(self, norm="l2", *, copy=True):
self.norm = norm
self.copy = copy
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Only validates estimator's parameters.
        This method exists to (i) validate the estimator's parameters and
        (ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to estimate the normalization parameters.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted transformer.
"""
validate_data(self, X, accept_sparse="csr")
return self
def transform(self, X, copy=None):
"""Scale each non zero row of X to unit norm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an unnecessary copy.
copy : bool, default=None
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
copy = copy if copy is not None else self.copy
X = validate_data(
self, X, accept_sparse="csr", force_writeable=True, copy=copy, reset=False
)
return normalize(X, norm=self.norm, axis=1, copy=False)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
tags.requires_fit = False
tags.array_api_support = True
return tags
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"threshold": [Interval(Real, None, None, closed="neither")],
"copy": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def binarize(X, *, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix.
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
        unnecessary copy.
threshold : float, default=0.0
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : bool, default=True
If False, try to avoid a copy and binarize in place.
This is not guaranteed to always work in place; e.g. if the data is
a numpy array with an object dtype, a copy will be returned even with
copy=False.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
Binarizer : Performs binarization using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
Examples
--------
>>> from sklearn.preprocessing import binarize
>>> X = [[0.4, 0.6, 0.5], [0.6, 0.1, 0.2]]
>>> binarize(X, threshold=0.5)
array([[0., 1., 0.],
[1., 0., 0.]])
"""
X = check_array(X, accept_sparse=["csr", "csc"], force_writeable=True, copy=copy)
if sparse.issparse(X):
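        # Threshold only the explicitly stored entries, then prune the zeros
        # produced by the thresholding from the sparse structure.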
if threshold < 0:
raise ValueError("Cannot binarize a sparse matrix with threshold < 0")
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
xp, _, device = get_namespace_and_device(X)
float_dtype = _find_matching_floating_dtype(X, threshold, xp=xp)
cond = xp.astype(X, float_dtype, copy=False) > threshold
not_cond = xp.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
|
Normalizer
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/benchmarks/parameter_value_benchmark.py
|
{
"start": 1389,
"end": 6354
}
|
class ____(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks to compare effect of different parameter values on the performance."""
def _benchmark_map(self, num_parallel_calls, buffer_size):
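    # Pipeline under test: a parallel matmul map followed by a module-level
    # map_function stage, a prefetch buffer, and an artificial consumer-side
    # sleep; the benchmark sweeps num_parallel_calls and buffer_size.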
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors(
(np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
dataset = dataset.map(
math_ops.matmul, num_parallel_calls=num_parallel_calls)
dataset = dataset.map(map_function)
dataset = dataset.prefetch(buffer_size=buffer_size)
dataset = dataset.apply(testing.sleep(int(input_sleep_ms * 1000)))
name_str = ("map_max_output_sleep_ms_%.2f_input_sleep_ms_%.2f_"
"num_parallel_calls_%d_buffer_size_%d")
return self.run_and_report_benchmark(
dataset=dataset,
num_elements=10000,
name=name_str %
(max_output_sleep_ms, input_sleep_ms, num_parallel_calls, buffer_size))
def benchmark_map(self):
nums_parallel_calls = [4, 8, 12]
buffer_sizes = [10, 50, 100, 150, 200, 250, 300]
parameters_list = []
wall_time_map = {}
for num_parallel_calls in nums_parallel_calls:
for buffer_size in buffer_sizes:
parameters = (num_parallel_calls, buffer_size)
parameters_list.append(parameters)
wall_time = self._benchmark_map(num_parallel_calls, buffer_size)
wall_time_map[parameters] = wall_time
parameters_list.sort(key=lambda x: wall_time_map[x])
for parameters in parameters_list:
print("num_parallel_calls_%d_buffer_size_%d_wall_time:" % parameters,
wall_time_map[parameters])
def _benchmark_map_and_batch(self, num_parallel_calls, buffer_size):
batch_size = 16
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors(
(np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
dataset = dataset.map(
math_ops.matmul, num_parallel_calls=num_parallel_calls)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset.map(map_function)
dataset = dataset.prefetch(buffer_size=buffer_size)
dataset = dataset.apply(testing.sleep(int(input_sleep_ms * 1000)))
name_str = ("map_and_batch_max_output_sleep_ms_%.2f_input_sleep_ms_%.2f"
"_num_parallel_calls_%d_buffer_size_%d")
return self.run_and_report_benchmark(
dataset=dataset,
num_elements=1000,
name=name_str %
(max_output_sleep_ms, input_sleep_ms, num_parallel_calls, buffer_size))
def benchmark_map_and_batch(self):
nums_parallel_calls = [4, 8, 12]
buffer_sizes = [10, 50, 100, 150, 200, 250, 300]
parameters_list = []
wall_time_map = {}
for num_parallel_calls in nums_parallel_calls:
for buffer_size in buffer_sizes:
parameters = (num_parallel_calls, buffer_size)
parameters_list.append(parameters)
wall_time = self._benchmark_map_and_batch(num_parallel_calls,
buffer_size)
wall_time_map[parameters] = wall_time
parameters_list.sort(key=lambda x: wall_time_map[x])
for parameters in parameters_list:
print("num_parallel_calls_%d_buffer_size_%d_wall_time:" % parameters,
wall_time_map[parameters])
def _benchmark_interleave(self, num_parallel_calls, buffer_size):
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors(
(np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
dataset = dataset.map(math_ops.matmul)
dataset = dataset.map(map_function)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset, # pylint: disable=cell-var-from-loop
cycle_length=10,
num_parallel_calls=num_parallel_calls)
dataset = dataset.prefetch(buffer_size=buffer_size)
dataset = dataset.apply(testing.sleep(int(input_sleep_ms * 1000)))
name_str = ("interleave_max_output_sleep_ms_%.2f_input_sleep_ms_%.2f"
"_num_parallel_calls_%d_buffer_size_%d")
return self.run_and_report_benchmark(
dataset=dataset,
num_elements=10000,
name=name_str %
(max_output_sleep_ms, input_sleep_ms, num_parallel_calls, buffer_size))
def benchmark_interleave(self):
nums_parallel_calls = [4, 8, 10]
buffer_sizes = [10, 50, 100, 150, 200, 250, 300]
parameters_list = []
wall_time_map = {}
for num_parallel_calls in nums_parallel_calls:
for buffer_size in buffer_sizes:
parameters = (num_parallel_calls, buffer_size)
parameters_list.append(parameters)
wall_time = self._benchmark_interleave(num_parallel_calls, buffer_size)
wall_time_map[parameters] = wall_time
parameters_list.sort(key=lambda x: wall_time_map[x])
for parameters in parameters_list:
print("num_parallel_calls_%d_buffer_size_%d_wall_time:" % parameters,
wall_time_map[parameters])
if __name__ == "__main__":
benchmark_base.test.main()
|
ParameterValueBenchmark
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/jvm.py
|
{
"start": 24936,
"end": 31868
}
|
class ____(RegexLexer):
"""
For `Ioke <http://ioke.org/>`_ (a strongly typed, dynamic,
    prototype-based programming language) source.
.. versionadded:: 1.4
"""
name = 'Ioke'
filenames = ['*.ik']
aliases = ['ioke', 'ik']
mimetypes = ['text/x-iokesrc']
tokens = {
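        # Standard pygments RegexLexer state machine: each key below is a
        # lexer state, and each rule maps a regex to a token type and may
        # push a new state (third tuple element) or pop back with '#pop'.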
'interpolatableText': [
(r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}'
r'|\\[0-3]?[0-7]?[0-7])', String.Escape),
(r'#\{', Punctuation, 'textInterpolationRoot')
],
'text': [
(r'(?<!\\)"', String, '#pop'),
include('interpolatableText'),
(r'[^"]', String)
],
'documentation': [
(r'(?<!\\)"', String.Doc, '#pop'),
include('interpolatableText'),
(r'[^"]', String.Doc)
],
'textInterpolationRoot': [
(r'\}', Punctuation, '#pop'),
include('root')
],
'slashRegexp': [
(r'(?<!\\)/[im-psux]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\/', String.Regex),
(r'[^/]', String.Regex)
],
'squareRegexp': [
(r'(?<!\\)][im-psux]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\]', String.Regex),
(r'[^\]]', String.Regex)
],
'squareText': [
(r'(?<!\\)]', String, '#pop'),
include('interpolatableText'),
(r'[^\]]', String)
],
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r';(.*?)\n', Comment),
(r'\A#!(.*?)\n', Comment),
# Regexps
(r'#/', String.Regex, 'slashRegexp'),
(r'#r\[', String.Regex, 'squareRegexp'),
# Symbols
(r':[\w!:?]+', String.Symbol),
(r'[\w!:?]+:(?![\w!?])', String.Other),
(r':"(\\\\|\\"|[^"])*"', String.Symbol),
# Documentation
(r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()'
r'|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()'
r'|(?<=dsyntax\())\s*"', String.Doc, 'documentation'),
# Text
(r'"', String, 'text'),
(r'#\[', String, 'squareText'),
# Mimic
(r'\w[\w!:?]+(?=\s*=.*mimic\s)', Name.Entity),
# Assignment
(r'[a-zA-Z_][\w!:?]*(?=[\s]*[+*/-]?=[^=].*($|\.))',
Name.Variable),
# keywords
(r'(break|cond|continue|do|ensure|for|for:dict|for:set|if|let|'
r'loop|p:for|p:for:dict|p:for:set|return|unless|until|while|'
r'with)(?![\w!:?])', Keyword.Reserved),
# Origin
(r'(eval|mimic|print|println)(?![\w!:?])', Keyword),
# Base
(r'(cell\?|cellNames|cellOwner\?|cellOwner|cells|cell|'
r'documentation|hash|identity|mimic|removeCell\!|undefineCell\!)'
r'(?![\w!:?])', Keyword),
# Ground
(r'(stackTraceAsText)(?![\w!:?])', Keyword),
# DefaultBehaviour Literals
(r'(dict|list|message|set)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Case
(r'(case|case:and|case:else|case:nand|case:nor|case:not|case:or|'
r'case:otherwise|case:xor)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Reflection
(r'(asText|become\!|derive|freeze\!|frozen\?|in\?|is\?|kind\?|'
r'mimic\!|mimics|mimics\?|prependMimic\!|removeAllMimics\!|'
r'removeMimic\!|same\?|send|thaw\!|uniqueHexId)'
r'(?![\w!:?])', Keyword),
# DefaultBehaviour Aspects
(r'(after|around|before)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour
(r'(kind|cellDescriptionDict|cellSummary|genSym|inspect|notice)'
r'(?![\w!:?])', Keyword),
(r'(use|destructuring)', Keyword.Reserved),
# DefaultBehavior BaseBehavior
(r'(cell\?|cellOwner\?|cellOwner|cellNames|cells|cell|'
r'documentation|identity|removeCell!|undefineCell)'
r'(?![\w!:?])', Keyword),
# DefaultBehavior Internal
(r'(internal:compositeRegexp|internal:concatenateText|'
r'internal:createDecimal|internal:createNumber|'
r'internal:createRegexp|internal:createText)'
r'(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Conditions
(r'(availableRestarts|bind|error\!|findRestart|handle|'
r'invokeRestart|rescue|restart|signal\!|warn\!)'
r'(?![\w!:?])', Keyword.Reserved),
# constants
(r'(nil|false|true)(?![\w!:?])', Name.Constant),
# names
(r'(Arity|Base|Call|Condition|DateTime|Aspects|Pointcut|'
r'Assignment|BaseBehavior|Boolean|Case|AndCombiner|Else|'
r'NAndCombiner|NOrCombiner|NotCombiner|OrCombiner|XOrCombiner|'
r'Conditions|Definitions|FlowControl|Internal|Literals|'
r'Reflection|DefaultMacro|DefaultMethod|DefaultSyntax|Dict|'
r'FileSystem|Ground|Handler|Hook|IO|IokeGround|Struct|'
r'LexicalBlock|LexicalMacro|List|Message|Method|Mixins|'
r'NativeMethod|Number|Origin|Pair|Range|Reflector|Regexp Match|'
r'Regexp|Rescue|Restart|Runtime|Sequence|Set|Symbol|'
r'System|Text|Tuple)(?![\w!:?])', Name.Builtin),
# functions
(u'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
u'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)'
u'(?![\w!:?])', Name.Function),
# Numbers
(r'-?0[xX][0-9a-fA-F]+', Number.Hex),
(r'-?(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'-?\d+', Number.Integer),
(r'#\(', Punctuation),
# Operators
(r'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
r'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
r'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
r'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
r'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
r'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
u'\\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
(r'(and|nand|or|xor|nor|return|import)(?![\w!?])',
Operator),
# Punctuation
(r'(\`\`|\`|\'\'|\'|\.|\,|@@|@|\[|\]|\(|\)|\{|\})', Punctuation),
# kinds
(r'[A-Z][\w!:?]*', Name.Class),
# default cellnames
(r'[a-z_][\w!:?]*', Name)
]
}
|
IokeLexer
|
python
|
pytorch__pytorch
|
test/test_expanded_weights.py
|
{
"start": 8829,
"end": 24911
}
|
class ____(TestCase):
def _compare_ew_and_for_loop_per_sample_grads(self, op, sample_input, reduction):
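        # Compute per-sample gradients twice -- once through ExpandedWeight
        # objects and once with an explicit per-sample for loop -- and assert
        # that both paths agree for every differentiable input.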
input = sample_input.input
args = sample_input.args
kwargs = sample_input.kwargs
batch_size = input.shape[0] if len(input.shape) > 1 else 1
# get per sample grads with ExpandedWeights objects
loss_reduction = "sum" if reduction == torch.sum else "mean"
(ew_input, ew_args, ew_kwargs) = make_expanded_weight(
sample_input, batch_size, loss_reduction
)
diff_input_list = (ew_input,) + tuple(ew_args) + tuple(ew_kwargs.values())
diff_input_list = [i for i in diff_input_list if is_diff_tensor(i)]
diff_input_list = [
i.orig_weight if isinstance(i, ExpandedWeight) else i
for i in diff_input_list
]
if not diff_input_list:
return
result = run_op(op, ew_input, *ew_args, **ew_kwargs)
reduction(
result
).backward() # grad doesn't work with ExpandedWeight because it calls __torch_function__
expanded_weight_grad = tuple(
i.grad_sample if hasattr(i, "grad_sample") else i.grad
for i in diff_input_list
)
# get per sample grads with for loop
func = partial(run_op, op)
per_sample_grad = for_loop_per_sample_grad(
batch_size, reduction, input, func, *args, **kwargs
)
# check equality
self.assertEqual(len(per_sample_grad), len(expanded_weight_grad))
if loss_reduction == "mean":
# don't check equality of `input.grad`s since these vanilla tensors won't be scaled
expanded_weight_grad = expanded_weight_grad[1:]
per_sample_grad = per_sample_grad[1:]
for result_grad, expected_grad in zip(expanded_weight_grad, per_sample_grad):
self.assertEqual(result_grad, expected_grad)
@ops(
filter(lambda op: op.supports_expanded_weight, op_db),
dtypes=OpDTypes.supported,
allowed_dtypes=(torch.double,),
)
def test_expanded_weight_per_sample_grad_sum(self, device, dtype, op):
sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
for sample_input in supported_inputs(op, sample_inputs):
if (
op.name == "nn.functional.embedding"
): # embedding flips its argument order for autograd tests
sample_input = SampleInput(
sample_input.args[0],
args=(sample_input.input,),
kwargs=sample_input.kwargs,
)
self._compare_ew_and_for_loop_per_sample_grads(op, sample_input, torch.sum)
@ops(
filter(lambda op: op.supports_expanded_weight, op_db),
dtypes=OpDTypes.supported,
allowed_dtypes=(torch.double,),
)
def test_expanded_weight_per_sample_grad_mean(self, device, dtype, op):
sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
for sample_input in supported_inputs(op, sample_inputs):
if (
op.name == "nn.functional.embedding"
): # embedding flips its argument order for autograd tests
sample_input = SampleInput(
sample_input.args[0],
args=(sample_input.input,),
kwargs=sample_input.kwargs,
)
self._compare_ew_and_for_loop_per_sample_grads(op, sample_input, torch.mean)
@ops(
filter(lambda op: op.supports_expanded_weight, op_db),
dtypes=OpDTypes.supported,
allowed_dtypes=(torch.double,),
)
def test_expanded_weights_per_sample_grad_input_no_grad(self, device, dtype, op):
sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
for sample_input in supported_inputs(op, sample_inputs):
if (
op.name == "nn.functional.embedding"
): # embedding flips its argument order for autograd tests
sample_input = SampleInput(
sample_input.args[0],
args=(sample_input.input,),
kwargs=sample_input.kwargs,
)
sample_input.input.requires_grad_(False)
self._compare_ew_and_for_loop_per_sample_grads(op, sample_input, torch.mean)
@skipIfTorchDynamo("Checking error message doesn't work with dynamo")
@ops(
filter(lambda op: op.supports_expanded_weight, op_db),
dtypes=OpDTypes.supported,
allowed_dtypes=(torch.double,),
)
def test_unsupported_expand_weights(self, device, dtype, op):
sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
unsupported_inputs = supported_inputs(op, sample_inputs, supported_inputs=False)
for sample_input in unsupported_inputs:
with self.assertRaisesRegex(RuntimeError, r"Expanded Weights"):
if (
op.name == "nn.functional.embedding"
): # embedding flips its argument order for autograd tests
sample_input = SampleInput(
sample_input.args[0],
args=(sample_input.input,),
kwargs=sample_input.kwargs,
)
input = sample_input.input
batch_size = input.shape[0] if len(input.shape) > 1 else 1
# get per sample grads with ExpandedWeights objects
(ew_input, ew_args, ew_kwargs) = make_expanded_weight(
sample_input, batch_size
)
result = run_op(op, ew_input, *ew_args, **ew_kwargs)
diff_input_list = (
(ew_input,) + tuple(ew_args) + tuple(ew_kwargs.values())
)
diff_input_list = [i for i in diff_input_list if is_diff_tensor(i)]
diff_input_list = [
i.orig_weight if isinstance(i, ExpandedWeight) else i
for i in diff_input_list
]
result.sum().backward() # grad doesn't work with ExpandedWeight because it calls __torch_function__
@ops(
filter(lambda op: op.supports_expanded_weight, op_db), dtypes=OpDTypes.supported
)
def test_expanded_weight_forward(self, device, dtype, op):
sample_inputs = op.sample_inputs(device, dtype)
for sample_input in supported_inputs(op, sample_inputs):
if (
op.name == "nn.functional.embedding"
): # embedding flips its argument order for autograd tests
sample_input = SampleInput(
sample_input.args[0].clone(),
args=(sample_input.input.clone(),),
kwargs=sample_input.kwargs,
)
if (
"cuda" in device
and "max_norm" in sample_input.kwargs
and "padding_idx" in sample_input.kwargs
):
self.skipTest(
"embedding is non-determinstic in this case, see issue #74679"
)
batch_size = (
sample_input.input.shape[0] if len(sample_input.input.shape) > 1 else 1
)
for loss_reduction in ["sum", "mean"]:
(ew_input, ew_args, ew_kwargs) = make_expanded_weight(
sample_input, batch_size, loss_reduction
)
expanded_weight_result = run_op(op, ew_input, *ew_args, **ew_kwargs)
normal_result = run_op(
op, sample_input.input, *sample_input.args, **sample_input.kwargs
)
self.assertEqual(expanded_weight_result, normal_result)
def test_expanded_weight_error(self, device):
batch_size = 3
sample_input = make_tensor(
(batch_size, 4), dtype=torch.float32, device=device, requires_grad=True
)
sample_weight = make_tensor(
(4), dtype=torch.float32, device=device, requires_grad=True
)
with self.assertRaisesRegex(
RuntimeError, r"Expanded Weights encountered but cannot handle function"
):
torch.add(
sample_input,
ExpandedWeight(sample_weight, batch_size, loss_reduction="sum"),
)
def _test_embedding_model(self, model, num_embedding, device):
batch_size = 32
input = torch.randint(0, num_embedding, (batch_size, 5, 5), device=device)
return self._test_model(
partial(model, num_embedding=num_embedding), batch_size, input, device
)
def _test_conv_model(
self,
model,
input_size,
num_dim,
device,
loss_reduction="sum",
atol=1e-4,
rtol=5e-5,
):
batch_size = 32
input_ending = [input_size] * num_dim
input = torch.randn([batch_size, 3] + input_ending, device=device)
return self._test_model(
partial(model, num_dim=num_dim),
batch_size,
input,
device,
loss_reduction,
atol,
rtol,
)
def _test_model(
self,
model,
batch_size,
input,
device,
loss_reduction="sum",
atol=1e-4,
rtol=5e-5,
):
model = model(10).to(device)
targets = torch.randint(0, 10, (batch_size,), device=device)
criterion = CrossEntropyLoss(reduction=loss_reduction)
result = call_for_per_sample_grads(model, loss_reduction=loss_reduction)(input)
loss = criterion(result, targets)
loss.backward()
result = []
for weight in model.parameters():
result.append(weight.grad_sample)
del weight.grad_sample
expected = []
for i in range(batch_size):
loss = criterion(model(input[i].unsqueeze(0)), targets[i].unsqueeze(0))
expected.append(
torch.autograd.grad(loss, model.parameters(), torch.ones_like(loss))
)
expected = [torch.stack(grad) for grad in zip(*expected)]
for res, exp in zip(result, expected):
self.assertEqual(res, exp, atol=atol, rtol=rtol)
def _compute_tolerances(self, device):
is_cuda_sm86 = device.startswith("cuda") and torch.cuda.get_device_capability(
0
) == (8, 6)
return (9e-3, 5e-5) if is_cuda_sm86 else (1e-4, 5e-5)
@tf32_off()
def test_cnn_model_sum(self, device):
def convnet(num_classes, num_dim):
return nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(128, num_classes, bias=True),
)
atol, rtol = self._compute_tolerances(device)
return self._test_conv_model(convnet, 28, 2, device, atol=atol, rtol=rtol)
@tf32_off()
def test_cnn_model_mean(self, device):
def convnet(num_classes, num_dim):
return nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(128, num_classes, bias=True),
)
atol, rtol = self._compute_tolerances(device)
return self._test_conv_model(
convnet, 28, 2, device, loss_reduction="mean", atol=atol, rtol=rtol
)
@parametrize("num_dim", [1, 2, 3])
@tf32_off()
def test_instance_norm_model(self, num_dim, device):
def instance_norm_model(num_classes, num_dim):
conv_layer = (
nn.Conv1d if num_dim == 1 else nn.Conv2d if num_dim == 2 else nn.Conv3d
)
norm_layer = (
nn.InstanceNorm1d
if num_dim == 1
else nn.InstanceNorm2d
if num_dim == 2
else nn.InstanceNorm3d
)
return nn.Sequential(
conv_layer(3, 32, kernel_size=3, stride=1, padding=1),
norm_layer(32, affine=True),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(32 * (7**num_dim), num_classes, bias=True),
)
atol, rtol = self._compute_tolerances(device)
return self._test_conv_model(
instance_norm_model, 7, num_dim, device, atol=atol, rtol=rtol
)
@parametrize("num_dim", [1, 2, 3])
@tf32_off()
def test_group_norm_model(self, num_dim, device):
def group_norm_model(num_classes, num_dim):
conv_layer = (
nn.Conv1d if num_dim == 1 else nn.Conv2d if num_dim == 2 else nn.Conv3d
)
return nn.Sequential(
conv_layer(3, 32, kernel_size=3, stride=1, padding=1),
nn.GroupNorm(8, 32, affine=True),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(32 * (7**num_dim), num_classes, bias=True),
)
atol, rtol = self._compute_tolerances(device)
return self._test_conv_model(
group_norm_model, 7, num_dim, device, atol=atol, rtol=rtol
)
@parametrize("num_dim", [1, 2, 3])
@tf32_off()
def test_layer_norm_model(self, num_dim, device):
def layer_norm_model(num_classes, num_dim):
conv_layer = (
nn.Conv1d if num_dim == 1 else nn.Conv2d if num_dim == 2 else nn.Conv3d
)
normalized_shape = [7] * num_dim
return nn.Sequential(
conv_layer(3, 32, kernel_size=3, stride=1, padding=1),
nn.LayerNorm(normalized_shape, elementwise_affine=True),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(32 * (7**num_dim), num_classes, bias=True),
)
atol, rtol = self._compute_tolerances(device)
return self._test_conv_model(
layer_norm_model, 7, num_dim, device, atol=atol, rtol=rtol
)
def test_embedding_model(self, device):
def embedding_model(num_classes, num_embedding):
return nn.Sequential(
nn.Embedding(num_embedding, 15),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(375, num_classes, bias=True),
)
return self._test_embedding_model(embedding_model, 16, device)
def test_group_norm_error(self, device):
# group norm has to call native_group_norm. This checks that it hits the same errors
# that normal group norm would
N = 3
C = 5
inp = torch.randn(N, C)
with self.assertRaisesRegex(
RuntimeError, r"Expected number of channels in input to be divisible"
):
F.group_norm(inp, 2) # 5 is not divisible by 2
|
TestExpandedWeightFunctional
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/graph_stores/simple.py
|
{
"start": 2071,
"end": 6148
}
|
class ____(GraphStore):
"""
Simple Graph Store.
In this graph store, triplets are stored within a simple, in-memory dictionary.
Args:
simple_graph_store_data_dict (Optional[dict]): data dict
containing the triplets. See SimpleGraphStoreData
for more details.
"""
def __init__(
self,
data: Optional[SimpleGraphStoreData] = None,
fs: Optional[fsspec.AbstractFileSystem] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._data = data or SimpleGraphStoreData()
self._fs = fs or fsspec.filesystem("file")
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleGraphStore":
"""Load from persist dir."""
persist_path = os.path.join(persist_dir, DEFAULT_PERSIST_FNAME)
return cls.from_persist_path(persist_path, fs=fs)
@property
def client(self) -> None:
"""
Get client.
Not applicable for this store.
"""
return
def get(self, subj: str) -> List[List[str]]:
"""Get triplets."""
return self._data.graph_dict.get(subj, [])
def get_rel_map(
self, subjs: Optional[List[str]] = None, depth: int = 2, limit: int = 30
) -> Dict[str, List[List[str]]]:
"""Get depth-aware rel map."""
return self._data.get_rel_map(subjs=subjs, depth=depth, limit=limit)
def upsert_triplet(self, subj: str, rel: str, obj: str) -> None:
"""Add triplet."""
if subj not in self._data.graph_dict:
self._data.graph_dict[subj] = []
existing = self._data.graph_dict[subj]
if (rel, obj) not in map(tuple, existing):
existing.append([rel, obj])
def delete(self, subj: str, rel: str, obj: str) -> None:
"""Delete triplet."""
if subj in self._data.graph_dict:
            # Stored entries are [rel, obj] lists, so membership must be
            # tested with a list rather than a tuple.
            if [rel, obj] in self._data.graph_dict[subj]:
self._data.graph_dict[subj].remove([rel, obj])
if len(self._data.graph_dict[subj]) == 0:
del self._data.graph_dict[subj]
def persist(
self,
persist_path: str = os.path.join(DEFAULT_PERSIST_DIR, DEFAULT_PERSIST_FNAME),
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""Persist the SimpleGraphStore to a directory."""
fs = fs or self._fs
dirpath = os.path.dirname(persist_path)
if not fs.exists(dirpath):
fs.makedirs(dirpath)
with fs.open(persist_path, "w") as f:
json.dump(self._data.to_dict(), f)
def get_schema(self, refresh: bool = False) -> str:
"""Get the schema of the Simple Graph store."""
raise NotImplementedError("SimpleGraphStore does not support get_schema")
def query(self, query: str, param_map: Optional[Dict[str, Any]] = {}) -> Any:
"""Query the Simple Graph store."""
raise NotImplementedError("SimpleGraphStore does not support query")
@classmethod
def from_persist_path(
cls, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> "SimpleGraphStore":
"""Create a SimpleGraphStore from a persist directory."""
fs = fs or fsspec.filesystem("file")
if not fs.exists(persist_path):
logger.warning(
f"No existing {__name__} found at {persist_path}. "
"Initializing a new graph_store from scratch. "
)
return cls()
logger.debug(f"Loading {__name__} from {persist_path}.")
with fs.open(persist_path, "rb") as f:
data_dict = json.load(f)
data = SimpleGraphStoreData.from_dict(data_dict)
return cls(data)
@classmethod
def from_dict(cls, save_dict: dict) -> "SimpleGraphStore":
data = SimpleGraphStoreData.from_dict(save_dict)
return cls(data)
def to_dict(self) -> dict:
return self._data.to_dict()
|
SimpleGraphStore
|
python
|
getsentry__sentry
|
tests/snuba/api/endpoints/test_organization_tags.py
|
{
"start": 420,
"end": 17645
}
|
class ____(APITestCase, OccurrenceTestMixin, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1).isoformat()
def test_simple(self) -> None:
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
self.login_as(user=user)
project = self.create_project(organization=org, teams=[team])
self.store_event(
data={"event_id": "a" * 32, "tags": {"fruit": "apple"}, "timestamp": self.min_ago},
project_id=project.id,
)
self.store_event(
data={"event_id": "b" * 32, "tags": {"fruit": "orange"}, "timestamp": self.min_ago},
project_id=project.id,
)
self.store_event(
data={
"event_id": "c" * 32,
"tags": {"some_tag": "some_value"},
"timestamp": self.min_ago,
},
project_id=project.id,
)
self.store_event(
data={"event_id": "d" * 32, "tags": {"fruit": "orange"}, "timestamp": self.min_ago},
project_id=project.id,
)
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(url, {"statsPeriod": "14d"}, format="json")
assert response.status_code == 200, response.content
data = response.data
data.sort(key=lambda val: val["totalValues"], reverse=True)
assert data == [
{"name": "Level", "key": "level", "totalValues": 4},
{"name": "Fruit", "key": "fruit", "totalValues": 3},
{"name": "Some Tag", "key": "some_tag", "totalValues": 1},
]
def test_simple_flags(self) -> None:
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
self.login_as(user=user)
project = self.create_project(organization=org, teams=[team])
self.store_event(
data={
"contexts": {
"flags": {
"values": [
{"flag": "abc", "result": True},
{"flag": "def", "result": False},
]
}
},
"timestamp": before_now(minutes=1).isoformat(),
},
project_id=project.id,
)
self.store_event(
data={
"contexts": {
"flags": {
"values": [
{"flag": "abc", "result": False},
]
}
},
"timestamp": before_now(minutes=1).isoformat(),
},
project_id=project.id,
)
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(
url,
{"statsPeriod": "14d", "useFlagsBackend": "1", "dataset": "events"},
format="json",
)
assert response.status_code == 200, response.content
data = response.data
data.sort(key=lambda val: val["totalValues"], reverse=True)
assert data == [
{"key": "abc", "name": "Abc", "totalValues": 2},
{"key": "def", "name": "Def", "totalValues": 1},
]
def test_dataset_events(self) -> None:
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
self.login_as(user=user)
project = self.create_project(organization=org, teams=[team])
self.store_event(
data={"event_id": "a" * 32, "tags": {"berry": "raspberry"}, "timestamp": self.min_ago},
project_id=project.id,
)
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(url, {"statsPeriod": "14d", "dataset": "events"}, format="json")
assert response.status_code == 200, response.content
data = response.data
data.sort(key=lambda val: val["name"])
assert data == [
{"name": "Berry", "key": "berry", "totalValues": 1},
{"name": "Level", "key": "level", "totalValues": 1},
]
def test_dataset_discover(self) -> None:
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
self.login_as(user=user)
project = self.create_project(organization=org, teams=[team])
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
event = load_data("transaction")
event["tags"].extend([["apple", "fuji"]])
event.update(
{
"transaction": "example_transaction",
"event_id": uuid.uuid4().hex,
"start_timestamp": self.min_ago,
"timestamp": self.min_ago,
}
)
event["measurements"]["lcp"]["value"] = 5000
self.store_event(data=event, project_id=project.id)
discoverResponse = self.client.get(
url,
{"statsPeriod": "14d", "dataset": "discover"},
format="json",
)
assert discoverResponse.status_code == 200, discoverResponse.content
# Other tags are added by default, just check that the one we added exists
assert {"name": "Apple", "key": "apple", "totalValues": 1} in discoverResponse.data
def test_dataset_issue_platform(self) -> None:
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
self.login_as(user=user)
project = self.create_project(organization=org, teams=[team])
self.store_event(
data={"event_id": "a" * 32, "tags": {"berry": "raspberry"}, "timestamp": self.min_ago},
project_id=project.id,
)
self.process_occurrence(
event_id=uuid.uuid4().hex,
project_id=project.id,
event_data={
"title": "some problem",
"platform": "python",
"tags": {"stone_fruit": "cherry"},
"timestamp": before_now(minutes=1).isoformat(),
"received": before_now(minutes=1).isoformat(),
},
)
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(
url, {"statsPeriod": "14d", "dataset": "search_issues"}, format="json"
)
assert response.status_code == 200, response.content
data = response.data
data.sort(key=lambda val: val["name"])
assert data == [
{"name": "Level", "key": "level", "totalValues": 1},
{"name": "Stone Fruit", "key": "stone_fruit", "totalValues": 1},
]
def test_dataset_combination(self) -> None:
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
self.login_as(user=user)
project = self.create_project(organization=org, teams=[team])
# Added to Events AND Discover Datasets
self.store_event(
data={"event_id": "a" * 32, "tags": {"berry": "raspberry"}, "timestamp": self.min_ago},
project_id=project.id,
)
# Added to Discover Dataset
discoverEvent = load_data("transaction")
discoverEvent["tags"].extend([["apple", "fuji"]])
discoverEvent.update(
{
"transaction": "example_transaction",
"event_id": uuid.uuid4().hex,
"start_timestamp": self.min_ago,
"timestamp": self.min_ago,
}
)
discoverEvent["measurements"]["lcp"]["value"] = 5000
self.store_event(data=discoverEvent, project_id=project.id)
# Added to IssuePlatform Dataset
self.process_occurrence(
event_id=uuid.uuid4().hex,
project_id=project.id,
event_data={
"title": "some problem",
"platform": "python",
"tags": {"stone_fruit": "cherry"},
"timestamp": before_now(minutes=1).isoformat(),
"received": before_now(minutes=1).isoformat(),
},
)
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
eventsResponse = self.client.get(
url, {"statsPeriod": "14d", "dataset": "events"}, format="json"
)
assert eventsResponse.status_code == 200, eventsResponse.content
eventsData = eventsResponse.data
eventsData.sort(key=lambda val: val["name"])
assert eventsData == [
{"name": "Berry", "key": "berry", "totalValues": 1},
{"name": "Level", "key": "level", "totalValues": 1},
]
discoverResponse = self.client.get(
url, {"statsPeriod": "14d", "dataset": "discover"}, format="json"
)
discoverData = discoverResponse.data
assert {"name": "Berry", "key": "berry", "totalValues": 1} in discoverData
assert {"name": "Apple", "key": "apple", "totalValues": 1} in discoverData
issuePlatformResponse = self.client.get(
url, {"statsPeriod": "14d", "dataset": "search_issues"}, format="json"
)
issuePlatformData = issuePlatformResponse.data
issuePlatformData.sort(key=lambda val: val["name"])
assert issuePlatformData == [
{"name": "Level", "key": "level", "totalValues": 1},
{"name": "Stone Fruit", "key": "stone_fruit", "totalValues": 1},
]
def test_invalid_dataset(self) -> None:
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
self.login_as(user=user)
project = self.create_project(organization=org, teams=[team])
self.store_event(
data={"event_id": "a" * 32, "tags": {"berry": "raspberry"}, "timestamp": self.min_ago},
project_id=project.id,
)
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(
url, {"statsPeriod": "14d", "dataset": "invalid_dataset"}, format="json"
)
assert response.status_code == 400
assert response.data == {
"detail": ErrorDetail(string="Invalid dataset parameter", code="parse_error")
}
def test_no_projects(self) -> None:
user = self.create_user()
org = self.create_organization(owner=user)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data == []
@mock.patch("sentry.utils.snuba.query", return_value={})
def test_tag_caching(self, mock_snuba_query: mock.MagicMock) -> None:
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
self.create_project(organization=org, teams=[team])
self.login_as(user=user)
with self.options({"snuba.tagstore.cache-tagkeys-rate": 1.0}):
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(url, {"use_cache": "1", "statsPeriod": "14d"}, format="json")
assert response.status_code == 200, response.content
assert mock_snuba_query.call_count == 1
response = self.client.get(url, {"use_cache": "1", "statsPeriod": "14d"}, format="json")
assert response.status_code == 200, response.content
            # Because we're caching, we shouldn't call snuba again
assert mock_snuba_query.call_count == 1
@mock.patch("sentry.utils.snuba.query", return_value={})
def test_different_statsperiod_caching(self, mock_snuba_query: mock.MagicMock) -> None:
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
self.create_project(organization=org, teams=[team])
self.login_as(user=user)
with self.options({"snuba.tagstore.cache-tagkeys-rate": 1.0}):
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(url, {"use_cache": "1", "statsPeriod": "14d"}, format="json")
assert response.status_code == 200, response.content
# Empty cache, we should query snuba
assert mock_snuba_query.call_count == 1
response = self.client.get(url, {"use_cache": "1", "statsPeriod": "30d"}, format="json")
assert response.status_code == 200, response.content
            # With a different statsPeriod the cache key differs, so snuba is queried again
assert mock_snuba_query.call_count == 2
@mock.patch("sentry.utils.snuba.query", return_value={})
def test_different_times_caching(self, mock_snuba_query: mock.MagicMock) -> None:
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
self.create_project(organization=org, teams=[team])
self.login_as(user=user)
with self.options({"snuba.tagstore.cache-tagkeys-rate": 1.0}):
start = before_now(minutes=10).isoformat()
end = before_now(minutes=5).isoformat()
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(
url, {"use_cache": "1", "start": start, "end": end}, format="json"
)
assert response.status_code == 200, response.content
assert mock_snuba_query.call_count == 1
# 5 minutes later, cache_key should be different
start = before_now(minutes=5).isoformat()
end = before_now(minutes=0).isoformat()
response = self.client.get(
url, {"use_cache": "1", "start": start, "end": end}, format="json"
)
assert response.status_code == 200, response.content
assert mock_snuba_query.call_count == 2
def test_different_times_retrieves_cache(self) -> None:
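        # Two identical queries over the same absolute time window should be
        # served from the tag-key cache and return identical payloads.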
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
project = self.create_project(organization=org, teams=[team])
with self.options({"snuba.tagstore.cache-tagkeys-rate": 1.0}):
start = before_now(minutes=10).isoformat()
middle = before_now(minutes=5).isoformat()
end = before_now(minutes=0).isoformat()
# Throw an event in the middle of the time window, since end might get rounded down a bit
self.store_event(
data={"event_id": "a" * 32, "tags": {"fruit": "apple"}, "timestamp": middle},
project_id=project.id,
)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(
url, {"use_cache": "1", "start": start, "end": end}, format="json"
)
original_data = response.data
url = reverse(
"sentry-api-0-organization-tags", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(
url, {"use_cache": "1", "start": start, "end": end}, format="json"
)
cached_data = response.data
assert original_data == cached_data
|
OrganizationTagsTest
|
python
|
jina-ai__jina
|
tests/integration/deployments/test_deployment.py
|
{
"start": 12613,
"end": 15197
}
|
class ____(Executor):
@requests
def foo(self, docs, **kwargs):
docs.texts = ['foo' for _ in docs]
@requests(on='/bar')
def bar(self, docs, **kwargs):
docs.texts = ['bar' for _ in docs]
@pytest.fixture()
def exposed_port():
port = random_port()
yield port
@pytest.fixture(autouse=False)
def served_depl(request: FixtureRequest, exposed_port):
import threading
import time
def serve_depl(stop_event, **kwargs):
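        # Run the Deployment in a background thread and block on the stop
        # event so tests can reach it over the network until teardown.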
depl = Deployment(uses=MyServeExec, **kwargs)
with depl:
depl.block(stop_event)
stop_event = threading.Event()
kwargs = {'port': exposed_port}
enable_dynamic_batching = request.param
if enable_dynamic_batching:
kwargs['uses_dynamic_batching'] = {
'/bar': {'preferred_batch_size': 4, 'timeout': 5000}
}
t = threading.Thread(
name='serve-depl',
target=serve_depl,
args=(stop_event,),
kwargs=kwargs,
)
t.start()
time.sleep(3) # allow Deployment to start
yield
stop_event.set() # set event and stop (unblock) the Deployment
t.join()
@pytest.mark.repeat(10)
@pytest.mark.parametrize('served_depl', [False, True], indirect=True)
def test_deployment_dynamic_batching(served_depl, exposed_port):
docs = Client(port=exposed_port).post(on='/bar', inputs=DocumentArray.empty(5))
assert docs.texts == ['bar' for _ in docs]
@pytest.mark.repeat(10)
@pytest.mark.parametrize('enable_dynamic_batching', [False, True])
def test_deployment_client_dynamic_batching(enable_dynamic_batching):
kwargs = {'port': random_port()}
if enable_dynamic_batching:
kwargs['uses_dynamic_batching'] = {
'/bar': {'preferred_batch_size': 4, 'timeout': 5000}
}
depl = Deployment(uses=MyServeExec, **kwargs)
with depl:
docs = depl.post(on='/bar', inputs=DocumentArray.empty(5))
assert docs.texts == ['bar' for _ in docs]
@pytest.mark.parametrize('shards', [1, 2])
@pytest.mark.parametrize('replicas', [1, 2, 3])
def test_deployment_shards_replicas(shards, replicas):
class PIDExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
import os
for doc in docs:
doc.tags['pid'] = os.getpid()
dep = Deployment(uses=PIDExecutor, shards=shards, replicas=replicas)
with dep:
docs = dep.post(on='/', inputs=DocumentArray.empty(20), request_size=1)
returned_pids = set([doc.tags['pid'] for doc in docs])
assert len(returned_pids) == shards * replicas
|
MyServeExec
|
python
|
apache__airflow
|
providers/redis/tests/unit/redis/sensors/test_redis_key.py
|
{
"start": 1054,
"end": 1711
}
|
class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
self.mock_context = MagicMock()
@patch("airflow.providers.redis.hooks.redis.RedisHook.get_conn")
def test_execute_operator(self, mock_redis_conn):
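        # poke() should open the Redis connection exactly once and check for
        # the existence of the configured key.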
sensor = RedisKeySensor(
key="test_key", redis_conn_id="redis_default", task_id="test_task", dag=self.dag
)
sensor.poke(self.mock_context)
mock_redis_conn.assert_called_once_with()
mock_redis_conn().exists.assert_called_once_with("test_key")
|
TestRedisPublishOperator
|
python
|
joke2k__faker
|
faker/providers/internet/zh_TW/__init__.py
|
{
"start": 90,
"end": 516
}
|
class ____(InternetProvider):
user_name_formats = (
"{{last_romanized_name}}.{{first_romanized_name}}",
"{{first_romanized_name}}.{{last_romanized_name}}",
"{{first_romanized_name}}##",
"?{{last_romanized_name}}",
)
tlds = ("com", "com", "com", "net", "org", "tw", "tw", "tw")
@slugify
def domain_word(self):
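        # Use a romanized family name as the domain label; @slugify makes
        # the result URL-safe.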
return self.generator.format("last_romanized_name")
|
Provider
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 549257,
"end": 549681
}
|
class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("CreatedPullRequestContribution", graphql_name="node")
"""The item at the end of the edge."""
|
CreatedPullRequestContributionEdge
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/ragged/ragged_placeholder_op_test.py
|
{
"start": 1109,
"end": 3435
}
|
class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
# dtype, ragged_rank, value_shape, name -> expected
(dtypes.int32, 0, [5], None,
'Tensor("Placeholder:0", shape=(5,), dtype=int32)'),
(dtypes.int32, 1, [], 'ph', 'tf.RaggedTensor('
'values=Tensor("ph/flat_values:0", shape=(None,), dtype=int32), '
'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'),
(dtypes.string, 1, [5], 'ph', 'tf.RaggedTensor('
'values=Tensor("ph/flat_values:0", shape=(None, 5), dtype=string), '
'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'),
(dtypes.float32, 2, [], 'ph', 'tf.RaggedTensor(values=tf.RaggedTensor('
'values=Tensor("ph/flat_values:0", shape=(None,), dtype=float32), '
'row_splits=Tensor("ph/row_splits_1:0", shape=(None,), dtype=int64)), '
'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'),
(dtypes.int32, 2, [3, 5], 'ph', 'tf.RaggedTensor(values=tf.RaggedTensor('
'values=Tensor("ph/flat_values:0", shape=(None, 3, 5), dtype=int32), '
'row_splits=Tensor("ph/row_splits_1:0", shape=(None,), dtype=int64)), '
'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'),
])
def testRaggedPlaceholder(self, dtype, ragged_rank, value_shape, name,
expected):
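    # Placeholders only exist in graph mode; compare the structural repr of
    # the ragged placeholder (normalizing '?' to 'None') with the expected
    # nesting of flat_values and row_splits tensors.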
if not context.executing_eagerly():
placeholder = ragged_factory_ops.placeholder(
dtype, ragged_rank, value_shape, name)
result = str(placeholder).replace('?', 'None')
self.assertEqual(result, expected)
def testRaggedPlaceholderRaisesExceptionInEagerMode(self):
if context.executing_eagerly():
with self.assertRaises(RuntimeError):
ragged_factory_ops.placeholder(dtypes.int32, 1, [])
def testRaggedPlaceholderDoesNotIncludeValidationOps(self):
if context.executing_eagerly():
return
graph = ops.Graph()
with graph.as_default():
ragged_factory_ops.placeholder(
dtypes.float32, ragged_rank=1, value_shape=[])
self.assertEqual([op.type for op in graph.get_operations()],
['Placeholder', 'Placeholder'])
if __name__ == '__main__':
googletest.main()
|
RaggedPlaceholderOpTest
|
python
|
astropy__astropy
|
astropy/modeling/tests/test_statistics.py
|
{
"start": 396,
"end": 1822
}
|
class ____:
"""Tests for leastsquare with pre-specified number of dimensions."""
@classmethod
def setup_class(cls):
cls.model1D = Identity(n_inputs=1)
cls.model2D = Identity(n_inputs=2) | Mapping((0,), n_inputs=2)
cls.model3D = Identity(n_inputs=3) | Mapping((0,), n_inputs=3)
cls.data = cls.x = cls.y = cls.z = np.linspace(0, 10, num=100)
cls.lsq_exp = 0
def test_1d_no_weights(self):
lsq = leastsquare_1d(self.data, self.model1D, None, self.x)
assert_almost_equal(lsq, self.lsq_exp)
def test_1d_with_weights(self):
lsq = leastsquare_1d(self.data, self.model1D, np.ones(100), self.x)
assert_almost_equal(lsq, self.lsq_exp)
def test_2d_no_weights(self):
lsq = leastsquare_2d(self.data, self.model2D, None, self.x, self.y)
assert_almost_equal(lsq, self.lsq_exp)
def test_2d_with_weights(self):
lsq = leastsquare_2d(self.data, self.model2D, np.ones(100), self.x, self.y)
assert_almost_equal(lsq, self.lsq_exp)
def test_3d_no_weights(self):
lsq = leastsquare_3d(self.data, self.model3D, None, self.x, self.y, self.z)
assert_almost_equal(lsq, self.lsq_exp)
def test_3d_with_weights(self):
lsq = leastsquare_3d(
self.data, self.model3D, np.ones(100), self.x, self.y, self.z
)
assert_almost_equal(lsq, self.lsq_exp)
|
TestLeastSquare_XD
|
python
|
pydantic__pydantic
|
tests/mypy/modules/plugin_fail.py
|
{
"start": 930,
"end": 1035
}
|
class ____(BaseModel):
model_config = ConfigDict(extra=1) # type: ignore[typeddict-item]
|
BadExtraModel
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/datetimes/test_constructors.py
|
{
"start": 40194,
"end": 44083
}
|
class ____:
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range("1/1/2000", "1/2/2000", freq="5min")
rng2 = DatetimeIndex(rng)
assert rng.freq == rng2.freq
def test_explicit_none_freq(self):
# Explicitly passing freq=None is respected
rng = date_range("1/1/2000", "1/2/2000", freq="5min")
result = DatetimeIndex(rng, freq=None)
assert result.freq is None
result = DatetimeIndex(rng._data, freq=None)
assert result.freq is None
def test_dti_constructor_small_int(self, any_int_numpy_dtype):
# see gh-13721
exp = DatetimeIndex(
[
"1970-01-01 00:00:00.00000000",
"1970-01-01 00:00:00.00000001",
"1970-01-01 00:00:00.00000002",
]
)
arr = np.array([0, 10, 20], dtype=any_int_numpy_dtype)
tm.assert_index_equal(DatetimeIndex(arr), exp)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(["1-1-2000 00:00:01"])
assert rng[0].second == 1
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view("M8[D]")
idx = Index(arr)
assert (idx.values == astype_overflowsafe(arr, dtype=np.dtype("M8[ns]"))).all()
def test_constructor_int64_nocopy(self):
# GH#1624
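        # Without copy=True the index keeps a view of the input buffer, so
        # mutating arr in place is visible through the index; with copy=True
        # it is not.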
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
assert (index.asi8[50:100] == -1).all()
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
assert (index.asi8[50:100] != -1).all()
@pytest.mark.parametrize(
"freq",
["ME", "QE", "YE", "D", "B", "bh", "min", "s", "ms", "us", "h", "ns", "C"],
)
def test_from_freq_recreate_from_data(self, freq):
org = date_range(start="2001/02/01 09:00", freq=freq, periods=1)
idx = DatetimeIndex(org, freq=freq)
tm.assert_index_equal(idx, org)
org = date_range(
start="2001/02/01 09:00", freq=freq, tz="US/Pacific", periods=1
)
idx = DatetimeIndex(org, freq=freq, tz="US/Pacific")
tm.assert_index_equal(idx, org)
def test_datetimeindex_constructor_misc(self):
arr = ["1/1/2005", "1/2/2005", "Jn 3, 2005", "2005-01-04"]
msg = r"(\(')?Unknown datetime string format(:', 'Jn 3, 2005'\))?"
with pytest.raises(ValueError, match=msg):
DatetimeIndex(arr)
arr = ["1/1/2005", "1/2/2005", "1/3/2005", "2005-01-04"]
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), "1/2/2005", "1/3/2005", "2005-01-04"]
idx2 = DatetimeIndex(arr)
arr = [Timestamp(datetime(2005, 1, 1)), "1/2/2005", "1/3/2005", "2005-01-04"]
idx3 = DatetimeIndex(arr)
arr = np.array(["1/1/2005", "1/2/2005", "1/3/2005", "2005-01-04"], dtype="O")
idx4 = DatetimeIndex(arr)
idx5 = DatetimeIndex(["12/05/2007", "25/01/2008"], dayfirst=True)
idx6 = DatetimeIndex(
["2007/05/12", "2008/01/25"], dayfirst=False, yearfirst=True
)
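        # "12/05/2007" with dayfirst=True and "2007/05/12" with yearfirst=True
        # both parse to 2007-05-12, so idx5 and idx6 match element-wise.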
tm.assert_index_equal(idx5, idx6)
for other in [idx2, idx3, idx4]:
assert (idx1.values == other.values).all()
def test_dti_constructor_object_dtype_dayfirst_yearfirst_with_tz(self):
# GH#55813
val = "5/10/16"
dfirst = Timestamp(2016, 10, 5, tz="US/Pacific")
yfirst = Timestamp(2005, 10, 16, tz="US/Pacific")
result1 = DatetimeIndex([val], tz="US/Pacific", dayfirst=True)
expected1 = DatetimeIndex([dfirst])
tm.assert_index_equal(result1, expected1)
result2 = DatetimeIndex([val], tz="US/Pacific", yearfirst=True)
expected2 = DatetimeIndex([yfirst])
tm.assert_index_equal(result2, expected2)
|
TestTimeSeries
|
python
|
numba__numba
|
numba/cuda/descriptor.py
|
{
"start": 207,
"end": 985
}
|
class ____(TargetDescriptor):
def __init__(self, name):
self.options = CUDATargetOptions
# The typing and target contexts are initialized only when needed -
# this prevents an attempt to load CUDA libraries at import time on
# systems that might not have them present.
self._typingctx = None
self._targetctx = None
super().__init__(name)
@property
def typing_context(self):
if self._typingctx is None:
self._typingctx = CUDATypingContext()
return self._typingctx
@property
def target_context(self):
if self._targetctx is None:
self._targetctx = CUDATargetContext(self._typingctx)
return self._targetctx
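# `cuda_target` below is the shared singleton descriptor for the CUDA target;
# its typing and target contexts are created lazily on first property access
# (see above).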
cuda_target = CUDATarget('cuda')
|
CUDATarget
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1185034,
"end": 1185333
}
|
class ____(sgqlc.types.Type, GitSignature):
"""Represents a GPG signature on a Commit or Tag."""
__schema__ = github_schema
__field_names__ = ("key_id",)
key_id = sgqlc.types.Field(String, graphql_name="keyId")
"""Hex-encoded ID of the key that signed this object."""
|
GpgSignature
|
python
|
tox-dev__tox
|
src/tox/config/of_type.py
|
{
"start": 617,
"end": 1242
}
|
class ____(ABC, Generic[T]): # noqa: PLW1641
"""Abstract base class for configuration definitions."""
def __init__(self, keys: Iterable[str], desc: str) -> None:
self.keys = keys
self.desc = desc
@abstractmethod
def __call__(self, conf: Config, loaders: list[Loader[T]], args: ConfigLoadArgs) -> T:
raise NotImplementedError
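    # Subclasses implement __call__ to materialize the typed value from the
    # loaders. Equality compares by (keys, desc); __hash__ is left undefined
    # (hence the PLW1641 suppression on the class line).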
def __eq__(self, o: object) -> bool:
return type(self) == type(o) and (self.keys, self.desc) == (o.keys, o.desc) # type: ignore[attr-defined] # noqa: E721
def __ne__(self, o: object) -> bool:
return not (self == o)
|
ConfigDefinition
|
python
|
huggingface__transformers
|
tests/models/ovis2/test_modeling_ovis2.py
|
{
"start": 5026,
"end": 7444
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Model tester for `Ovis2ForConditionalGeneration`.
"""
all_model_classes = (
(
Ovis2Model,
Ovis2ForConditionalGeneration,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"image-text-to-text": Ovis2ForConditionalGeneration, "any-to-any": Ovis2ForConditionalGeneration}
if is_torch_available()
else {}
)
_is_composite = True
def setUp(self):
self.model_tester = Ovis2VisionText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Ovis2Config, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
wte = model.get_input_embeddings()
inputs["inputs_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)
    # overwrite inputs_embeds tests because we need to delete "pixel_values" for LVLMs
# while some other models require pixel_values to be present
def test_inputs_embeds_matches_input_ids(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
inputs_embeds = model.get_input_embeddings()(input_ids)
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
torch.testing.assert_close(out_embeds, out_ids)
@require_torch
@slow
|
Ovis2ModelTest
|
python
|
fastai__fastai
|
fastai/vision/augment.py
|
{
"start": 43393,
"end": 43711
}
|
class ____(SpaceTfm):
"Apply `fs` to the logits"
order = 40
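    # Lighting functions are applied in logit space via TensorImage.lighting
    # (logit -> fs -> sigmoid), so successive adjustments compose additively;
    # order=40 places this late among the batch transforms.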
def __init__(self,
        fs:Callable|MutableSequence, # Transformation function(s) applied in logit space
**kwargs
):
super().__init__(fs, TensorImage.lighting, **kwargs)
# %% ../../nbs/09_vision.augment.ipynb 203
|
LightingTfm
|
python
|
great-expectations__great_expectations
|
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_column_values_to_be_equal_to_or_greater_than_profile_min.py
|
{
"start": 2735,
"end": 6900
}
|
class ____(ColumnMapExpectation):
"""Expect the column values to be greater than or equal to the minimum value of the respective column within the DataProfiler report.
    This function builds upon the custom column map expectations of Great Expectations. It asks a yes/no question of each row in the user-specified column;
namely, is the value greater than or equal to the minimum value of the respective column within the provided profile report generated from the DataProfiler.
Args:
column(str): The column that you want to check.
profile(dict(str, Any)): The report, which is assumed to contain a column of the same name, previously generated using the DataProfiler.
df.expect_column_values_to_be_equal_to_or_greater_than_profile_min(
column,
profile
)
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
data = [
[-36, -25, -44],
[18, 45, 46],
[-16, -29, -49],
[21, 4, 35],
[-18, -7, -40],
[22, -4, -37],
[-17, -21, 11],
[48, -32, -48],
[0, -44, 20],
]
cols = ["col_a", "col_b", "col_c"]
df = pd.DataFrame(data, columns=cols)
profiler_opts = dp.ProfilerOptions()
profiler_opts.structured_options.multiprocess.is_enabled = False
profileObj = dp.Profiler(df, options=profiler_opts)
profileReport = profileObj.report(report_options={"output_format": "serializable"})
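    # The serializable report exposes profile_schema as a defaultdict-like
    # mapping; it is cast to a plain dict below (assumption: required so the
    # embedded example report stays JSON-serializable).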
profileReport["global_stats"]["profile_schema"] = dict(
profileReport["global_stats"]["profile_schema"]
)
examples = [
{
"data": {
"col_a": [-3, 21, 20, 5],
"col_b": [-7, 41, -47, 12],
"col_c": [54, -10, 19, 19],
},
"tests": [
{
"title": "column_lower_bounded_by_min",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col_a",
"profile": profileReport,
},
"out": {"success": True},
},
{
"title": "column_has_value_less_than_min",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col_b",
"profile": profileReport,
},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.greater_than_or_equal_to_profile_min"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"profile",
"mostly",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {
"profile": None,
"result_format": "BASIC",
"catch_exceptions": False,
}
# This object contains metadata for display in the public Gallery
library_metadata = {
"requirements": ["dataprofiler", "tensorflow", "scikit-learn", "numpy"],
"maturity": "experimental", # "concept_only", "experimental", "beta", or "production"
"tags": ["dataprofiler"], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@stevensecreti", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
# ExpectColumnValuesToBeEqualToOrGreaterThanProfileMin().print_diagnostic_checklist()
diagnostics_report = ExpectColumnValuesToBeEqualToOrGreaterThanProfileMin().run_diagnostics()
print(diagnostics_report.generate_checklist())
|
ExpectColumnValuesToBeEqualToOrGreaterThanProfileMin
|
python
|
Pylons__pyramid
|
src/pyramid/request.py
|
{
"start": 4881,
"end": 12087
}
|
class ____(
BaseRequest,
URLMethodsMixin,
CallbackMethodsMixin,
InstancePropertyMixin,
LocalizerRequestMixin,
SecurityAPIMixin,
AuthenticationAPIMixin,
ViewMethodsMixin,
):
"""
A subclass of the :term:`WebOb` Request class. An instance of
this class is created by the :term:`router` and is provided to a
view callable (and to other subsystems) as the ``request``
argument.
The documentation below (save for the ``add_response_callback`` and
``add_finished_callback`` methods, which are defined in this subclass
itself, and the attributes ``context``, ``registry``, ``root``,
    ``subpath``, ``traversed``, ``view_name``, ``virtual_root``, and
``virtual_root_path``, each of which is added to the request by the
:term:`router` at request ingress time) are autogenerated from the WebOb
source code used when this documentation was generated.
Due to technical constraints, we can't yet display the WebOb
version number from which this documentation is autogenerated, but
it will be the 'prevailing WebOb version' at the time of the
release of this :app:`Pyramid` version. See
https://webob.org/ for further information.
"""
exception = None
exc_info = None
matchdict = None
matched_route = None
request_iface = IRequest
ResponseClass = Response
@reify
def tmpl_context(self):
# docs-deprecated template context for Pylons-like apps; do not
# remove.
return TemplateContext()
@reify
def session(self):
"""Obtain the :term:`session` object associated with this
request. If a :term:`session factory` has not been registered
during application configuration, a
:class:`pyramid.exceptions.ConfigurationError` will be raised"""
factory = self.registry.queryUtility(ISessionFactory)
if factory is None:
raise AttributeError(
'No session factory registered '
'(see the Sessions chapter of the Pyramid documentation)'
)
return factory(self)
@reify
def response(self):
"""This attribute is actually a "reified" property which returns an
    instance of the :class:`pyramid.response.Response` class. The
response object returned does not exist until this attribute is
accessed. Subsequent accesses will return the same Response object.
    The ``request.response`` API is used by renderers. A renderer obtains
the response object it will return from a view that uses that renderer
by accessing ``request.response``. Therefore, it's possible to use the
``request.response`` API to set up a response object with "the
right" attributes (e.g. by calling ``request.response.set_cookie()``)
within a view that uses a renderer. Mutations to this response object
will be preserved in the response sent to the client."""
response_factory = _get_response_factory(self.registry)
return response_factory(self)
def is_response(self, ob):
"""Return ``True`` if the object passed as ``ob`` is a valid
response object, ``False`` otherwise."""
if ob.__class__ is Response:
return True
registry = self.registry
adapted = registry.queryAdapterOrSelf(ob, IResponse)
if adapted is None:
return False
return adapted is ob
def route_request_iface(name, bases=()):
# zope.interface treats the __name__ as the __doc__ and changes __name__
# to None for interfaces that contain spaces if you do not pass a
# nonempty __doc__ (insane); see
# zope.interface.interface.Element.__init__ and
# https://github.com/Pylons/pyramid/issues/232; as a result, always pass
# __doc__ to the InterfaceClass constructor.
iface = InterfaceClass(
'%s_IRequest' % name,
bases=bases,
__doc__="route_request_iface-generated interface",
)
# for exception view lookups
iface.combined = InterfaceClass(
'%s_combined_IRequest' % name,
bases=(iface, IRequest),
__doc__='route_request_iface-generated combined interface',
)
return iface
def add_global_response_headers(request, headerlist):
def add_headers(request, response):
for k, v in headerlist:
response.headerlist.append((k, v))
request.add_response_callback(add_headers)
def call_app_with_subpath_as_path_info(request, app):
# Copy the request. Use the source request's subpath (if it exists) as
# the new request's PATH_INFO. Set the request copy's SCRIPT_NAME to the
# prefix before the subpath. Call the application with the new request
# and return a response.
#
# Postconditions:
# - SCRIPT_NAME and PATH_INFO are empty or start with /
# - At least one of SCRIPT_NAME or PATH_INFO are set.
# - SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
# be '/').
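    # Worked example (illustrative): with SCRIPT_NAME='/app', PATH_INFO='/a/b'
    # and subpath=('b',), the copy is called with SCRIPT_NAME='/app/a' and
    # PATH_INFO='/b'.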
environ = request.environ
script_name = environ.get('SCRIPT_NAME', '')
path_info = environ.get('PATH_INFO', '/')
subpath = list(getattr(request, 'subpath', ()))
new_script_name = ''
# compute new_path_info
new_path_info = '/' + '/'.join(
[text_(x.encode('utf-8'), 'latin-1') for x in subpath]
)
if new_path_info != '/': # don't want a sole double-slash
if path_info != '/': # if orig path_info is '/', we're already done
if path_info.endswith('/'):
# readd trailing slash stripped by subpath (traversal)
# conversion
new_path_info += '/'
# compute new_script_name
workback = (script_name + path_info).split('/')
tmp = []
while workback:
if tmp == subpath:
break
el = workback.pop()
if el:
tmp.insert(0, text_(bytes_(el, 'latin-1'), 'utf-8'))
# strip all trailing slashes from workback to avoid appending undue slashes
# to end of script_name
while workback and (workback[-1] == ''):
workback = workback[:-1]
new_script_name = '/'.join(workback)
new_request = request.copy()
new_request.environ['SCRIPT_NAME'] = new_script_name
new_request.environ['PATH_INFO'] = new_path_info
return new_request.get_response(app)
def apply_request_extensions(request, extensions=None):
"""Apply request extensions (methods and properties) to an instance of
:class:`pyramid.interfaces.IRequest`. This method is dependent on the
``request`` containing a properly initialized registry.
After invoking this method, the ``request`` should have the methods
and properties that were defined using
:meth:`pyramid.config.Configurator.add_request_method`.
"""
if extensions is None:
extensions = request.registry.queryUtility(IRequestExtensions)
if extensions is not None:
for name, fn in extensions.methods.items():
method = fn.__get__(request, request.__class__)
setattr(request, name, method)
InstancePropertyHelper.apply_properties(
request, extensions.descriptors
)
|
Request
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_itertools.py
|
{
"start": 113883,
"end": 116332
}
|
class ____(__TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
testcases = [
(repeat, (1, 2), [1, 1]),
(zip, ([1, 2], 'ab'), [(1, 'a'), (2, 'b')]),
(filter, (None, [0, 1]), [1]),
(filterfalse, (None, [0, 1]), [0]),
(chain, ([1, 2], [3, 4]), [1, 2, 3]),
(map, (str, [1, 2]), ['1', '2']),
(starmap, (operator.pow, ((2, 3), (3, 2))), [8, 9]),
(islice, ([1, 2, 3, 4], 1, 3), [2, 3]),
(takewhile, (isEven, [2, 3, 4]), [2]),
(dropwhile, (isEven, [2, 3, 4]), [3, 4]),
(cycle, ([1, 2],), [1, 2, 1]),
(compress, ('ABC', [1, 0, 1]), ['A', 'C']),
]
for cls, args, result in testcases:
with self.subTest(cls):
class subclass(cls):
pass
u = subclass(*args)
self.assertIs(type(u), subclass)
self.assertEqual(list(islice(u, 0, 3)), result)
with self.assertRaises(TypeError):
subclass(*args, newarg=3)
for cls, args, result in testcases:
# Constructors of repeat, zip, compress accept keyword arguments.
            # Their subclasses need to override __new__ to support new
# keyword arguments.
if cls in [repeat, zip, compress]:
continue
with self.subTest(cls):
class subclass_with_init(cls):
def __init__(self, *args, newarg=None):
self.newarg = newarg
u = subclass_with_init(*args, newarg=3)
self.assertIs(type(u), subclass_with_init)
self.assertEqual(list(islice(u, 0, 3)), result)
self.assertEqual(u.newarg, 3)
for cls, args, result in testcases:
with self.subTest(cls):
class subclass_with_new(cls):
def __new__(cls, *args, newarg=None):
self = super().__new__(cls, *args)
self.newarg = newarg
return self
u = subclass_with_new(*args, newarg=3)
self.assertIs(type(u), subclass_with_new)
self.assertEqual(list(islice(u, 0, 3)), result)
self.assertEqual(u.newarg, 3)
if __name__ == "__main__":
run_tests()
|
SubclassWithKwargsTest
|
python
|
apache__airflow
|
airflow-core/src/airflow/models/hitl.py
|
{
"start": 2558,
"end": 2684
}
|
class ____(TypedDict):
"""Typed dict for saving a Human-in-the-loop user information."""
id: str
name: str
|
HITLUser
|
python
|
doocs__leetcode
|
solution/0000-0099/0034.Find First and Last Position of Element in Sorted Array/Solution.py
|
{
"start": 0,
"end": 216
}
|
class ____:
def searchRange(self, nums: List[int], target: int) -> List[int]:
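        # bisect_left(nums, target) gives the first index with value >= target;
        # bisect_left(nums, target + 1) gives one past the last occurrence
        # (valid since the values are integers). Equal bounds mean the target
        # is absent.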
l = bisect_left(nums, target)
r = bisect_left(nums, target + 1)
return [-1, -1] if l == r else [l, r - 1]
|
Solution
|
python
|
numpy__numpy
|
numpy/testing/_private/utils.py
|
{
"start": 1618,
"end": 53151
}
|
class ____(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
KnownFailureTest = KnownFailureException # backwards compat
verbose = 0
NUMPY_ROOT = pathlib.Path(np.__file__).parent
try:
np_dist = importlib.metadata.distribution('numpy')
except importlib.metadata.PackageNotFoundError:
IS_INSTALLED = IS_EDITABLE = False
else:
IS_INSTALLED = True
try:
if sys.version_info >= (3, 13):
IS_EDITABLE = np_dist.origin.dir_info.editable
else:
# Backport importlib.metadata.Distribution.origin
import json # noqa: E401
import types
origin = json.loads(
np_dist.read_text('direct_url.json') or '{}',
object_hook=lambda data: types.SimpleNamespace(**data),
)
IS_EDITABLE = origin.dir_info.editable
except AttributeError:
IS_EDITABLE = False
# spin installs numpy directly via meson, instead of using meson-python, and
# runs the module by setting PYTHONPATH. This is problematic because the
# resulting installation lacks the Python metadata (.dist-info), and numpy
# might already be installed on the environment, causing us to find its
# metadata, even though we are not actually loading that package.
# Work around this issue by checking if the numpy root matches.
if not IS_EDITABLE and np_dist.locate_file('numpy') != NUMPY_ROOT:
IS_INSTALLED = False
IS_WASM = platform.machine() in ["wasm32", "wasm64"]
IS_PYPY = sys.implementation.name == 'pypy'
IS_PYSTON = hasattr(sys, "pyston_version_info")
HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON
BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe(None)
HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64
IS_MUSL = False
# alternate way is
# from packaging.tags import sys_tags
# _tags = list(sys_tags())
# if 'musllinux' in _tags[0].platform:
_v = sysconfig.get_config_var('HOST_GNU_TYPE') or ''
if 'musl' in _v:
IS_MUSL = True
NOGIL_BUILD = bool(sysconfig.get_config_var("Py_GIL_DISABLED"))
IS_64BIT = np.dtype(np.intp).itemsize == 8
def assert_(val, msg=''):
"""
Assert that works in release mode.
Accepts callable msg to allow deferring evaluation until failure.
The Python built-in ``assert`` does not work when executing code in
optimized mode (the ``-O`` flag) - no byte-code is generated for it.
For documentation on usage, refer to the Python documentation.
"""
__tracebackhide__ = True # Hide traceback for py.test
if not val:
try:
smsg = msg()
except TypeError:
smsg = msg
raise AssertionError(smsg)
if os.name == 'nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance=None,
inum=-1, format=None, machine=None):
# NOTE: Many counters require 2 samples to give accurate results,
# including "% Processor Time" (as by definition, at any instant, a
# thread's CPU usage is either 0 or 100). To read counters like this,
# you should copy this function, but keep the counter open, and call
# CollectQueryData() each time you need to know.
# See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
# (dead link)
# My older explanation for this was that the "AddCounter" process
# forced the CPU to 100%, but the above makes more sense :)
import win32pdh
if format is None:
format = win32pdh.PDH_FMT_LONG
path = win32pdh.MakeCounterPath((machine, object, instance, None,
inum, counter))
hq = win32pdh.OpenQuery()
try:
hc = win32pdh.AddCounter(hq, path)
try:
win32pdh.CollectQueryData(hq)
type, val = win32pdh.GetFormattedCounterValue(hc, format)
return val
finally:
win32pdh.RemoveCounter(hc)
finally:
win32pdh.CloseQuery(hq)
def memusage(processName="python", instance=0):
# from win32pdhutil, part of the win32all package
import win32pdh
return GetPerformanceAttributes("Process", "Virtual Bytes",
processName, instance,
win32pdh.PDH_FMT_LONG, None)
elif sys.platform[:5] == 'linux':
def memusage(_proc_pid_stat=None):
"""
Return virtual memory size in bytes of the running python.
"""
_proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat'
try:
with open(_proc_pid_stat) as f:
l = f.readline().split(' ')
return int(l[22])
except Exception:
return
else:
def memusage():
"""
Return memory usage of running python. [Not implemented]
"""
raise NotImplementedError
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat=None, _load_time=None):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
_proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat'
_load_time = _load_time or []
import time
if not _load_time:
_load_time.append(time.time())
try:
with open(_proc_pid_stat) as f:
l = f.readline().split(' ')
return int(l[13])
except Exception:
return int(100 * (time.time() - _load_time[0]))
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
return int(100 * (time.time() - _load_time[0]))
def build_err_msg(arrays, err_msg, header='Items are not equal:',
verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
msg = ['\n' + header]
err_msg = str(err_msg)
if err_msg:
if err_msg.find('\n') == -1 and len(err_msg) < 79 - len(header):
msg = [msg[0] + ' ' + err_msg]
else:
msg.append(err_msg)
if verbose:
for i, a in enumerate(arrays):
if isinstance(a, ndarray):
# precision argument is only needed if the objects are ndarrays
r_func = partial(array_repr, precision=precision)
else:
r_func = repr
try:
r = r_func(a)
except Exception as exc:
r = f'[repr failed for <{type(a).__name__}>: {exc}]'
if r.count('\n') > 3:
r = '\n'.join(r.splitlines()[:3])
r += '...'
msg.append(f' {names[i]}: {r}')
return '\n'.join(msg)
def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False):
"""
Raises an AssertionError if two objects are not equal.
Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
check that all elements of these objects are equal. An exception is raised
at the first conflicting values.
This function handles NaN comparisons as if NaN was a "normal" number.
That is, AssertionError is not raised if both objects have NaNs in the same
positions. This is in contrast to the IEEE standard on NaNs, which says
that NaN compared to anything must return False.
Parameters
----------
actual : array_like
The object to check.
desired : array_like
The expected object.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
strict : bool, optional
If True and either of the `actual` and `desired` arguments is an array,
raise an ``AssertionError`` when either the shape or the data type of
the arguments does not match. If neither argument is an array, this
parameter has no effect.
.. versionadded:: 2.0.0
Raises
------
AssertionError
If actual and desired are not equal.
See Also
--------
assert_allclose
assert_array_almost_equal_nulp,
assert_array_max_ulp,
Notes
-----
When one of `actual` and `desired` is a scalar and the other is array_like, the
function checks that each element of the array_like is equal to the scalar.
Note that empty arrays are therefore considered equal to scalars.
    This behaviour can be disabled by setting ``strict=True``.
Examples
--------
>>> np.testing.assert_equal([4, 5], [4, 6])
Traceback (most recent call last):
...
AssertionError:
Items are not equal:
item=1
ACTUAL: 5
DESIRED: 6
The following comparison does not raise an exception. There are NaNs
in the inputs, but they are in the same positions.
>>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])
As mentioned in the Notes section, `assert_equal` has special
handling for scalars when one of the arguments is an array.
Here, the test checks that each value in `x` is 3:
>>> x = np.full((2, 5), fill_value=3)
>>> np.testing.assert_equal(x, 3)
Use `strict` to raise an AssertionError when comparing a scalar with an
array of a different shape:
>>> np.testing.assert_equal(x, 3, strict=True)
Traceback (most recent call last):
...
AssertionError:
Arrays are not equal
<BLANKLINE>
(shapes (2, 5), () mismatch)
ACTUAL: array([[3, 3, 3, 3, 3],
[3, 3, 3, 3, 3]])
DESIRED: array(3)
The `strict` parameter also ensures that the array data types match:
>>> x = np.array([2, 2, 2])
>>> y = np.array([2., 2., 2.], dtype=np.float32)
>>> np.testing.assert_equal(x, y, strict=True)
Traceback (most recent call last):
...
AssertionError:
Arrays are not equal
<BLANKLINE>
(dtypes int64, float32 mismatch)
ACTUAL: array([2, 2, 2])
DESIRED: array([2., 2., 2.], dtype=float32)
"""
__tracebackhide__ = True # Hide traceback for py.test
if isinstance(desired, dict):
if not isinstance(actual, dict):
raise AssertionError(repr(type(actual)))
assert_equal(len(actual), len(desired), err_msg, verbose)
for k in desired:
if k not in actual:
raise AssertionError(repr(k))
assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}',
verbose)
return
if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
assert_equal(len(actual), len(desired), err_msg, verbose)
for k in range(len(desired)):
assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}',
verbose)
return
from numpy import imag, iscomplexobj, real
from numpy._core import isscalar, ndarray, signbit
if isinstance(actual, ndarray) or isinstance(desired, ndarray):
return assert_array_equal(actual, desired, err_msg, verbose,
strict=strict)
msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except (ValueError, TypeError):
usecomplex = False
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_equal(actualr, desiredr)
assert_equal(actuali, desiredi)
except AssertionError:
raise AssertionError(msg)
# isscalar test to check cases such as [np.nan] != np.nan
if isscalar(desired) != isscalar(actual):
raise AssertionError(msg)
try:
isdesnat = isnat(desired)
isactnat = isnat(actual)
dtypes_match = (np.asarray(desired).dtype.type ==
np.asarray(actual).dtype.type)
if isdesnat and isactnat:
# If both are NaT (and have the same dtype -- datetime or
# timedelta) they are considered equal.
if dtypes_match:
return
else:
raise AssertionError(msg)
except (TypeError, ValueError, NotImplementedError):
pass
# Inf/nan/negative zero handling
try:
isdesnan = isnan(desired)
isactnan = isnan(actual)
if isdesnan and isactnan:
return # both nan, so equal
# handle signed zero specially for floats
array_actual = np.asarray(actual)
array_desired = np.asarray(desired)
if (array_actual.dtype.char in 'Mm' or
array_desired.dtype.char in 'Mm'):
# version 1.18
# until this version, isnan failed for datetime64 and timedelta64.
# Now it succeeds but comparison to scalar with a different type
# emits a DeprecationWarning.
# Avoid that by skipping the next check
raise NotImplementedError('cannot compare to a scalar '
'with a different type')
if desired == 0 and actual == 0:
if not signbit(desired) == signbit(actual):
raise AssertionError(msg)
except (TypeError, ValueError, NotImplementedError):
pass
try:
# Explicitly use __eq__ for comparison, gh-2552
if not (desired == actual):
raise AssertionError(msg)
except (DeprecationWarning, FutureWarning) as e:
# this handles the case when the two types are not even comparable
if 'elementwise == comparison' in e.args[0]:
raise AssertionError(msg)
else:
raise
def print_assert_equal(test_string, actual, desired):
"""
Test if two objects are equal, and print an error message if test fails.
The test is performed with ``actual == desired``.
Parameters
----------
test_string : str
The message supplied to AssertionError.
actual : object
The object to test for equality against `desired`.
desired : object
The expected result.
Examples
--------
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
Traceback (most recent call last):
...
AssertionError: Test XYZ of func xyz failed
ACTUAL:
[0, 1]
DESIRED:
[0, 2]
"""
__tracebackhide__ = True # Hide traceback for py.test
import pprint
if not (actual == desired):
msg = StringIO()
msg.write(test_string)
msg.write(' failed\nACTUAL: \n')
pprint.pprint(actual, msg)
msg.write('DESIRED: \n')
pprint.pprint(desired, msg)
raise AssertionError(msg.getvalue())
def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
"""
Raises an AssertionError if two items are not equal up to desired
precision.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
The test verifies that the elements of `actual` and `desired` satisfy::
abs(desired-actual) < float64(1.5 * 10**(-decimal))
That is a looser test than originally documented, but agrees with what the
actual implementation in `assert_array_almost_equal` did up to rounding
vagaries. An exception is raised at conflicting values. For ndarrays this
delegates to assert_array_almost_equal
Parameters
----------
actual : array_like
The object to check.
desired : array_like
The expected object.
decimal : int, optional
Desired precision, default is 7.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
>>> from numpy.testing import assert_almost_equal
>>> assert_almost_equal(2.3333333333333, 2.33333334)
>>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 10 decimals
ACTUAL: 2.3333333333333
DESIRED: 2.33333334
>>> assert_almost_equal(np.array([1.0,2.3333333333333]),
... np.array([1.0,2.33333334]), decimal=9)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 9 decimals
<BLANKLINE>
Mismatched elements: 1 / 2 (50%)
Mismatch at index:
[1]: 2.3333333333333 (ACTUAL), 2.33333334 (DESIRED)
Max absolute difference among violations: 6.66669964e-09
Max relative difference among violations: 2.85715698e-09
ACTUAL: array([1. , 2.333333333])
DESIRED: array([1. , 2.33333334])
"""
__tracebackhide__ = True # Hide traceback for py.test
from numpy import imag, iscomplexobj, real
from numpy._core import ndarray
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except ValueError:
usecomplex = False
def _build_err_msg():
header = ('Arrays are not almost equal to %d decimals' % decimal)
return build_err_msg([actual, desired], err_msg, verbose=verbose,
header=header)
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_almost_equal(actualr, desiredr, decimal=decimal)
assert_almost_equal(actuali, desiredi, decimal=decimal)
except AssertionError:
raise AssertionError(_build_err_msg())
if isinstance(actual, (ndarray, tuple, list)) \
or isinstance(desired, (ndarray, tuple, list)):
return assert_array_almost_equal(actual, desired, decimal, err_msg)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (isfinite(desired) and isfinite(actual)):
if isnan(desired) or isnan(actual):
if not (isnan(desired) and isnan(actual)):
raise AssertionError(_build_err_msg())
elif not desired == actual:
raise AssertionError(_build_err_msg())
return
except (NotImplementedError, TypeError):
pass
if abs(desired - actual) >= np.float64(1.5 * 10.0**(-decimal)):
raise AssertionError(_build_err_msg())
def assert_approx_equal(actual, desired, significant=7, err_msg='',
verbose=True):
"""
Raises an AssertionError if two items are not equal up to significant
digits.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
Given two numbers, check that they are approximately equal.
Approximately equal is defined as the number of significant digits
that agree.
Parameters
----------
actual : scalar
The object to check.
desired : scalar
The expected object.
significant : int, optional
Desired precision, default is 7.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
>>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
... significant=8)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
... significant=8)
Traceback (most recent call last):
...
AssertionError:
Items are not equal to 8 significant digits:
ACTUAL: 1.234567e-21
DESIRED: 1.2345672e-21
the evaluated condition that raises the exception is
>>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
True
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
(actual, desired) = map(float, (actual, desired))
if desired == actual:
return
    # Normalize the numbers to be in the range (-10.0, 10.0)
# scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
with np.errstate(invalid='ignore'):
scale = 0.5 * (np.abs(desired) + np.abs(actual))
scale = np.power(10, np.floor(np.log10(scale)))
try:
sc_desired = desired / scale
except ZeroDivisionError:
sc_desired = 0.0
try:
sc_actual = actual / scale
except ZeroDivisionError:
sc_actual = 0.0
msg = build_err_msg(
[actual, desired], err_msg,
header='Items are not equal to %d significant digits:' % significant,
verbose=verbose)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (isfinite(desired) and isfinite(actual)):
if isnan(desired) or isnan(actual):
if not (isnan(desired) and isnan(actual)):
raise AssertionError(msg)
elif not desired == actual:
raise AssertionError(msg)
return
except (TypeError, NotImplementedError):
pass
if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant - 1)):
raise AssertionError(msg)
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
precision=6, equal_nan=True, equal_inf=True,
*, strict=False, names=('ACTUAL', 'DESIRED')):
__tracebackhide__ = True # Hide traceback for py.test
from numpy._core import all, array2string, errstate, inf, isnan, max, object_
x = np.asanyarray(x)
y = np.asanyarray(y)
# original array for output formatting
ox, oy = x, y
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPefdgFDG'
def istime(x):
return x.dtype.char in "Mm"
def isvstring(x):
return x.dtype.char == "T"
def robust_any_difference(x, y):
# We include work-arounds here to handle three types of slightly
# pathological ndarray subclasses:
# (1) all() on fully masked arrays returns np.ma.masked, so we use != True
# (np.ma.masked != True evaluates as np.ma.masked, which is falsy).
# (2) __eq__ on some ndarray subclasses returns Python booleans
# instead of element-wise comparisons, so we cast to np.bool() in
# that case (or in case __eq__ returns some other value with no
# all() method).
# (3) subclasses with bare-bones __array_function__ implementations may
# not implement np.all(), so favor using the .all() method
# We are not committed to supporting cases (2) and (3), but it's nice to
# support them if possible.
result = x == y
if not hasattr(result, "all") or not callable(result.all):
result = np.bool(result)
return result.all() != True
def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
"""Handling nan/inf.
Combine results of running func on x and y, checking that they are True
at the same locations.
"""
__tracebackhide__ = True # Hide traceback for py.test
x_id = func(x)
y_id = func(y)
if robust_any_difference(x_id, y_id):
msg = build_err_msg(
[x, y],
err_msg + '\n%s location mismatch:'
% (hasval), verbose=verbose, header=header,
names=names,
precision=precision)
raise AssertionError(msg)
# If there is a scalar, then here we know the array has the same
# flag as it everywhere, so we should return the scalar flag.
# np.ma.masked is also handled and converted to np.False_ (even if the other
# array has nans/infs etc.; that's OK given the handling later of fully-masked
# results).
if isinstance(x_id, bool) or x_id.ndim == 0:
return np.bool(x_id)
elif isinstance(y_id, bool) or y_id.ndim == 0:
return np.bool(y_id)
else:
return y_id
def assert_same_inf_values(x, y, infs_mask):
"""
Verify all inf values match in the two arrays
"""
__tracebackhide__ = True # Hide traceback for py.test
if not infs_mask.any():
return
if x.ndim > 0 and y.ndim > 0:
x = x[infs_mask]
y = y[infs_mask]
else:
assert infs_mask.all()
if robust_any_difference(x, y):
msg = build_err_msg(
[x, y],
err_msg + '\ninf values mismatch:',
verbose=verbose, header=header,
names=names,
precision=precision)
raise AssertionError(msg)
try:
if strict:
cond = x.shape == y.shape and x.dtype == y.dtype
else:
cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
if not cond:
if x.shape != y.shape:
reason = f'\n(shapes {x.shape}, {y.shape} mismatch)'
else:
reason = f'\n(dtypes {x.dtype}, {y.dtype} mismatch)'
msg = build_err_msg([x, y],
err_msg
+ reason,
verbose=verbose, header=header,
names=names,
precision=precision)
raise AssertionError(msg)
flagged = np.bool(False)
if isnumber(x) and isnumber(y):
if equal_nan:
flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
if equal_inf:
# If equal_nan=True, skip comparing nans below for equality if they are
# also infs (e.g. inf+nanj) since that would always fail.
isinf_func = lambda xy: np.logical_and(np.isinf(xy), np.invert(flagged))
infs_mask = func_assert_same_pos(
x, y,
func=isinf_func,
hasval='inf')
assert_same_inf_values(x, y, infs_mask)
flagged |= infs_mask
elif istime(x) and istime(y):
# If one is datetime64 and the other timedelta64 there is no point
if equal_nan and x.dtype.type == y.dtype.type:
flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
elif isvstring(x) and isvstring(y):
dt = x.dtype
if equal_nan and dt == y.dtype and hasattr(dt, 'na_object'):
is_nan = (isinstance(dt.na_object, float) and
np.isnan(dt.na_object))
bool_errors = 0
try:
bool(dt.na_object)
except TypeError:
bool_errors = 1
if is_nan or bool_errors:
# nan-like NA object
flagged = func_assert_same_pos(
x, y, func=isnan, hasval=x.dtype.na_object)
if flagged.ndim > 0:
x, y = x[~flagged], y[~flagged]
# Only do the comparison if actual values are left
if x.size == 0:
return
elif flagged:
# no sense doing comparison if everything is flagged.
return
val = comparison(x, y)
invalids = np.logical_not(val)
if isinstance(val, bool):
cond = val
reduced = array([val])
else:
reduced = val.ravel()
cond = reduced.all()
# The below comparison is a hack to ensure that fully masked
# results, for which val.ravel().all() returns np.ma.masked,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
n_mismatch = reduced.size - reduced.sum(dtype=intp)
n_elements = flagged.size if flagged.ndim != 0 else reduced.size
percent_mismatch = 100 * n_mismatch / n_elements
remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} '
f'({percent_mismatch:.3g}%)']
if invalids.ndim != 0:
if flagged.ndim > 0:
positions = np.argwhere(np.asarray(~flagged))[invalids]
else:
positions = np.argwhere(np.asarray(invalids))
s = "\n".join(
[
f" {p.tolist()}: {ox if ox.ndim == 0 else ox[tuple(p)]} "
f"({names[0]}), {oy if oy.ndim == 0 else oy[tuple(p)]} "
f"({names[1]})"
for p in positions[:5]
]
)
if len(positions) == 1:
remarks.append(
f"Mismatch at index:\n{s}"
)
elif len(positions) <= 5:
remarks.append(
f"Mismatch at indices:\n{s}"
)
else:
remarks.append(
f"First 5 mismatches are at indices:\n{s}"
)
with errstate(all='ignore'):
# ignore errors for non-numeric types
with contextlib.suppress(TypeError):
error = abs(x - y)
if np.issubdtype(x.dtype, np.unsignedinteger):
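                            # Unsigned subtraction wraps around when y > x,
                            # so compute both orders and take the elementwise
                            # minimum to get the true |x - y|.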
error2 = abs(y - x)
np.minimum(error, error2, out=error)
reduced_error = error[invalids]
max_abs_error = max(reduced_error)
if getattr(error, 'dtype', object_) == object_:
remarks.append(
'Max absolute difference among violations: '
+ str(max_abs_error))
else:
remarks.append(
'Max absolute difference among violations: '
+ array2string(max_abs_error))
# note: this definition of relative error matches that one
# used by assert_allclose (found in np.isclose)
# Filter values where the divisor would be zero
nonzero = np.bool(y != 0)
nonzero_and_invalid = np.logical_and(invalids, nonzero)
if all(~nonzero_and_invalid):
max_rel_error = array(inf)
else:
nonzero_invalid_error = error[nonzero_and_invalid]
broadcasted_y = np.broadcast_to(y, error.shape)
nonzero_invalid_y = broadcasted_y[nonzero_and_invalid]
max_rel_error = max(nonzero_invalid_error
/ abs(nonzero_invalid_y))
if getattr(error, 'dtype', object_) == object_:
remarks.append(
'Max relative difference among violations: '
+ str(max_rel_error))
else:
remarks.append(
'Max relative difference among violations: '
+ array2string(max_rel_error))
err_msg = str(err_msg)
err_msg += '\n' + '\n'.join(remarks)
msg = build_err_msg([ox, oy], err_msg,
verbose=verbose, header=header,
names=names,
precision=precision)
raise AssertionError(msg)
except ValueError:
import traceback
efmt = traceback.format_exc()
header = f'error during assertion:\n\n{efmt}\n\n{header}'
msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
names=names, precision=precision)
raise ValueError(msg)
def assert_array_equal(actual, desired, err_msg='', verbose=True, *,
strict=False):
"""
Raises an AssertionError if two array_like objects are not equal.
Given two array_like objects, check that the shape is equal and all
elements of these objects are equal (but see the Notes for the special
handling of a scalar). An exception is raised at shape mismatch or
conflicting values. In contrast to the standard usage in numpy, NaNs
are compared like numbers, no assertion is raised if both objects have
NaNs in the same positions.
The usual caution for verifying equality with floating point numbers is
advised.
.. note:: When either `actual` or `desired` is already an instance of
`numpy.ndarray` and `desired` is not a ``dict``, the behavior of
``assert_equal(actual, desired)`` is identical to the behavior of this
function. Otherwise, this function performs `np.asanyarray` on the
inputs before comparison, whereas `assert_equal` defines special
comparison rules for common Python types. For example, only
`assert_equal` can be used to compare nested Python lists. In new code,
consider using only `assert_equal`, explicitly converting either
`actual` or `desired` to arrays if the behavior of `assert_array_equal`
is desired.
Parameters
----------
actual : array_like
The actual object to check.
desired : array_like
The desired, expected object.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
strict : bool, optional
If True, raise an AssertionError when either the shape or the data
type of the array_like objects does not match. The special
handling for scalars mentioned in the Notes section is disabled.
.. versionadded:: 1.24.0
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Notes
-----
When one of `actual` and `desired` is a scalar and the other is array_like, the
function checks that each element of the array_like is equal to the scalar.
Note that empty arrays are therefore considered equal to scalars.
    This behaviour can be disabled by setting ``strict=True``.
Examples
--------
The first assert does not raise an exception:
>>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
... [np.exp(0),2.33333, np.nan])
Assert fails with numerical imprecision with floats:
>>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan])
Traceback (most recent call last):
...
AssertionError:
Arrays are not equal
<BLANKLINE>
Mismatched elements: 1 / 3 (33.3%)
Mismatch at index:
[1]: 3.141592653589793 (ACTUAL), 3.1415926535897927 (DESIRED)
Max absolute difference among violations: 4.4408921e-16
Max relative difference among violations: 1.41357986e-16
ACTUAL: array([1. , 3.141593, nan])
DESIRED: array([1. , 3.141593, nan])
Use `assert_allclose` or one of the nulp (number of floating point values)
functions for these cases instead:
>>> np.testing.assert_allclose([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan],
... rtol=1e-10, atol=0)
As mentioned in the Notes section, `assert_array_equal` has special
handling for scalars. Here the test checks that each value in `x` is 3:
>>> x = np.full((2, 5), fill_value=3)
>>> np.testing.assert_array_equal(x, 3)
Use `strict` to raise an AssertionError when comparing a scalar with an
array:
>>> np.testing.assert_array_equal(x, 3, strict=True)
Traceback (most recent call last):
...
AssertionError:
Arrays are not equal
<BLANKLINE>
(shapes (2, 5), () mismatch)
ACTUAL: array([[3, 3, 3, 3, 3],
[3, 3, 3, 3, 3]])
DESIRED: array(3)
The `strict` parameter also ensures that the array data types match:
>>> x = np.array([2, 2, 2])
>>> y = np.array([2., 2., 2.], dtype=np.float32)
>>> np.testing.assert_array_equal(x, y, strict=True)
Traceback (most recent call last):
...
AssertionError:
Arrays are not equal
<BLANKLINE>
(dtypes int64, float32 mismatch)
ACTUAL: array([2, 2, 2])
DESIRED: array([2., 2., 2.], dtype=float32)
"""
__tracebackhide__ = True # Hide traceback for py.test
assert_array_compare(operator.__eq__, actual, desired, err_msg=err_msg,
verbose=verbose, header='Arrays are not equal',
strict=strict)
def assert_array_almost_equal(actual, desired, decimal=6, err_msg='',
verbose=True):
"""
Raises an AssertionError if two objects are not equal up to desired
precision.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
The test verifies identical shapes and that the elements of ``actual`` and
``desired`` satisfy::
abs(desired-actual) < 1.5 * 10**(-decimal)
That is a looser test than originally documented, but agrees with what the
actual implementation did up to rounding vagaries. An exception is raised
at shape mismatch or conflicting values. In contrast to the standard usage
in numpy, NaNs are compared like numbers, no assertion is raised if both
objects have NaNs in the same positions.
Parameters
----------
actual : array_like
The actual object to check.
desired : array_like
The desired, expected object.
decimal : int, optional
Desired precision, default is 6.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
the first assert does not raise an exception
>>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
... [1.0,2.333,np.nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33339,np.nan], decimal=5)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 5 decimals
<BLANKLINE>
Mismatched elements: 1 / 3 (33.3%)
Mismatch at index:
[1]: 2.33333 (ACTUAL), 2.33339 (DESIRED)
Max absolute difference among violations: 6.e-05
Max relative difference among violations: 2.57136612e-05
ACTUAL: array([1. , 2.33333, nan])
DESIRED: array([1. , 2.33339, nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33333, 5], decimal=5)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 5 decimals
<BLANKLINE>
nan location mismatch:
ACTUAL: array([1. , 2.33333, nan])
DESIRED: array([1. , 2.33333, 5. ])
"""
__tracebackhide__ = True # Hide traceback for py.test
from numpy._core import number, result_type
from numpy._core.numerictypes import issubdtype
def compare(x, y):
# make sure y is an inexact type to avoid abs(MIN_INT); will cause
# casting of x later.
dtype = result_type(y, 1.)
y = np.asanyarray(y, dtype)
z = abs(x - y)
if not issubdtype(z.dtype, number):
z = z.astype(np.float64) # handle object arrays
return z < 1.5 * 10.0**(-decimal)
assert_array_compare(compare, actual, desired, err_msg=err_msg,
verbose=verbose,
header=('Arrays are not almost equal to %d decimals' % decimal),
precision=decimal)
def assert_array_less(x, y, err_msg='', verbose=True, *, strict=False):
"""
Raises an AssertionError if two array_like objects are not ordered by less
than.
Given two array_like objects `x` and `y`, check that the shape is equal and
all elements of `x` are strictly less than the corresponding elements of
`y` (but see the Notes for the special handling of a scalar). An exception
is raised at shape mismatch or values that are not correctly ordered. In
contrast to the standard usage in NumPy, no assertion is raised if both
objects have NaNs in the same positions.
Parameters
----------
x : array_like
The smaller object to check.
y : array_like
The larger object to compare.
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
strict : bool, optional
If True, raise an AssertionError when either the shape or the data
type of the array_like objects does not match. The special
handling for scalars mentioned in the Notes section is disabled.
.. versionadded:: 2.0.0
Raises
------
AssertionError
If x is not strictly smaller than y, element-wise.
See Also
--------
assert_array_equal: tests objects for equality
assert_array_almost_equal: test objects for equality up to precision
Notes
-----
When one of `x` and `y` is a scalar and the other is array_like, the
function performs the comparison as though the scalar were broadcasted
to the shape of the array. This behaviour can be disabled with the `strict`
parameter.
Examples
--------
The following assertion passes because each finite element of `x` is
strictly less than the corresponding element of `y`, and the NaNs are in
corresponding locations.
>>> x = [1.0, 1.0, np.nan]
>>> y = [1.1, 2.0, np.nan]
>>> np.testing.assert_array_less(x, y)
The following assertion fails because the zeroth element of `x` is no
longer strictly less than the zeroth element of `y`.
>>> y[0] = 1
>>> np.testing.assert_array_less(x, y)
Traceback (most recent call last):
...
AssertionError:
Arrays are not strictly ordered `x < y`
<BLANKLINE>
Mismatched elements: 1 / 3 (33.3%)
Mismatch at index:
[0]: 1.0 (x), 1.0 (y)
Max absolute difference among violations: 0.
Max relative difference among violations: 0.
x: array([ 1., 1., nan])
y: array([ 1., 2., nan])
Here, `y` is a scalar, so each element of `x` is compared to `y`, and
the assertion passes.
>>> x = [1.0, 4.0]
>>> y = 5.0
>>> np.testing.assert_array_less(x, y)
However, with ``strict=True``, the assertion will fail because the shapes
do not match.
>>> np.testing.assert_array_less(x, y, strict=True)
Traceback (most recent call last):
...
AssertionError:
Arrays are not strictly ordered `x < y`
<BLANKLINE>
(shapes (2,), () mismatch)
x: array([1., 4.])
y: array(5.)
With ``strict=True``, the assertion also fails if the dtypes of the two
arrays do not match.
>>> y = [5, 5]
>>> np.testing.assert_array_less(x, y, strict=True)
Traceback (most recent call last):
...
AssertionError:
Arrays are not strictly ordered `x < y`
<BLANKLINE>
(dtypes float64, int64 mismatch)
x: array([1., 4.])
y: array([5, 5])
"""
__tracebackhide__ = True # Hide traceback for py.test
assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
verbose=verbose,
header='Arrays are not strictly ordered `x < y`',
equal_inf=False,
strict=strict,
names=('x', 'y'))
def runstring(astr, dict):
exec(astr, dict)
def assert_string_equal(actual, desired):
"""
Test if two strings are equal.
If the given strings are equal, `assert_string_equal` does nothing.
If they are not equal, an AssertionError is raised, and the diff
between the strings is shown.
Parameters
----------
actual : str
The string to test for equality against the expected string.
desired : str
The expected string.
Examples
--------
>>> np.testing.assert_string_equal('abc', 'abc')
>>> np.testing.assert_string_equal('abc', 'abcd')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
...
AssertionError: Differences in strings:
    - abc+ abcd?    +
"""
# delay import of difflib to reduce startup time
__tracebackhide__ = True # Hide traceback for py.test
import difflib
if not isinstance(actual, str):
raise AssertionError(repr(type(actual)))
if not isinstance(desired, str):
raise AssertionError(repr(type(desired)))
if desired == actual:
return
diff = list(difflib.Differ().compare(actual.splitlines(True),
desired.splitlines(True)))
diff_list = []
while diff:
d1 = diff.pop(0)
if d1.startswith(' '):
continue
if d1.startswith('- '):
l = [d1]
d2 = diff.pop(0)
if d2.startswith('? '):
l.append(d2)
d2 = diff.pop(0)
if not d2.startswith('+ '):
raise AssertionError(repr(d2))
l.append(d2)
if diff:
d3 = diff.pop(0)
if d3.startswith('? '):
l.append(d3)
else:
diff.insert(0, d3)
if d2[2:] == d1[2:]:
continue
diff_list.extend(l)
continue
raise AssertionError(repr(d1))
if not diff_list:
return
msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}"
if actual != desired:
raise AssertionError(msg)
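# A hedged illustration of the ``difflib.Differ`` stream the loop above
# consumes: entries start with '  ' (common), '- ' (only in `actual`),
# '+ ' (only in `desired`) or '? ' (intraline hint) -- exactly the prefix
# set the parser dispatches on.
import difflib
for _line in difflib.Differ().compare(['abc\n'], ['abcd\n']):
    print(repr(_line))
# prints: '- abc\n', '+ abcd\n', '?    +\n'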
def rundocs(filename=None, raise_on_error=True):
"""
Run doctests found in the given file.
By default `rundocs` raises an AssertionError on failure.
Parameters
----------
filename : str
The path to the file for which the doctests are run.
raise_on_error : bool
Whether to raise an AssertionError when a doctest fails. Default is
True.
Notes
-----
The doctests can be run by the user/developer by adding the ``doctests``
argument to the ``test()`` call. For example, to run all tests (including
doctests) for ``numpy.lib``:
>>> np.lib.test(doctests=True) # doctest: +SKIP
"""
import doctest
from numpy.distutils.misc_util import exec_mod_from_location
if filename is None:
f = sys._getframe(1)
filename = f.f_globals['__file__']
name = os.path.splitext(os.path.basename(filename))[0]
m = exec_mod_from_location(name, filename)
tests = doctest.DocTestFinder().find(m)
runner = doctest.DocTestRunner(verbose=False)
msg = []
if raise_on_error:
out = msg.append
else:
out = None
for test in tests:
runner.run(test, out=out)
if runner.failures > 0 and raise_on_error:
raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
def check_support_sve(__cache=[]):
"""
gh-22982
"""
if __cache:
return __cache[0]
import subprocess
cmd = 'lscpu'
try:
output = subprocess.run(cmd, capture_output=True, text=True)
result = 'sve' in output.stdout
except (OSError, subprocess.SubprocessError):
result = False
__cache.append(result)
return __cache[0]
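# The mutable default argument above doubles as a cross-call memo; the same
# pattern in isolation, with the probe replaced by a trivial timestamp:
import time
def _memo_probe(__cache=[]):
    if __cache:
        return __cache[0]
    __cache.append(time.monotonic())  # body runs exactly once per process
    return __cache[0]
assert _memo_probe() == _memo_probe()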
#
# assert_raises and assert_raises_regex are taken from unittest.
#
import unittest
|
KnownFailureException
|
python
|
openai__openai-python
|
src/openai/types/beta/realtime/input_audio_buffer_commit_event.py
|
{
"start": 233,
"end": 493
}
|
class ____(BaseModel):
type: Literal["input_audio_buffer.commit"]
"""The event type, must be `input_audio_buffer.commit`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
|
InputAudioBufferCommitEvent
|
python
|
huggingface__transformers
|
src/transformers/models/exaone4/modeling_exaone4.py
|
{
"start": 2239,
"end": 2966
}
|
class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Exaone4RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
Exaone4RMSNorm
|
python
|
donnemartin__interactive-coding-challenges
|
math_probability/sum_two/test_sum_two.py
|
{
"start": 18,
"end": 479
}
|
class ____(unittest.TestCase):
def test_sum_two(self):
solution = Solution()
self.assertRaises(TypeError, solution.sum_two, None)
self.assertEqual(solution.sum_two(5, 7), 12)
self.assertEqual(solution.sum_two(-5, -7), -12)
self.assertEqual(solution.sum_two(5, -7), -2)
print('Success: test_sum_two')
def main():
test = TestSumTwo()
test.test_sum_two()
if __name__ == '__main__':
main()
|
TestSumTwo
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 55667,
"end": 56283
}
|
class ____(BaseModel):
"""
Request body for Clear Task Instances endpoint.
"""
model_config = ConfigDict(
extra="forbid",
)
new_state: TaskInstanceState | None = None
note: Annotated[Note | None, Field(title="Note")] = None
include_upstream: Annotated[bool | None, Field(title="Include Upstream")] = False
include_downstream: Annotated[bool | None, Field(title="Include Downstream")] = False
include_future: Annotated[bool | None, Field(title="Include Future")] = False
include_past: Annotated[bool | None, Field(title="Include Past")] = False
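    # Note: ``extra="forbid"`` turns unknown JSON keys into validation errors,
    # and every field has a default, so an empty request body is itself valid.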
|
PatchTaskInstanceBody
|
python
|
networkx__networkx
|
networkx/classes/tests/test_reportviews.py
|
{
"start": 36337,
"end": 36620
}
|
class ____(TestMultiDegreeView):
GRAPH = nx.MultiDiGraph
dview = nx.reportviews.DiMultiDegreeView
def test_repr(self):
dv = self.G.degree()
rep = "DiMultiDegreeView({0: 1, 1: 4, 2: 2, 3: 4, 4: 2, 5: 1})"
assert repr(dv) == rep
|
TestDiMultiDegreeView
|
python
|
gevent__gevent
|
src/greentest/3.14/test_urllib2.py
|
{
"start": 13002,
"end": 14231
}
|
class ____:
def __init__(self):
self.level = 0
self.req_headers = []
self.data = None
self.raise_on_endheaders = False
self.sock = None
self._tunnel_headers = {}
def __call__(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.timeout = timeout
return self
def set_debuglevel(self, level):
self.level = level
def set_tunnel(self, host, port=None, headers=None):
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
def request(self, method, url, body=None, headers=None, *,
encode_chunked=False):
self.method = method
self.selector = url
if headers is not None:
self.req_headers += headers.items()
self.req_headers.sort()
if body:
self.data = body
self.encode_chunked = encode_chunked
if self.raise_on_endheaders:
raise OSError()
def getresponse(self):
return MockHTTPResponse(MockFile(), {}, 200, "OK")
def close(self):
pass
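# The mock above stands in for ``http.client.HTTPConnection``: ``__call__``
# plays the connection-class constructor, while ``request``/``getresponse``
# record what the handler under test sent and hand back a canned response.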
|
MockHTTPClass
|
python
|
Pylons__pyramid
|
src/pyramid/testing.py
|
{
"start": 805,
"end": 1019
}
|
class ____:
__parent__ = None
__name__ = None
def __init__(self, request):
if 'bfg.routes.matchdict' in request:
self.__dict__.update(request['bfg.routes.matchdict'])
|
DummyRootFactory
|
python
|
doocs__leetcode
|
solution/3700-3799/3745.Maximize Expression of Three Elements/Solution.py
|
{
"start": 0,
"end": 307
}
|
from math import inf
from typing import List
class ____:
def maximizeExpressionOfThree(self, nums: List[int]) -> int:
a = b = -inf
c = inf
for x in nums:
if x < c:
c = x
if x >= a:
a, b = x, a
elif x > b:
b = x
return a + b - c
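# Single-pass invariant: ``a`` and ``b`` hold the two largest values seen so
# far and ``c`` the running minimum, so the answer is a + b - c. A quick
# check on a made-up input (two largest are 7 and 4, minimum is 1):
assert Solution().maximizeExpressionOfThree([2, 7, 4, 1]) == 10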
|
Solution
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dataplex.py
|
{
"start": 39491,
"end": 46999
}
|
class ____(GoogleCloudBaseOperator):
"""
Runs an on-demand execution of a DataScan.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param data_scan_id: Required. Data Quality scan identifier.
    :param api_version: The version of the API to request, for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param asynchronous: Flag indicating that the Dataplex job should be run asynchronously.
        This is useful for submitting long-running jobs and waiting on them
        asynchronously using the DataplexDataQualityJobStatusSensor.
:param fail_on_dq_failure: If set to true and not all Data Quality scan rules have been passed,
an exception is thrown. If set to false and not all Data Quality scan rules have been passed,
execution will finish with success.
    :param result_timeout: Value in seconds for which the operator will wait for the Data Quality
        scan result when the flag `asynchronous = False`.
        Raises an exception if no result is found after the specified number of seconds.
    :param polling_interval_seconds: Time in seconds between polls for job completion.
        The value is considered only when running in deferrable mode. Must be greater than 0.
:param deferrable: Run operator in the deferrable mode.
:return: Dataplex Data Quality scan job id.
"""
template_fields = ("project_id", "data_scan_id", "impersonation_chain")
def __init__(
self,
project_id: str,
region: str,
data_scan_id: str,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
asynchronous: bool = False,
fail_on_dq_failure: bool = False,
result_timeout: float = 60.0 * 10,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.data_scan_id = data_scan_id
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.asynchronous = asynchronous
self.fail_on_dq_failure = fail_on_dq_failure
self.result_timeout = result_timeout
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
def execute(self, context: Context) -> str:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
result = hook.run_data_scan(
project_id=self.project_id,
region=self.region,
data_scan_id=self.data_scan_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
job_id = result.job.name.split("/")[-1]
if self.deferrable:
if self.asynchronous:
raise AirflowException(
"Both asynchronous and deferrable parameters were passed. Please, provide only one."
)
self.defer(
trigger=DataplexDataQualityJobTrigger(
job_id=job_id,
data_scan_id=self.data_scan_id,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
if not self.asynchronous:
job = hook.wait_for_data_scan_job(
job_id=job_id,
data_scan_id=self.data_scan_id,
project_id=self.project_id,
region=self.region,
result_timeout=self.result_timeout,
)
if job.state == DataScanJob.State.FAILED:
raise AirflowException(f"Data Quality job failed: {job_id}")
if job.state == DataScanJob.State.SUCCEEDED:
if not job.data_quality_result.passed:
if self.fail_on_dq_failure:
raise AirflowDataQualityScanException(
f"Data Quality job {job_id} execution failed due to failure of its scanning "
f"rules: {self.data_scan_id}"
)
else:
self.log.info("Data Quality job executed successfully.")
else:
self.log.info("Data Quality job execution returned status: %s", job.status)
return job_id
    def execute_complete(self, context, event=None) -> str:
        """
        Act as a callback for when the trigger fires; returns the job id immediately.
        Relies on the trigger to throw an exception, otherwise it assumes
        execution was successful.
        """
job_state = event["job_state"]
job_id = event["job_id"]
if job_state == DataScanJob.State.FAILED.name: # type: ignore
raise AirflowException(f"Job failed:\n{job_id}")
if job_state == DataScanJob.State.CANCELLED.name: # type: ignore
raise AirflowException(f"Job was cancelled:\n{job_id}")
if job_state == DataScanJob.State.SUCCEEDED.name: # type: ignore
job = event["job"]
if not job["data_quality_result"]["passed"]:
if self.fail_on_dq_failure:
raise AirflowDataQualityScanException(
f"Data Quality job {job_id} execution failed due to failure of its scanning "
f"rules: {self.data_scan_id}"
)
else:
self.log.info("Data Quality job executed successfully.")
return job_id
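# Hedged usage sketch (all IDs below are made-up placeholders): instantiate
# the operator above in a DAG with task_id="run_dq_scan",
# project_id="my-project", region="us-central1", data_scan_id="my-scan";
# set fail_on_dq_failure=True to turn failed scan rules into task failures,
# or asynchronous=True and poll with DataplexDataQualityJobStatusSensor.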
|
DataplexRunDataQualityScanOperator
|
python
|
python-openxml__python-docx
|
src/docx/oxml/table.py
|
{
"start": 30780,
"end": 33094
}
|
class ____(BaseOxmlElement):
"""``<w:trPr>`` element, defining table row properties."""
get_or_add_trHeight: Callable[[], CT_Height]
_tag_seq = (
"w:cnfStyle",
"w:divId",
"w:gridBefore",
"w:gridAfter",
"w:wBefore",
"w:wAfter",
"w:cantSplit",
"w:trHeight",
"w:tblHeader",
"w:tblCellSpacing",
"w:jc",
"w:hidden",
"w:ins",
"w:del",
"w:trPrChange",
)
gridAfter: CT_DecimalNumber | None = ZeroOrOne( # pyright: ignore[reportAssignmentType]
"w:gridAfter", successors=_tag_seq[4:]
)
gridBefore: CT_DecimalNumber | None = ZeroOrOne( # pyright: ignore[reportAssignmentType]
"w:gridBefore", successors=_tag_seq[3:]
)
trHeight: CT_Height | None = ZeroOrOne( # pyright: ignore[reportAssignmentType]
"w:trHeight", successors=_tag_seq[8:]
)
del _tag_seq
@property
def grid_after(self) -> int:
"""The number of unpopulated layout-grid cells at the end of this row."""
gridAfter = self.gridAfter
return 0 if gridAfter is None else gridAfter.val
@property
def grid_before(self) -> int:
"""The number of unpopulated layout-grid cells at the start of this row."""
gridBefore = self.gridBefore
return 0 if gridBefore is None else gridBefore.val
@property
def trHeight_hRule(self) -> WD_ROW_HEIGHT_RULE | None:
"""Return the value of `w:trHeight@w:hRule`, or |None| if not present."""
trHeight = self.trHeight
return None if trHeight is None else trHeight.hRule
@trHeight_hRule.setter
def trHeight_hRule(self, value: WD_ROW_HEIGHT_RULE | None):
if value is None and self.trHeight is None:
return
trHeight = self.get_or_add_trHeight()
trHeight.hRule = value
@property
def trHeight_val(self):
"""Return the value of `w:trHeight@w:val`, or |None| if not present."""
trHeight = self.trHeight
return None if trHeight is None else trHeight.val
@trHeight_val.setter
def trHeight_val(self, value: Length | None):
if value is None and self.trHeight is None:
return
trHeight = self.get_or_add_trHeight()
trHeight.val = value
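# Hedged usage sketch, with ``trPr`` an instance of the element above:
#   trPr.trHeight_val = Emu(914400)                  # get-or-add <w:trHeight>, 1 inch
#   trPr.trHeight_hRule = WD_ROW_HEIGHT_RULE.EXACTLY
# Setting either attribute to None when no <w:trHeight> exists is a no-op.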
|
CT_TrPr
|
python
|
walkccc__LeetCode
|
solutions/2953. Count Complete Substrings/2953.py
|
{
"start": 0,
"end": 1033
}
|
import collections
class ____:
def countCompleteSubstrings(self, word: str, k: int) -> int:
uniqueLetters = len(set(word))
return sum(self._countCompleteStrings(word, k, windowSize)
for windowSize in range(k, k * uniqueLetters + 1, k))
def _countCompleteStrings(self, word: str, k: int, windowSize: int) -> int:
"""
Returns the number of complete substrings of `windowSize` of `word`.
"""
res = 0
countLetters = 0 # the number of letters in the running substring
count = collections.Counter()
for i, c in enumerate(word):
count[c] += 1
countLetters += 1
if i > 0 and abs(ord(c) - ord(word[i - 1])) > 2:
count = collections.Counter()
# Start a new substring starting at word[i].
count[c] += 1
countLetters = 1
if countLetters == windowSize + 1:
count[word[i - windowSize]] -= 1
countLetters -= 1
if countLetters == windowSize:
res += all(freq == 0 or freq == k for freq in count.values())
return res
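# Example from the problem statement (LC 2953): a substring is complete when
# every character occurs exactly k times and adjacent letters differ by at
# most 2 in the alphabet; "igigee" with k = 2 has 3 such substrings.
assert Solution().countCompleteSubstrings('igigee', 2) == 3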
|
Solution
|
python
|
pytorch__pytorch
|
torch/testing/_internal/opinfo/definitions/fft.py
|
{
"start": 925,
"end": 29445
}
|
class ____(SpectralFuncInfo):
"""
    An OpInfo for a Python reference of a spectral (FFT-family) operation.
"""
def __init__(
self,
name, # the stringname of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant="",
**kwargs,
): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo = _find_referenced_opinfo(
torch_opinfo_name, torch_opinfo_variant, op_db=op_db
)
assert isinstance(self.torch_opinfo, SpectralFuncInfo)
inherited = self.torch_opinfo._original_spectral_func_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
super().__init__(**ukwargs)
def error_inputs_fft(op_info, device, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=torch.float32)
# Zero-dimensional tensor has no dimension to take FFT of
yield ErrorInput(
SampleInput(make_arg()),
error_type=IndexError,
error_regex="Dimension specified as -1 but tensor has no dimensions",
)
def error_inputs_fftn(op_info, device, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=torch.float32)
# Specifying a dimension on a zero-dimensional tensor
yield ErrorInput(
SampleInput(make_arg(), dim=(0,)),
error_type=IndexError,
error_regex="Dimension specified as 0 but tensor has no dimensions",
)
def sample_inputs_fft_with_min(
op_info, device, dtype, requires_grad=False, *, min_size, **kwargs
):
yield from sample_inputs_spectral_ops(
op_info, device, dtype, requires_grad, **kwargs
)
if TEST_WITH_ROCM:
# FIXME: Causes floating point exception on ROCm
return
# Check the "Invalid number of data points" error isn't too strict
# https://github.com/pytorch/pytorch/pull/109083
a = make_tensor(min_size, dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(a)
def sample_inputs_fftshift(op_info, device, dtype, requires_grad, **kwargs):
def mt(shape, **kwargs):
return make_tensor(
shape, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
yield SampleInput(mt((9, 10)))
yield SampleInput(mt((50,)), kwargs=dict(dim=0))
yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,)))
yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1)))
yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2)))
# Operator database
op_db: list[OpInfo] = [
SpectralFuncInfo(
"fft.fft",
aten_name="fft_fft",
decomp_aten_name="_fft_c2c",
ref=np.fft.fft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1),
error_inputs_func=error_inputs_fft,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
),
SpectralFuncInfo(
"fft.fft2",
aten_name="fft_fft2",
ref=np.fft.fft2,
decomp_aten_name="_fft_c2c",
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})],
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_complex_half_reference_testing",
device_type="cuda",
dtypes=[torch.complex32],
active_if=TEST_WITH_ROCM,
),
),
),
SpectralFuncInfo(
"fft.fftn",
aten_name="fft_fftn",
decomp_aten_name="_fft_c2c",
ref=np.fft.fftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})],
),
SpectralFuncInfo(
"fft.hfft",
aten_name="fft_hfft",
decomp_aten_name="_fft_c2r",
ref=np.fft.hfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=2),
error_inputs_func=error_inputs_fft,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
check_batched_gradgrad=False,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
"TestSchemaCheckModeOpInfo",
"test_schema_correctness",
dtypes=(torch.complex64, torch.complex128),
),
),
),
SpectralFuncInfo(
"fft.hfft2",
aten_name="fft_hfft2",
decomp_aten_name="_fft_c2r",
ref=scipy.fft.hfft2 if has_scipy_fft else None,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
"TestFFT",
"test_reference_nd",
),
],
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
"TestSchemaCheckModeOpInfo",
"test_schema_correctness",
),
# FIXME: errors are too large; needs investigation
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_complex_half_reference_testing",
device_type="cuda",
),
),
),
SpectralFuncInfo(
"fft.hfftn",
aten_name="fft_hfftn",
decomp_aten_name="_fft_c2r",
ref=scipy.fft.hfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
"TestFFT",
"test_reference_nd",
),
],
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
"TestSchemaCheckModeOpInfo",
"test_schema_correctness",
),
),
),
SpectralFuncInfo(
"fft.rfft",
aten_name="fft_rfft",
decomp_aten_name="_fft_r2c",
ref=np.fft.rfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1),
error_inputs_func=error_inputs_fft,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
skips=(),
check_batched_gradgrad=False,
),
SpectralFuncInfo(
"fft.rfft2",
aten_name="fft_rfft2",
decomp_aten_name="_fft_r2c",
ref=np.fft.rfft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
precisionOverride({torch.float: 1e-4}),
],
),
SpectralFuncInfo(
"fft.rfftn",
aten_name="fft_rfftn",
decomp_aten_name="_fft_r2c",
ref=np.fft.rfftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
precisionOverride({torch.float: 1e-4}),
],
),
SpectralFuncInfo(
"fft.ifft",
aten_name="fft_ifft",
decomp_aten_name="_fft_c2c",
ref=np.fft.ifft,
ndimensional=SpectralFuncType.OneD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1),
error_inputs_func=error_inputs_fft,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
),
SpectralFuncInfo(
"fft.ifft2",
aten_name="fft_ifft2",
decomp_aten_name="_fft_c2c",
ref=np.fft.ifft2,
ndimensional=SpectralFuncType.TwoD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncInfo(
"fft.ifftn",
aten_name="fft_ifftn",
decomp_aten_name="_fft_c2c",
ref=np.fft.ifftn,
ndimensional=SpectralFuncType.ND,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncInfo(
"fft.ihfft",
aten_name="fft_ihfft",
decomp_aten_name="_fft_r2c",
ref=np.fft.ihfft,
ndimensional=SpectralFuncType.OneD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fft,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
skips=(),
check_batched_grad=False,
),
SpectralFuncInfo(
"fft.ihfft2",
aten_name="fft_ihfft2",
decomp_aten_name="_fft_r2c",
ref=scipy.fft.ihfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.TwoD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=(
# The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]).
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"),
DecorateInfo(
precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd"
),
# Mismatched elements!
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"),
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warnings"),
),
),
SpectralFuncInfo(
"fft.ihfftn",
aten_name="fft_ihfftn",
decomp_aten_name="_fft_r2c",
ref=scipy.fft.ihfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.ND,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and(torch.bool),
        # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(
torch.bool, *(() if (not SM53OrLater) else (torch.half,))
),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
# The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]).
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"),
# Mismatched elements!
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"),
DecorateInfo(
precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd"
),
],
),
SpectralFuncInfo(
"fft.irfft",
aten_name="fft_irfft",
decomp_aten_name="_fft_c2r",
ref=np.fft.irfft,
ndimensional=SpectralFuncType.OneD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)),
error_inputs_func=error_inputs_fft,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
check_batched_gradgrad=False,
),
SpectralFuncInfo(
"fft.irfft2",
aten_name="fft_irfft2",
decomp_aten_name="_fft_c2r",
ref=np.fft.irfft2,
ndimensional=SpectralFuncType.TwoD,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncInfo(
"fft.irfftn",
aten_name="fft_irfftn",
decomp_aten_name="_fft_c2r",
ref=np.fft.irfftn,
ndimensional=SpectralFuncType.ND,
sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)),
error_inputs_func=error_inputs_fftn,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool,
*(() if (not SM53OrLater) else (torch.half, torch.complex32)),
),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
OpInfo(
"fft.fftshift",
dtypes=all_types_and_complex_and(
torch.bool, torch.bfloat16, torch.half, torch.chalf
),
sample_inputs_func=sample_inputs_fftshift,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo(
"fft.ifftshift",
dtypes=all_types_and_complex_and(
torch.bool, torch.bfloat16, torch.half, torch.chalf
),
sample_inputs_func=sample_inputs_fftshift,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
]
python_ref_db: list[OpInfo] = [
SpectralFuncPythonRefInfo(
"_refs.fft.fft",
torch_opinfo_name="fft.fft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.ifft",
torch_opinfo_name="fft.ifft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.rfft",
torch_opinfo_name="fft.rfft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.irfft",
torch_opinfo_name="fft.irfft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.hfft",
torch_opinfo_name="fft.hfft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.ihfft",
torch_opinfo_name="fft.ihfft",
),
SpectralFuncPythonRefInfo(
"_refs.fft.fftn",
torch_opinfo_name="fft.fftn",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.ifftn",
torch_opinfo_name="fft.ifftn",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.rfftn",
torch_opinfo_name="fft.rfftn",
),
SpectralFuncPythonRefInfo(
"_refs.fft.irfftn",
torch_opinfo_name="fft.irfftn",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.hfftn",
torch_opinfo_name="fft.hfftn",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.ihfftn",
torch_opinfo_name="fft.ihfftn",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4}),
"TestFFT",
"test_reference_nd",
),
# AssertionError: Reference result was farther (0.09746177145360499) from the precise
# computation than the torch result was (0.09111555632069855)
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_python_ref_torch_fallback",
dtypes=(torch.float16,),
device_type="cuda",
),
# AssertionError: Reference result was farther (0.0953431016138116) from the precise
# computation than the torch result was (0.09305490684430734)
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_python_ref_executor",
dtypes=(torch.float16,),
device_type="cuda",
),
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.fft2",
torch_opinfo_name="fft.fft2",
),
SpectralFuncPythonRefInfo(
"_refs.fft.ifft2",
torch_opinfo_name="fft.ifft2",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.rfft2",
torch_opinfo_name="fft.rfft2",
),
SpectralFuncPythonRefInfo(
"_refs.fft.irfft2",
torch_opinfo_name="fft.irfft2",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.hfft2",
torch_opinfo_name="fft.hfft2",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
"TestFFT",
"test_reference_nd",
)
],
),
SpectralFuncPythonRefInfo(
"_refs.fft.ihfft2",
torch_opinfo_name="fft.ihfft2",
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4}),
"TestFFT",
"test_reference_nd",
),
# FIXME:
# Reference result was farther (0.0953431016138116) from the precise computation
# than the torch result was (0.09305490684430734)!
DecorateInfo(
unittest.skip("Skipped!"),
"TestCommon",
"test_python_ref_executor",
device_type="cuda",
),
],
),
PythonRefInfo(
"_refs.fft.fftshift",
op_db=op_db,
torch_opinfo_name="fft.fftshift",
),
PythonRefInfo(
"_refs.fft.ifftshift",
op_db=op_db,
torch_opinfo_name="fft.ifftshift",
),
]
|
SpectralFuncPythonRefInfo
|
python
|
getsentry__sentry
|
src/sentry/utils/locking/manager.py
|
{
"start": 132,
"end": 568
}
|
class ____:
def __init__(self, backend: LockBackend) -> None:
self.backend = backend
def get(
self, key: str, duration: int, routing_key: str | None = None, name: str | None = None
) -> Lock:
"""
Retrieve a ``Lock`` instance.
"""
metrics.incr("lockmanager.get", tags={"lock_name": name} if name else None)
return Lock(self.backend, key, duration, routing_key)
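# Hedged usage sketch (backend construction elided; key name is made up):
#   locks = the manager above, built with a concrete LockBackend
#   with locks.get("emit:project:1", duration=60, name="emit").acquire():
#       ...  # at most one holder of this key for up to 60 seconds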
|
LockManager
|
python
|
google__jax
|
jax/_src/pjit.py
|
{
"start": 35700,
"end": 52959
}
|
class ____:
def __repr__(self): return "pytree leaf"
@util.cache(max_size=4096, trace_context_in_key=False)
def _process_in_axis_resources(in_shardings_treedef, in_shardings_leaves,
in_layouts_treedef, in_layouts_leaves,
in_avals, in_tree, debug_info: core.DebugInfo,
device_or_backend_set, kws):
if not kws:
in_tree, _ = treedef_children(in_tree)
orig_in_shardings = tree_unflatten(in_shardings_treedef, in_shardings_leaves)
# Only do this if original in_shardings are unspecified. If it is AUTO, go
# via flatten_axis_resources.
if isinstance(orig_in_shardings, UnspecifiedValue):
in_shardings_flat = (orig_in_shardings,) * len(in_avals)
else:
in_shardings_flat = flatten_axis_resources(
"pjit in_shardings", in_tree, orig_in_shardings, tupled_args=True)
in_layouts = tree_unflatten(in_layouts_treedef, in_layouts_leaves)
if in_layouts is None:
in_layouts_flat = (in_layouts,) * len(in_avals)
else:
in_layouts_flat = flatten_axis_resources(
"pjit in_layouts", in_tree, in_layouts, tupled_args=True)
if not config.dynamic_shapes.value:
pjit_check_aval_sharding(in_shardings_flat, in_avals,
debug_info.safe_arg_names(len(in_avals)),
"pjit arguments", allow_uneven_sharding=False)
check_aval_layout_compatibility(
in_layouts_flat, in_avals,
debug_info.safe_arg_names(len(in_avals)), "jit arguments") # type: ignore[arg-type]
return in_shardings_flat, in_layouts_flat
callsites_with_tracing_cache_miss: set[str] = set()
def diff_tracing_cache_keys(
k: tuple, oldk: tuple, debug_info: lu.DebugInfo) -> tuple[Sequence[str], int]:
"""Explanations of differences between the cache keys, along with diff sizes.
Result: a pair of a list of explanations for differences, and the total size
of the differences. The sizes are used to pick the old key with the smallest
different size for the explanation that is shown to the user.
"""
(fun_transforms_k, fun_params_k, fun_in_type_k,
(arg_in_type_k, _, arg_inline_k), ctx_k) = k
(fun_transforms_ok, fun_params_ok, fun_in_type_ok,
(arg_in_type_ok, _, arg_inline_ok), ctx_ok) = oldk
diffs: list[tuple[str, int]] = [] # each difference with its size
def unavailable(key_field: str, what_k, what_ok):
diffs.append(
(f"different {key_field}:\n now: {what_k}\n != before: {what_ok}.\n"
"explanation unavailable! "
"please open an issue at https://github.com/jax-ml/jax.",
10))
def list_diff_size(s1: Sequence, s2: Sequence) -> int:
min_len = min(len(s1), len(s2))
diff_size = max(len(s1), len(s2)) - min_len
diff_size += sum(e1 != e2 for e1, e2 in zip(s1[:min_len],
s2[:min_len]))
return diff_size
different_leaf_count = False
def explain_transform_argnums_partial(param_k: tuple, param_ok: tuple):
dyn_argnums_k, static_args_k = param_k
dyn_argnums_ok, static_args_ok = param_ok
if dyn_argnums_k != dyn_argnums_ok:
diffs.append(
("different static_argnums:\n"
f" dynamic argnums now {dyn_argnums_k} and before {dyn_argnums_ok}",
1))
if static_args_k != static_args_ok:
diffs.append(
("different value of static args:\n"
f" now {', '.join(repr(a.val) for a in static_args_k)}"
f" and before {', '.join(repr(a.val) for a in static_args_ok)}",
list_diff_size(static_args_k, static_args_ok)))
def explain_transform_argnames_partial(param_k: tuple, param_ok: tuple):
static_kwargs_k, = param_k
static_kwargs_ok, = param_ok
static_kwargs_k = [(k, v.val) for k, v in
sorted(static_kwargs_k.val.items())]
static_kwargs_ok = [(k, v.val) for k, v in
sorted(static_kwargs_ok.val.items())]
if static_kwargs_k != static_kwargs_ok:
diffs.append(
("different value of static kwargs:\n"
f" now {{{', '.join(f'{k}: {repr(v)}' for k, v in static_kwargs_k)}}}"
f" and before {{{', '.join(f'{k}: {repr(v)}' for k, v in static_kwargs_ok)}}}",
list_diff_size(static_kwargs_k, static_kwargs_ok)))
def explain_in_tree_diff(in_tree_k: PyTreeDef, in_tree_ok: PyTreeDef):
nonlocal different_leaf_count
different_leaf_count = (in_tree_k.num_leaves != in_tree_ok.num_leaves)
if not different_leaf_count:
# Look for the special case of passing positional args as kwargs or
# vice-versa; the common prefix of positional args match.
args_tree_k, kwargs_tree_k = treedef_children(in_tree_k)
nr_args_k = len(treedef_children(args_tree_k))
args_tree_ok, kwargs_tree_ok = treedef_children(in_tree_ok)
nr_args_ok = len(treedef_children(args_tree_k))
if (treedef_children(args_tree_k)[:min(nr_args_k, nr_args_ok)] ==
treedef_children(args_tree_ok)[:min(nr_args_k, nr_args_ok)]):
keys_k = kwargs_tree_k.node_data()[1] # type: ignore[index]
keys_ok = kwargs_tree_ok.node_data()[1] # type: ignore[index]
diffs.append(
(("different number of args and kwargs, but same total number.\n"
f" now {nr_args_k} args and kwargs "
f"with keys {keys_k}\n"
f" before {nr_args_ok} args and kwargs "
f"with keys {keys_ok}"),
abs(nr_args_ok - nr_args_k)))
return
in_tree_k_str = str(in_tree_k)
in_tree_k_str = (in_tree_k_str if len(in_tree_k_str) < 73
else in_tree_k_str[:73] + "...")
in_tree_ok_str = str(in_tree_ok)
in_tree_ok_str = (in_tree_ok_str if len(in_tree_ok_str) < 73
else in_tree_ok_str[:73] + "...")
diff = [f"different input pytree:\n now: {in_tree_k_str}\n"
f" before: {in_tree_ok_str}"]
errs = list(tree_util.equality_errors_pytreedef(in_tree_k, in_tree_ok))
for path, thing1, thing2, explanation in errs:
fst, *path = path # type: ignore
base = ["args", "kwargs"][fst.idx]
diff.append(
f" * at {base}{keystr(tuple(path))}, now {thing1} and before {thing2},"
f" so {explanation}")
diffs.append(("\n".join(diff), len(errs)))
def explain_args_type_diff(args_k: tuple[core.AbstractValue],
args_ok: tuple[core.AbstractValue]):
diff_size = 0
arg_names = debug_info.safe_arg_names(len(args_k))
def arg_type_to_str(at):
if hasattr(at, "str_short"):
return at.str_short(short_dtypes=True)
else:
return str(at)
args_k_str = ", ".join(f"{an}: {arg_type_to_str(at)}"
for an, at in zip(arg_names, args_k))
args_k_str = args_k_str if len(args_k_str) < 73 else args_k_str[:73] + "..."
diff = [f"different input types:\n types now: {args_k_str}"]
add_weak_type_hint = False
for name, arg_t_k, arg_t_ok in zip(arg_names, args_k, args_ok):
if arg_t_k == arg_t_ok: continue
this_arg_diff_size = 0
if type(arg_t_k) == type(arg_t_ok) == core.ShapedArray:
s1, s2 = arg_type_to_str(arg_t_k), arg_type_to_str(arg_t_ok)
this_arg_diff_size += list_diff_size(arg_t_k.shape, arg_t_ok.shape) # type: ignore
if arg_t_k.weak_type != arg_t_ok.weak_type: # type: ignore
s1 += f"{{weak_type={arg_t_k.weak_type}}}" # type: ignore
s2 += f"{{weak_type={arg_t_ok.weak_type}}}" # type: ignore
add_weak_type_hint = True
this_arg_diff_size += 1
elif arg_t_k.sharding != arg_t_ok.sharding: # type: ignore
s1 = arg_t_k.str_short(short_dtypes=True, mesh_axis_types=True) # type: ignore
s2 = arg_t_ok.str_short(short_dtypes=True, mesh_axis_types=True) # type: ignore
this_arg_diff_size += 1
else:
s1, s2 = str(arg_t_k), str(arg_t_ok)
diff_size += max(1, this_arg_diff_size)
diff.append(f" * at {name}, now {s1} and before {s2}")
if add_weak_type_hint:
diff.append(
"where weak_type=True often means a Python builtin numeric value, and \n"
"weak_type=False means a jax.Array.\n"
"See https://docs.jax.dev/en/latest/type_promotion.html#weak-types.")
diffs.append(("\n".join(diff), diff_size))
if fun_transforms_k != fun_transforms_ok:
if len(fun_transforms_k) != len(fun_transforms_ok):
different_leaf_count = True # Skip other more precise checks
unavailable("fun_transforms length",
fun_transforms_k, fun_transforms_ok)
else:
for i, (t, ot) in enumerate(zip(fun_transforms_k, fun_transforms_ok)):
t_name = t[0].__name__
if t == ot: continue
if t[0] != ot[0]:
unavailable(f"fun_transforms[{i}] transform", t, ot)
continue
if t_name == "flatten_fun":
explain_in_tree_diff(t[1][0], ot[1][0])
continue
if t_name == "_argnums_partial":
explain_transform_argnums_partial(t[1], ot[1])
continue
if t_name == "_argnames_partial":
explain_transform_argnames_partial(t[1], ot[1])
continue
unavailable(f"fun_transforms.{t_name} params", t[1:], ot[1:])
continue
# If we had different leaf counts, we can discard the _argnums_partial
# difference. That transform sometimes occurs before the flatten_fun
if different_leaf_count:
diffs = [d for d in diffs if "fun_transforms._argnums_partial" not in d[0]]
if fun_params_k != fun_params_ok:
unavailable("fun_params", fun_params_k, fun_params_ok)
if fun_in_type_k != fun_in_type_ok:
unavailable("fun_in_type", fun_params_k, fun_params_ok)
if arg_in_type_k != arg_in_type_ok and not different_leaf_count:
explain_args_type_diff(arg_in_type_k, arg_in_type_ok)
if arg_inline_k != arg_inline_ok:
unavailable("arg_inline", arg_inline_k, arg_inline_ok)
if ctx_k != ctx_ok:
assert len(ctx_k) == len(ctx_ok)
idxs = [f" [{i}]: now {c_k} and before {c_ok}"
for i, (c_k, c_ok) in enumerate(zip(ctx_k, ctx_ok)) if c_k != c_ok]
diffs.append(
("different tracing context, e.g. due to config or context manager.\n"
"found differences at positions\n" +
", and\n".join(idxs) +
"\ncompare to tuple returned by "
"config.trace_context() in jax/_src/config.py.",
len(idxs)))
if not diffs: # Should never happen, but let's not crash
unavailable("something (unexpected empty diffs)", k, oldk)
diffs_and_sizes = util.unzip2(sorted(diffs, key=lambda d: d[1]))
return (diffs_and_sizes[0], sum(diffs_and_sizes[1]))
def explain_tracing_cache_miss(
fun: lu.WrappedFun, unseen_f: bool, cache: dict,
key: tuple, elapsed_sec: float):
if config.check_tracer_leaks.value: return # TODO(mattjj): can remove this
if key[3][2].val: return # No explanations for "inline" functions
debug_info = fun.debug_info
func_filename = debug_info.func_filename
if func_filename and not source_info_util.is_user_filename(func_filename):
return
msg: list[str] = []
p = msg.append
done = lambda: logger.log(logging.WARNING, "\n".join(msg))
callsite = source_info_util.summarize(source_info_util.current())
p(f"TRACING CACHE MISS at {callsite} costing {elapsed_sec * 1e3:.3f} ms because:")
# have we seen this function before at all?
src_info = ""
if func_filename:
src_info += f" defined at {func_filename}"
if func_lineno := debug_info.func_lineno:
src_info += f":{func_lineno}"
func_name = debug_info.func_name
if unseen_f or not cache:
p(f" never seen function:\n {func_name} id={id(fun.f)}{src_info}")
if callsite in callsites_with_tracing_cache_miss:
p(" but seen another function defined on the same line; maybe the function is\n"
" being re-defined repeatedly, preventing caching?")
else:
callsites_with_tracing_cache_miss.add(callsite)
return done()
p(f" for {func_name}{src_info}")
# Do *not* remove the list() around the call to keys(). The cache may be
# updated concurrently by other threads, and we need to perform the iteration
# over the dictionary keys in a way that is concurrency safe. Here we are
# relying on an implementation behavior of CPython wherein the particular list
# constructor used here acts atomically.
# See https://github.com/jax-ml/jax/issues/30163
cache_keys = list(cache.keys())
diffs = [diff_tracing_cache_keys(key, ok, debug_info)
for ok in cache_keys if key != ok]
assert diffs, "we must find some diffs if key differs from all cache keys"
min_diff = min(diffs, key=lambda v: v[1])
smallest_diffs: Sequence[Sequence[str]] # the diffs for the closest keys
smallest_diffs = [d[0] for d in diffs if d[1] == min_diff[1]]
def indent_subsequent_lines(indent: int, msg: str) -> str:
return msg.replace("\n", "\n" + " " * indent)
def p_one_diff(diff: Sequence[str]):
for d in diff:
p(" * key with " + indent_subsequent_lines(4, d))
if len(smallest_diffs) == 1:
p(" all previously seen cache keys are different. Closest previous key:")
p_one_diff(smallest_diffs[0])
else:
p(" all previously seen cache keys are different. "
"Several previous keys are closest:")
for d in smallest_diffs:
p_one_diff(d)
done()
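# Hedged shape of the warning the function above logs (values illustrative,
# message strings taken from the construction in the body):
#   TRACING CACHE MISS at my_script.py:10 (<module>) costing 1.234 ms because:
#     for my_fn defined at my_script.py:3
#     all previously seen cache keys are different. Closest previous key:
#     * key with different input types:
#         types now: x: f32[4]
#       * at x, now f32[4] and before f32[8]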
@partial(lu.cache, explain=explain_tracing_cache_miss)
def _create_pjit_jaxpr(
fun: lu.WrappedFun,
in_type: core.InputType | Sequence[core.AbstractValue],
qdd_token: int,
ignored_inline: IgnoreKey
) -> tuple[core.ClosedJaxpr, list[core.Value], list[core.AbstractValue]]:
util.test_event("create_pjit_jaxpr")
del qdd_token # just part of the cache key
del ignored_inline # just for explain_cache_miss
if config.no_tracing.value:
raise RuntimeError(f"re-tracing function {fun.f} for `jit`, but "
"'no_tracing' is set")
with dispatch.log_elapsed_time(
"Finished tracing + transforming {fun_name} for pjit in {elapsed_time:.9f} sec",
fun_name=fun.__name__, event=dispatch.JAXPR_TRACE_EVENT):
if config.dynamic_shapes.value:
assert isinstance(in_type, core.InputType)
jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic2(
lu.annotate(fun, in_type))
else:
jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(fun, in_type)
if config.debug_key_reuse.value:
# Import here to avoid circular imports
from jax.experimental.key_reuse._core import check_key_reuse_jaxpr # pytype: disable=import-error
check_key_reuse_jaxpr(jaxpr)
# TODO(mattjj,yashkatariya): if we take the 'true' path then we *must* fall
# off the C++ dispatch fast path for correctness. Ensure that happens.
if any(isinstance(c, core.Tracer) or core.typeof(c).has_qdd for c in consts):
closed_jaxpr = pe.close_jaxpr(pe.convert_constvars_jaxpr(jaxpr))
final_consts = consts
else:
closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
final_consts = []
return closed_jaxpr, final_consts, global_out_avals
@util.cache(max_size=4096, trace_context_in_key=False)
def _check_and_canonicalize_out_shardings(
out_shardings_treedef, out_shardings_leaves, out_layouts_treedef,
out_layouts_leaves, out_tree, out_avals,
debug_info: core.DebugInfo,
device_or_backend_set):
orig_out_shardings = tree_unflatten(out_shardings_treedef, out_shardings_leaves)
if isinstance(orig_out_shardings, (UnspecifiedValue, Sharding)):
out_shardings_flat = (orig_out_shardings,) * len(out_avals)
else:
out_shardings_flat = flatten_axis_resources(
"pjit out_shardings", out_tree(), orig_out_shardings,
tupled_args=False)
out_layouts = tree_unflatten(out_layouts_treedef, out_layouts_leaves)
if out_layouts is None:
out_layouts_flat = (out_layouts,) * len(out_avals)
else:
out_layouts_flat = flatten_axis_resources(
"pjit out_layouts", out_tree(), out_layouts, tupled_args=False)
if not config.dynamic_shapes.value:
pjit_check_aval_sharding(
out_shardings_flat, out_avals,
debug_info.safe_result_paths(len(out_avals)),
"pjit outputs", allow_uneven_sharding=False)
check_aval_layout_compatibility(
out_layouts_flat, out_avals,
debug_info.safe_result_paths(len(out_avals)),
"jit outputs")
return out_shardings_flat, out_layouts_flat
_seen_qdds = weakref.WeakKeyDictionary() # type: ignore
def _seen_qdds_get(fun, in_type) -> list:
assert fun.in_type is None or fun.in_type == in_type
cache = _seen_qdds.setdefault(fun.f, defaultdict(list))
return cache[(fun.transforms, fun.params, in_type)]
def _qdd_cache_index(fun, in_type) -> int:
cases = _seen_qdds_get(fun, in_type)
for i, records in enumerate(cases):
for obj, qdd in records:
if core.cur_qdd(obj) != qdd: break
else:
return i
return len(cases)
def _qdd_cache_update(fun, in_type, i, consts, aval_qdds):
cases = _seen_qdds_get(fun, in_type)
if i == len(cases):
cases.append([(c, aval_qdd.qdd) for c, aval_qdd in zip(consts, aval_qdds)
if aval_qdd.has_qdd])
@dataclass(frozen=True)
|
PytreeLeaf
|
python
|
faif__python-patterns
|
patterns/creational/builder.py
|
{
"start": 2223,
"end": 2343
}
|
class ____:
def __repr__(self) -> str:
return "Floor: {0.floor} | Size: {0.size}".format(self)
|
ComplexBuilding
|
python
|
scrapy__scrapy
|
tests/test_pipeline_files.py
|
{
"start": 24834,
"end": 25865
}
|
class ____:
@inlineCallbacks
def test_persist(self):
data = b"TestFTPFilesStore: \xe2\x98\x83"
buf = BytesIO(data)
meta = {"foo": "bar"}
path = "full/filename"
with MockFTPServer() as ftp_server:
store = FTPFilesStore(ftp_server.url("/"))
empty_dict = yield store.stat_file(path, info=None)
assert empty_dict == {}
yield store.persist_file(path, buf, info=None, meta=meta, headers=None)
stat = yield store.stat_file(path, info=None)
assert "last_modified" in stat
assert "checksum" in stat
assert stat["checksum"] == "d113d66b2ec7258724a268bd88eef6b6"
path = f"{store.basedir}/{path}"
content = get_ftp_content_and_delete(
path,
store.host,
store.port,
store.username,
store.password,
store.USE_ACTIVE_MODE,
)
assert data == content
|
TestFTPFileStore
|
python
|
pypa__warehouse
|
warehouse/macaroons/caveats/__init__.py
|
{
"start": 1700,
"end": 2238
}
|
class ____(Caveat):
normalized_names: list[StrictStr]
def verify(self, request: Request, context: Any, permission: str) -> Result:
if not isinstance(context, Project):
return Failure("project-scoped token used outside of a project context")
if context.normalized_name not in self.normalized_names:
return Failure(
f"project-scoped token is not valid for project: {context.name!r}"
)
return Success()
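    # Note: the check above fails closed -- any non-Project context is
    # rejected outright, so the caveat can only ever authorize the projects
    # explicitly listed in ``normalized_names``.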
@as_caveat(tag=2)
@dataclass(frozen=True)
|
ProjectName
|
python
|
pypa__hatch
|
tests/backend/builders/test_wheel.py
|
{
"start": 15389,
"end": 18995
}
|
class ____:
def test_default(self, isolation):
builder = WheelBuilder(str(isolation))
        # Read the property twice: repeated access must return the same (empty) mapping.
        assert builder.config.shared_scripts == builder.config.shared_scripts == {}
def test_invalid_type(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"shared-scripts": 42}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Field `tool.hatch.build.targets.wheel.shared-scripts` must be a mapping"):
_ = builder.config.shared_scripts
def test_absolute(self, isolation):
config = {
"tool": {
"hatch": {"build": {"targets": {"wheel": {"shared-scripts": {str(isolation / "source"): "/target/"}}}}}
}
}
builder = WheelBuilder(str(isolation), config=config)
assert builder.config.shared_scripts == {str(isolation / "source"): "target"}
def test_relative(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"shared-scripts": {"../source": "/target/"}}}}}}}
builder = WheelBuilder(str(isolation / "foo"), config=config)
assert builder.config.shared_scripts == {str(isolation / "source"): "target"}
def test_source_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"shared-scripts": {"": "/target/"}}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(
ValueError,
match="Source #1 in field `tool.hatch.build.targets.wheel.shared-scripts` cannot be an empty string",
):
_ = builder.config.shared_scripts
def test_relative_path_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"shared-scripts": {"source": 0}}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(
TypeError,
match="Path for source `source` in field `tool.hatch.build.targets.wheel.shared-scripts` must be a string",
):
_ = builder.config.shared_scripts
def test_relative_path_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"shared-scripts": {"source": ""}}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(
ValueError,
match=(
"Path for source `source` in field `tool.hatch.build.targets.wheel.shared-scripts` "
"cannot be an empty string"
),
):
_ = builder.config.shared_scripts
def test_order(self, isolation):
config = {
"tool": {
"hatch": {
"build": {
"targets": {
"wheel": {
"shared-scripts": {
"../very-nested": "target1/embedded",
"../source1": "/target2/",
"../source2": "/target1/",
}
}
}
}
}
}
}
builder = WheelBuilder(str(isolation / "foo"), config=config)
assert builder.config.shared_scripts == {
str(isolation / "source2"): "target1",
str(isolation / "very-nested"): f"target1{os.sep}embedded",
str(isolation / "source1"): "target2",
}
|
TestSharedScripts
|
python
|
PrefectHQ__prefect
|
src/prefect/blocks/notifications.py
|
{
"start": 12336,
"end": 15074
}
|
class ____(AbstractAppriseNotificationBlock):
"""Enables sending notifications via Twilio SMS.
Find more on sending Twilio SMS messages in the [docs](https://www.twilio.com/docs/sms).
Examples:
Load a saved `TwilioSMS` block and send a message:
```python
from prefect.blocks.notifications import TwilioSMS
twilio_webhook_block = TwilioSMS.load("BLOCK_NAME")
twilio_webhook_block.notify("Hello from Prefect!")
```
"""
_description = "Enables sending notifications via Twilio SMS."
_block_type_name = "Twilio SMS"
_block_type_slug = "twilio-sms"
_logo_url = HttpUrl(
"https://cdn.sanity.io/images/3ugk85nk/production/8bd8777999f82112c09b9c8d57083ac75a4a0d65-250x250.png"
) # noqa
_documentation_url = HttpUrl(
"https://docs.prefect.io/latest/automate/events/automations-triggers#sending-notifications-with-automations"
)
account_sid: str = Field(
default=...,
description=(
"The Twilio Account SID - it can be found on the homepage "
"of the Twilio console."
),
)
auth_token: SecretStr = Field(
default=...,
description=(
"The Twilio Authentication Token - "
"it can be found on the homepage of the Twilio console."
),
)
from_phone_number: str = Field(
default=...,
description="The valid Twilio phone number to send the message from.",
examples=["18001234567"],
)
to_phone_numbers: list[str] = Field(
default=...,
description="A list of valid Twilio phone number(s) to send the message to.",
# not wrapped in brackets because of the way UI displays examples; in code should be ["18004242424"]
examples=["18004242424"],
)
def block_initialization(self) -> None:
try:
# Try importing for apprise>=1.18.0
from apprise.plugins.twilio import NotifyTwilio
except ImportError:
# Fallback for versions apprise<1.18.0
from apprise.plugins.NotifyTwilio import ( # pyright: ignore[reportMissingImports] this is a fallback
NotifyTwilio, # pyright: ignore[reportUnknownVariableType] incomplete type hints in apprise
)
url = SecretStr(
NotifyTwilio(
account_sid=self.account_sid,
auth_token=self.auth_token.get_secret_value(),
source=self.from_phone_number,
targets=self.to_phone_numbers,
).url() # pyright: ignore[reportUnknownMemberType, reportUnknownArgumentType] incomplete type hints in apprise
)
self._start_apprise_client(url)
|
TwilioSMS
|
python
|
django-import-export__django-import-export
|
import_export/resources.py
|
{
"start": 1414,
"end": 2270
}
|
class ____:
def __init__(self, resource, instance, new):
self.left = Diff._read_field_values(resource, instance)
self.right = []
self.new = new
def compare_with(self, resource, instance):
self.right = Diff._read_field_values(resource, instance)
def as_html(self):
data = []
dmp = diff_match_patch()
for v1, v2 in zip(self.left, self.right):
if v1 != v2 and self.new:
v1 = ""
diff = dmp.diff_main(force_str(v1), force_str(v2))
dmp.diff_cleanupSemantic(diff)
html = dmp.diff_prettyHtml(diff)
html = mark_safe(html)
data.append(html)
return data
@classmethod
def _read_field_values(cls, resource, instance):
return [f.export(instance) for f in resource.get_import_fields()]
|
Diff
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/ipynb/base.py
|
{
"start": 214,
"end": 1552
}
|
class ____(BaseReader):
"""Image parser."""
def __init__(
self,
parser_config: Optional[Dict] = None,
concatenate: bool = False,
):
"""Init params."""
self._parser_config = parser_config
self._concatenate = concatenate
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
if file.name.endswith(".ipynb"):
try:
import nbconvert
except ImportError:
raise ImportError("Please install nbconvert 'pip install nbconvert' ")
if fs:
with fs.open(file, encoding="utf-8") as f:
string = nbconvert.exporters.ScriptExporter().from_file(f)[0]
else:
string = nbconvert.exporters.ScriptExporter().from_file(file)[0]
# split each In[] cell into a separate string
splits = re.split(r"In\[\d+\]:", string)
# remove the first element, which is empty
splits.pop(0)
if self._concatenate:
docs = [Document(text="\n\n".join(splits), metadata=extra_info or {})]
else:
docs = [Document(text=s, metadata=extra_info or {}) for s in splits]
return docs
|
IPYNBReader
|
python
|
mlflow__mlflow
|
mlflow/genai/label_schemas/label_schemas.py
|
{
"start": 1929,
"end": 2814
}
|
class ____(InputType):
"""A multi-select dropdown for collecting assessments from stakeholders.
.. note::
This functionality is only available in Databricks. Please run
`pip install mlflow[databricks]` to use it.
"""
options: list[str]
"""List of available options for the multi-select categorical (dropdown)."""
def _to_databricks_input(self) -> "_InputCategoricalList":
"""Convert to the internal Databricks input type."""
from databricks.agents.review_app import label_schemas as _label_schemas
return _label_schemas.InputCategoricalList(options=self.options)
@classmethod
def _from_databricks_input(cls, input_obj: "_InputCategoricalList") -> "InputCategoricalList":
"""Create from the internal Databricks input type."""
return cls(options=input_obj.options)
@dataclass
|
InputCategoricalList
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/events.py
|
{
"start": 1466,
"end": 1874
}
|
class ____(NamedTuple):
"""An AssetKey with an (optional) partition key. Refers either to a non-partitioned asset or a
partition of a partitioned asset.
"""
asset_key: AssetKey
partition_key: Optional[str] = None
# This is currently used only for the asset partition wipe codepath. In the future, we can rename
# to AssetPartitionRange or similar for more general use.
|
AssetKeyPartitionKey
|
python
|
weaviate__weaviate-python-client
|
weaviate/users/sync.py
|
{
"start": 384,
"end": 601
}
|
class ____(_UsersExecutor[ConnectionSync]):
def __init__(self, connection: ConnectionSync):
super().__init__(connection)
self.db = _UsersDB(connection)
self.oidc = _UsersOIDC(connection)
|
_Users
|
python
|
ray-project__ray
|
python/ray/llm/_internal/common/base_pydantic.py
|
{
"start": 136,
"end": 1162
}
|
class ____(BaseModel):
# NOTE(edoakes): Pydantic protects the namespace `model_` by default and prints
# warnings if you define fields with that prefix. However, we added such fields
# before this behavior existed. To avoid spamming user-facing logs, we mark the
# namespace as not protected. This means we need to be careful about overriding
# internal attributes starting with `model_`.
# See: https://github.com/anyscale/ray-llm/issues/1425
model_config = ConfigDict(
protected_namespaces=tuple(),
extra="forbid",
)
@classmethod
def parse_yaml(cls: Type[ModelT], file, **kwargs) -> ModelT:
kwargs.setdefault("Loader", yaml.SafeLoader)
dict_args = yaml.load(file, **kwargs)
return cls.model_validate(dict_args)
@classmethod
def from_file(cls: Type[ModelT], path: str, **kwargs) -> ModelT:
"""Load a model from a YAML file path."""
with open(path, "r") as f:
return cls.parse_yaml(f, **kwargs)
|
BaseModelExtended
|
python
|
PrefectHQ__prefect
|
src/prefect/tasks.py
|
{
"start": 3277,
"end": 9991
}
|
class ____(TypedDict, total=False):
"""
A TypedDict representing all available task configuration options.
This can be used with `Unpack` to provide type hints for **kwargs.
"""
name: Optional[str]
description: Optional[str]
tags: Optional[Iterable[str]]
version: Optional[str]
cache_policy: Union[CachePolicy, type[NotSet]]
cache_key_fn: Union[
Callable[["TaskRunContext", dict[str, Any]], Optional[str]], None
]
cache_expiration: Optional[datetime.timedelta]
task_run_name: Optional[TaskRunNameValueOrCallable]
retries: Optional[int]
retry_delay_seconds: Union[
float, int, list[float], Callable[[int], list[float]], None
]
retry_jitter_factor: Optional[float]
persist_result: Optional[bool]
result_storage: Optional[ResultStorage]
result_serializer: Optional[ResultSerializer]
result_storage_key: Optional[str]
cache_result_in_memory: bool
timeout_seconds: Union[int, float, None]
log_prints: Optional[bool]
refresh_cache: Optional[bool]
on_completion: Optional[list[StateHookCallable]]
on_failure: Optional[list[StateHookCallable]]
on_running: Optional[list[StateHookCallable]]
on_rollback: Optional[list[Callable[["Transaction"], None]]]
on_commit: Optional[list[Callable[["Transaction"], None]]]
retry_condition_fn: Optional[RetryConditionCallable]
viz_return_value: Any
asset_deps: Optional[list[Union[Asset, str]]]
def task_input_hash(
context: "TaskRunContext", arguments: dict[str, Any]
) -> Optional[str]:
"""
A task cache key implementation which hashes all inputs to the task using a JSON or
cloudpickle serializer. If any arguments are not JSON serializable, the pickle
serializer is used as a fallback. If cloudpickle fails, this will return a null key
indicating that a cache key could not be generated for the given inputs.
Arguments:
context: the active `TaskRunContext`
arguments: a dictionary of arguments to be passed to the underlying task
Returns:
a string hash if hashing succeeded, else `None`
"""
return hash_objects(
# We use the task key to get the qualified name for the task and include the
# task functions `co_code` bytes to avoid caching when the underlying function
# changes
context.task.task_key,
context.task.fn.__code__.co_code.hex(),
arguments,
)
def exponential_backoff(backoff_factor: float) -> Callable[[int], list[float]]:
"""
A task retry backoff utility that configures exponential backoff for task retries.
The exponential backoff design matches the urllib3 implementation.
Arguments:
backoff_factor: the base delay for the first retry, subsequent retries will
increase the delay time by powers of 2.
Returns:
a callable that can be passed to the task constructor
"""
def retry_backoff_callable(retries: int) -> list[float]:
# no more than 50 retry delays can be configured on a task
retries = min(retries, 50)
return [backoff_factor * max(0, 2**r) for r in range(retries)]
return retry_backoff_callable
def _infer_parent_task_runs(
flow_run_context: Optional[FlowRunContext],
task_run_context: Optional[TaskRunContext],
parameters: dict[str, Any],
) -> list[TaskRunResult]:
"""
Attempt to infer the parent task runs for this task run based on the
provided flow run and task run contexts, as well as any parameters. It is
assumed that the task run is running within those contexts.
If any parameter comes from a running task run, that task run is considered
a parent. This is expected to happen when task inputs are yielded from
generator tasks.
"""
parents: list[TaskRunResult] = []
# check if this task has a parent task run based on running in another
# task run's existing context. A task run is only considered a parent if
# it is in the same flow run (because otherwise presumably the child is
# in a subflow, so the subflow serves as the parent) or if there is no
# flow run
if task_run_context:
# there is no flow run
if not flow_run_context:
parents.append(TaskRunResult(id=task_run_context.task_run.id))
# there is a flow run and the task run is in the same flow run
elif flow_run_context and task_run_context.task_run.flow_run_id == getattr(
flow_run_context.flow_run, "id", None
):
parents.append(TaskRunResult(id=task_run_context.task_run.id))
# parent dependency tracking: for every provided parameter value, try to
# load the corresponding task run state. If the task run state is still
# running, we consider it a parent task run. Note this is only done if
# there is an active flow run context because dependencies are only
# tracked within the same flow run.
if flow_run_context:
for v in parameters.values():
upstream_state = None
if isinstance(v, State):
upstream_state = v
elif isinstance(v, PrefectFuture):
upstream_state = v.state
else:
res = flow_run_context.run_results.get(id(v))
if res:
upstream_state, _ = res
if upstream_state and upstream_state.is_running():
parents.append(
TaskRunResult(id=upstream_state.state_details.task_run_id)
)
return parents
def _generate_task_key(fn: Callable[..., Any]) -> str:
"""Generate a task key based on the function name and source code.
We may eventually want some sort of top-level namespace here to
disambiguate tasks with the same function name in different modules,
in a more human-readable way, while avoiding relative import problems (see #12337).
As long as the task implementations are unique (even if named the same), we should
not have any collisions.
Args:
fn: The function to generate a task key for.
"""
if not hasattr(fn, "__qualname__"):
return to_qualified_name(type(fn))
qualname = fn.__qualname__.split(".")[-1]
try:
code_obj = getattr(fn, "__code__", None)
if code_obj is None:
code_obj = fn.__call__.__code__
except AttributeError:
raise AttributeError(
f"{fn} is not a standard Python function object and could not be converted to a task."
) from None
code_hash = (
h[:NUM_CHARS_DYNAMIC_KEY] if (h := hash_objects(code_obj)) else "unknown"
)
return f"{qualname}-{code_hash}"
|
TaskOptions
|
python
|
ray-project__ray
|
python/ray/serve/tests/test_config_files/sqlalchemy.py
|
{
"start": 42,
"end": 288
}
|
class ____:
def __init__(self):
import pymysql
from sqlalchemy import create_engine
pymysql.install_as_MySQLdb()
create_engine("mysql://some_wrong_url:3306").connect()
app = TestDeployment.bind()
|
TestDeployment
|
python
|
ray-project__ray
|
python/ray/train/tests/test_trainer_restore.py
|
{
"start": 1092,
"end": 1543
}
|
class ____(RuntimeError):
pass
def _failing_train_fn(config):
checkpoint = train.get_checkpoint()
it = 1
if checkpoint:
it = load_dict_checkpoint(checkpoint)["it"] + 1
print(f"\nLoading from checkpoint, which is at iteration {it}...\n")
with create_dict_checkpoint({"it": it}) as checkpoint:
train.report({"it": it}, checkpoint=checkpoint)
if it == 1:
raise _TestSpecificError
|
_TestSpecificError
|
python
|
sanic-org__sanic
|
sanic/exceptions.py
|
{
"start": 20575,
"end": 21532
}
|
class ____(HTTPException):
"""403 Forbidden
Args:
message (Optional[Union[str, bytes]], optional): The message to be sent to the client. If `None`
then the HTTP status 'Bad Request' will be sent. Defaults to `None`.
quiet (Optional[bool], optional): When `True`, the error traceback will be suppressed
from the logs. Defaults to `None`.
context (Optional[Dict[str, Any]], optional): Additional mapping of key/value data that will be
sent to the client upon exception. Defaults to `None`.
extra (Optional[Dict[str, Any]], optional): Additional mapping of key/value data that will NOT be
sent to the client when in PRODUCTION mode. Defaults to `None`.
headers (Optional[Dict[str, Any]], optional): Additional headers that should be sent with the HTTP
response. Defaults to `None`.
""" # noqa: E501
status_code = 403
quiet = True
|
Forbidden
|
python
|
scikit-learn__scikit-learn
|
sklearn/utils/tests/test_param_validation.py
|
{
"start": 1518,
"end": 24407
}
|
class ____(BaseEstimator):
"""An estimator to test the validation of estimator parameters."""
_parameter_constraints: dict = {"a": [Real]}
def __init__(self, a):
self.a = a
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X=None, y=None):
pass
@pytest.mark.parametrize("interval_type", [Integral, Real])
def test_interval_range(interval_type):
"""Check the range of values depending on closed."""
interval = Interval(interval_type, -2, 2, closed="left")
assert -2 in interval
assert 2 not in interval
interval = Interval(interval_type, -2, 2, closed="right")
assert -2 not in interval
assert 2 in interval
interval = Interval(interval_type, -2, 2, closed="both")
assert -2 in interval
assert 2 in interval
interval = Interval(interval_type, -2, 2, closed="neither")
assert -2 not in interval
assert 2 not in interval
@pytest.mark.parametrize("interval_type", [Integral, Real])
def test_interval_large_integers(interval_type):
"""Check that Interval constraint work with large integers.
non-regression test for #26648.
"""
interval = Interval(interval_type, 0, 2, closed="neither")
assert 2**65 not in interval
assert 2**128 not in interval
assert float(2**65) not in interval
assert float(2**128) not in interval
interval = Interval(interval_type, 0, 2**128, closed="neither")
assert 2**65 in interval
assert 2**128 not in interval
assert float(2**65) in interval
assert float(2**128) not in interval
assert 2**1024 not in interval
def test_interval_inf_in_bounds():
"""Check that inf is included iff a bound is closed and set to None.
Only valid for real intervals.
"""
interval = Interval(Real, 0, None, closed="right")
assert np.inf in interval
interval = Interval(Real, None, 0, closed="left")
assert -np.inf in interval
interval = Interval(Real, None, None, closed="neither")
assert np.inf not in interval
assert -np.inf not in interval
@pytest.mark.parametrize(
"interval",
[Interval(Real, 0, 1, closed="left"), Interval(Real, None, None, closed="both")],
)
def test_nan_not_in_interval(interval):
"""Check that np.nan is not in any interval."""
assert np.nan not in interval
@pytest.mark.parametrize(
"params, error, match",
[
(
{"type": Integral, "left": 1.0, "right": 2, "closed": "both"},
TypeError,
r"Expecting left to be an int for an interval over the integers",
),
(
{"type": Integral, "left": 1, "right": 2.0, "closed": "neither"},
TypeError,
"Expecting right to be an int for an interval over the integers",
),
(
{"type": Integral, "left": None, "right": 0, "closed": "left"},
ValueError,
r"left can't be None when closed == left",
),
(
{"type": Integral, "left": 0, "right": None, "closed": "right"},
ValueError,
r"right can't be None when closed == right",
),
(
{"type": Integral, "left": 1, "right": -1, "closed": "both"},
ValueError,
r"right can't be less than left",
),
],
)
def test_interval_errors(params, error, match):
"""Check that informative errors are raised for invalid combination of parameters"""
with pytest.raises(error, match=match):
Interval(**params)
def test_stroptions():
"""Sanity check for the StrOptions constraint"""
options = StrOptions({"a", "b", "c"}, deprecated={"c"})
assert options.is_satisfied_by("a")
assert options.is_satisfied_by("c")
assert not options.is_satisfied_by("d")
assert "'c' (deprecated)" in str(options)
def test_options():
"""Sanity check for the Options constraint"""
options = Options(Real, {-0.5, 0.5, np.inf}, deprecated={-0.5})
assert options.is_satisfied_by(-0.5)
assert options.is_satisfied_by(np.inf)
assert not options.is_satisfied_by(1.23)
assert "-0.5 (deprecated)" in str(options)
@pytest.mark.parametrize(
"type, expected_type_name",
[
(int, "int"),
(Integral, "int"),
(Real, "float"),
(np.ndarray, "numpy.ndarray"),
],
)
def test_instances_of_type_human_readable(type, expected_type_name):
"""Check the string representation of the _InstancesOf constraint."""
constraint = _InstancesOf(type)
assert str(constraint) == f"an instance of '{expected_type_name}'"
def test_hasmethods():
"""Check the HasMethods constraint."""
constraint = HasMethods(["a", "b"])
class _Good:
def a(self):
pass # pragma: no cover
def b(self):
pass # pragma: no cover
class _Bad:
def a(self):
pass # pragma: no cover
assert constraint.is_satisfied_by(_Good())
assert not constraint.is_satisfied_by(_Bad())
assert str(constraint) == "an object implementing 'a' and 'b'"
@pytest.mark.parametrize(
"constraint",
[
Interval(Real, None, 0, closed="left"),
Interval(Real, 0, None, closed="left"),
Interval(Real, None, None, closed="neither"),
StrOptions({"a", "b", "c"}),
MissingValues(),
MissingValues(numeric_only=True),
_VerboseHelper(),
HasMethods("fit"),
_IterablesNotString(),
_CVObjects(),
],
)
def test_generate_invalid_param_val(constraint):
"""Check that the value generated does not satisfy the constraint"""
bad_value = generate_invalid_param_val(constraint)
assert not constraint.is_satisfied_by(bad_value)
@pytest.mark.parametrize(
"integer_interval, real_interval",
[
(
Interval(Integral, None, 3, closed="right"),
Interval(RealNotInt, -5, 5, closed="both"),
),
(
Interval(Integral, None, 3, closed="right"),
Interval(RealNotInt, -5, 5, closed="neither"),
),
(
Interval(Integral, None, 3, closed="right"),
Interval(RealNotInt, 4, 5, closed="both"),
),
(
Interval(Integral, None, 3, closed="right"),
Interval(RealNotInt, 5, None, closed="left"),
),
(
Interval(Integral, None, 3, closed="right"),
Interval(RealNotInt, 4, None, closed="neither"),
),
(
Interval(Integral, 3, None, closed="left"),
Interval(RealNotInt, -5, 5, closed="both"),
),
(
Interval(Integral, 3, None, closed="left"),
Interval(RealNotInt, -5, 5, closed="neither"),
),
(
Interval(Integral, 3, None, closed="left"),
Interval(RealNotInt, 1, 2, closed="both"),
),
(
Interval(Integral, 3, None, closed="left"),
Interval(RealNotInt, None, -5, closed="left"),
),
(
Interval(Integral, 3, None, closed="left"),
Interval(RealNotInt, None, -4, closed="neither"),
),
(
Interval(Integral, -5, 5, closed="both"),
Interval(RealNotInt, None, 1, closed="right"),
),
(
Interval(Integral, -5, 5, closed="both"),
Interval(RealNotInt, 1, None, closed="left"),
),
(
Interval(Integral, -5, 5, closed="both"),
Interval(RealNotInt, -10, -4, closed="neither"),
),
(
Interval(Integral, -5, 5, closed="both"),
Interval(RealNotInt, -10, -4, closed="right"),
),
(
Interval(Integral, -5, 5, closed="neither"),
Interval(RealNotInt, 6, 10, closed="neither"),
),
(
Interval(Integral, -5, 5, closed="neither"),
Interval(RealNotInt, 6, 10, closed="left"),
),
(
Interval(Integral, 2, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="both"),
),
(
Interval(Integral, 1, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="both"),
),
],
)
def test_generate_invalid_param_val_2_intervals(integer_interval, real_interval):
"""Check that the value generated for an interval constraint does not satisfy any of
the interval constraints.
"""
bad_value = generate_invalid_param_val(constraint=real_interval)
assert not real_interval.is_satisfied_by(bad_value)
assert not integer_interval.is_satisfied_by(bad_value)
bad_value = generate_invalid_param_val(constraint=integer_interval)
assert not real_interval.is_satisfied_by(bad_value)
assert not integer_interval.is_satisfied_by(bad_value)
@pytest.mark.parametrize(
"constraint",
[
_ArrayLikes(),
_InstancesOf(list),
_Callables(),
_NoneConstraint(),
_RandomStates(),
_SparseMatrices(),
_Booleans(),
Interval(Integral, None, None, closed="neither"),
],
)
def test_generate_invalid_param_val_all_valid(constraint):
"""Check that the function raises NotImplementedError when there's no invalid value
for the constraint.
"""
with pytest.raises(NotImplementedError):
generate_invalid_param_val(constraint)
@pytest.mark.parametrize(
"constraint",
[
_ArrayLikes(),
_Callables(),
_InstancesOf(list),
_NoneConstraint(),
_RandomStates(),
_SparseMatrices(),
_Booleans(),
_VerboseHelper(),
MissingValues(),
MissingValues(numeric_only=True),
StrOptions({"a", "b", "c"}),
Options(Integral, {1, 2, 3}),
Interval(Integral, None, None, closed="neither"),
Interval(Integral, 0, 10, closed="neither"),
Interval(Integral, 0, None, closed="neither"),
Interval(Integral, None, 0, closed="neither"),
Interval(Real, 0, 1, closed="neither"),
Interval(Real, 0, None, closed="both"),
Interval(Real, None, 0, closed="right"),
HasMethods("fit"),
_IterablesNotString(),
_CVObjects(),
],
)
def test_generate_valid_param(constraint):
"""Check that the value generated does satisfy the constraint."""
value = generate_valid_param(constraint)
assert constraint.is_satisfied_by(value)
@pytest.mark.parametrize(
"constraint_declaration, value",
[
(Interval(Real, 0, 1, closed="both"), 0.42),
(Interval(Integral, 0, None, closed="neither"), 42),
(StrOptions({"a", "b", "c"}), "b"),
(Options(type, {np.float32, np.float64}), np.float64),
(callable, lambda x: x + 1),
(None, None),
("array-like", [[1, 2], [3, 4]]),
("array-like", np.array([[1, 2], [3, 4]])),
("sparse matrix", csr_matrix([[1, 2], [3, 4]])),
*[
("sparse matrix", container([[1, 2], [3, 4]]))
for container in CSR_CONTAINERS
],
("random_state", 0),
("random_state", np.random.RandomState(0)),
("random_state", None),
(_Class, _Class()),
(int, 1),
(Real, 0.5),
("boolean", False),
("verbose", 1),
("nan", np.nan),
(MissingValues(), -1),
(MissingValues(), -1.0),
(MissingValues(), 2**1028),
(MissingValues(), None),
(MissingValues(), float("nan")),
(MissingValues(), np.nan),
(MissingValues(), "missing"),
(HasMethods("fit"), _Estimator(a=0)),
("cv_object", 5),
],
)
def test_is_satisfied_by(constraint_declaration, value):
"""Sanity check for the is_satisfied_by method"""
constraint = make_constraint(constraint_declaration)
assert constraint.is_satisfied_by(value)
@pytest.mark.parametrize(
"constraint_declaration, expected_constraint_class",
[
(Interval(Real, 0, 1, closed="both"), Interval),
(StrOptions({"option1", "option2"}), StrOptions),
(Options(Real, {0.42, 1.23}), Options),
("array-like", _ArrayLikes),
("sparse matrix", _SparseMatrices),
("random_state", _RandomStates),
(None, _NoneConstraint),
(callable, _Callables),
(int, _InstancesOf),
("boolean", _Booleans),
("verbose", _VerboseHelper),
(MissingValues(numeric_only=True), MissingValues),
(HasMethods("fit"), HasMethods),
("cv_object", _CVObjects),
("nan", _NanConstraint),
(np.nan, _NanConstraint),
],
)
def test_make_constraint(constraint_declaration, expected_constraint_class):
"""Check that make_constraint dispatches to the appropriate constraint class"""
constraint = make_constraint(constraint_declaration)
assert constraint.__class__ is expected_constraint_class
def test_make_constraint_unknown():
"""Check that an informative error is raised when an unknown constraint is passed"""
with pytest.raises(ValueError, match="Unknown constraint"):
make_constraint("not a valid constraint")
def test_validate_params():
"""Check that validate_params works no matter how the arguments are passed"""
with pytest.raises(
InvalidParameterError, match="The 'a' parameter of _func must be"
):
_func("wrong", c=1)
with pytest.raises(
InvalidParameterError, match="The 'b' parameter of _func must be"
):
_func(*[1, "wrong"], c=1)
with pytest.raises(
InvalidParameterError, match="The 'c' parameter of _func must be"
):
_func(1, **{"c": "wrong"})
with pytest.raises(
InvalidParameterError, match="The 'd' parameter of _func must be"
):
_func(1, c=1, d="wrong")
# check in the presence of extra positional and keyword args
with pytest.raises(
InvalidParameterError, match="The 'b' parameter of _func must be"
):
_func(0, *["wrong", 2, 3], c=4, **{"e": 5})
with pytest.raises(
InvalidParameterError, match="The 'c' parameter of _func must be"
):
_func(0, *[1, 2, 3], c="four", **{"e": 5})
def test_validate_params_missing_params():
"""Check that no error is raised when there are parameters without
constraints
"""
@validate_params({"a": [int]}, prefer_skip_nested_validation=True)
def func(a, b):
pass
func(1, 2)
def test_decorate_validated_function():
"""Check that validate_params functions can be decorated"""
decorated_function = deprecated()(_func)
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
decorated_function(1, 2, c=3)
# outer decorator does not interfere with validation
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
with pytest.raises(
InvalidParameterError, match=r"The 'c' parameter of _func must be"
):
decorated_function(1, 2, c="wrong")
def test_validate_params_method():
"""Check that validate_params works with methods"""
with pytest.raises(
InvalidParameterError, match="The 'a' parameter of _Class._method must be"
):
_Class()._method("wrong")
# validated method can be decorated
with pytest.warns(FutureWarning, match="Function _deprecated_method is deprecated"):
with pytest.raises(
InvalidParameterError,
match="The 'a' parameter of _Class._deprecated_method must be",
):
_Class()._deprecated_method("wrong")
def test_validate_params_estimator():
"""Check that validate_params works with Estimator instances"""
# no validation in init
est = _Estimator("wrong")
with pytest.raises(
InvalidParameterError, match="The 'a' parameter of _Estimator must be"
):
est.fit()
def test_stroptions_deprecated_subset():
"""Check that the deprecated parameter must be a subset of options."""
with pytest.raises(ValueError, match="deprecated options must be a subset"):
StrOptions({"a", "b", "c"}, deprecated={"a", "d"})
def test_hidden_constraint():
"""Check that internal constraints are not exposed in the error message."""
@validate_params(
{"param": [Hidden(list), dict]}, prefer_skip_nested_validation=True
)
def f(param):
pass
# list and dict are valid params
f({"a": 1, "b": 2, "c": 3})
f([1, 2, 3])
with pytest.raises(
InvalidParameterError, match="The 'param' parameter"
) as exc_info:
f(param="bad")
# the list option is not exposed in the error message
err_msg = str(exc_info.value)
assert "an instance of 'dict'" in err_msg
assert "an instance of 'list'" not in err_msg
def test_hidden_stroptions():
"""Check that we can have 2 StrOptions constraints, one being hidden."""
@validate_params(
{"param": [StrOptions({"auto"}), Hidden(StrOptions({"warn"}))]},
prefer_skip_nested_validation=True,
)
def f(param):
pass
# "auto" and "warn" are valid params
f("auto")
f("warn")
with pytest.raises(
InvalidParameterError, match="The 'param' parameter"
) as exc_info:
f(param="bad")
# the "warn" option is not exposed in the error message
err_msg = str(exc_info.value)
assert "auto" in err_msg
assert "warn" not in err_msg
def test_validate_params_set_param_constraints_attribute():
"""Check that the validate_params decorator properly sets the parameter constraints
as attribute of the decorated function/method.
"""
assert hasattr(_func, "_skl_parameter_constraints")
assert hasattr(_Class()._method, "_skl_parameter_constraints")
def test_boolean_constraint_deprecated_int():
"""Check that validate_params raise a deprecation message but still passes
validation when using an int for a parameter accepting a boolean.
"""
@validate_params({"param": ["boolean"]}, prefer_skip_nested_validation=True)
def f(param):
pass
# True/False and np.bool_(True/False) are valid params
f(True)
f(np.bool_(False))
def test_no_validation():
"""Check that validation can be skipped for a parameter."""
@validate_params(
{"param1": [int, None], "param2": "no_validation"},
prefer_skip_nested_validation=True,
)
def f(param1=None, param2=None):
pass
# param1 is validated
with pytest.raises(InvalidParameterError, match="The 'param1' parameter"):
f(param1="wrong")
# param2 is not validated: any type is valid.
class SomeType:
pass
f(param2=SomeType)
f(param2=SomeType())
def test_pandas_na_constraint_with_pd_na():
"""Add a specific test for checking support for `pandas.NA`."""
pd = pytest.importorskip("pandas")
na_constraint = _PandasNAConstraint()
assert na_constraint.is_satisfied_by(pd.NA)
assert not na_constraint.is_satisfied_by(np.array([1, 2, 3]))
def test_iterable_not_string():
"""Check that a string does not satisfy the _IterableNotString constraint."""
constraint = _IterablesNotString()
assert constraint.is_satisfied_by([1, 2, 3])
assert constraint.is_satisfied_by(range(10))
assert not constraint.is_satisfied_by("some string")
def test_cv_objects():
"""Check that the _CVObjects constraint accepts all current ways
to pass cv objects."""
constraint = _CVObjects()
assert constraint.is_satisfied_by(5)
assert constraint.is_satisfied_by(LeaveOneOut())
assert constraint.is_satisfied_by([([1, 2], [3, 4]), ([3, 4], [1, 2])])
assert constraint.is_satisfied_by(None)
assert not constraint.is_satisfied_by("not a CV object")
def test_third_party_estimator():
"""Check that the validation from a scikit-learn estimator inherited by a third
party estimator does not impose a match between the dict of constraints and the
parameters of the estimator.
"""
class ThirdPartyEstimator(_Estimator):
def __init__(self, b):
self.b = b
super().__init__(a=0)
def fit(self, X=None, y=None):
super().fit(X, y)
# does not raise, even though "b" is not in the constraints dict and "a" is not
# a parameter of the estimator.
ThirdPartyEstimator(b=0).fit()
def test_interval_real_not_int():
"""Check for the type RealNotInt in the Interval constraint."""
constraint = Interval(RealNotInt, 0, 1, closed="both")
assert constraint.is_satisfied_by(1.0)
assert not constraint.is_satisfied_by(1)
def test_real_not_int():
"""Check for the RealNotInt type."""
assert isinstance(1.0, RealNotInt)
assert not isinstance(1, RealNotInt)
assert isinstance(np.float64(1), RealNotInt)
assert not isinstance(np.int64(1), RealNotInt)
def test_skip_param_validation():
"""Check that param validation can be skipped using config_context."""
@validate_params({"a": [int]}, prefer_skip_nested_validation=True)
def f(a):
pass
with pytest.raises(InvalidParameterError, match="The 'a' parameter"):
f(a="1")
# does not raise
with config_context(skip_parameter_validation=True):
f(a="1")
@pytest.mark.parametrize("prefer_skip_nested_validation", [True, False])
def test_skip_nested_validation(prefer_skip_nested_validation):
"""Check that nested validation can be skipped."""
@validate_params({"a": [int]}, prefer_skip_nested_validation=True)
def f(a):
pass
@validate_params(
{"b": [int]},
prefer_skip_nested_validation=prefer_skip_nested_validation,
)
def g(b):
# calls f with a bad parameter type
return f(a="invalid_param_value")
# Validation for g is never skipped.
with pytest.raises(InvalidParameterError, match="The 'b' parameter"):
g(b="invalid_param_value")
if prefer_skip_nested_validation:
g(b=1) # does not raise because inner f is not validated
else:
with pytest.raises(InvalidParameterError, match="The 'a' parameter"):
g(b=1)
@pytest.mark.parametrize(
"skip_parameter_validation, prefer_skip_nested_validation, expected_skipped",
[
(True, True, True),
(True, False, True),
(False, True, True),
(False, False, False),
],
)
def test_skip_nested_validation_and_config_context(
skip_parameter_validation, prefer_skip_nested_validation, expected_skipped
):
"""Check interaction between global skip and local skip."""
@validate_params(
{"a": [int]}, prefer_skip_nested_validation=prefer_skip_nested_validation
)
def g(a):
return get_config()["skip_parameter_validation"]
with config_context(skip_parameter_validation=skip_parameter_validation):
actual_skipped = g(1)
assert actual_skipped == expected_skipped
|
_Estimator
|
python
|
docker__docker-py
|
tests/integration/api_image_test.py
|
{
"start": 3389,
"end": 4012
}
|
class ____(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(TEST_IMG, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
assert 'Id' in res
img_id = res['Id']
self.tmp_imgs.append(img_id)
logs = self.client.remove_image(img_id, force=True)
assert {"Deleted": img_id} in logs
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
assert len(res) == 0
|
RemoveImageTest
|
python
|
django-haystack__django-haystack
|
test_haystack/test_fields.py
|
{
"start": 18940,
"end": 19508
}
|
class ____(TestCase):
def test_init(self):
try:
foo = FacetCharField(model_attr="foo")
foo_exact = FacetCharField(facet_for="bar")
except:
self.fail()
self.assertEqual(foo.facet_for, None)
self.assertEqual(foo_exact.null, True)
self.assertEqual(foo_exact.facet_for, "bar")
def test_prepare(self):
mock = MockModel()
mock.user = "daniel"
author = FacetCharField(model_attr="user")
self.assertEqual(author.prepare(mock), "daniel")
|
FacetCharFieldTestCase
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/endpoints/test_organization_available_action_index.py
|
{
"start": 1117,
"end": 22846
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-organization-available-action-index"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.registry = Registry[type[ActionHandler]](enable_reverse_lookup=False)
self.registry_patcher = patch(
"sentry.workflow_engine.endpoints.organization_available_action_index.action_handler_registry",
new=self.registry,
)
self.registry_patcher.start()
self.plugin_registry = PluginManager()
self.plugins_registry_patcher = patch(
"sentry.workflow_engine.processors.action.plugins",
new=self.plugin_registry,
)
self.plugins_registry_patcher.start()
def tearDown(self) -> None:
super().tearDown()
self.registry_patcher.stop()
self.plugins_registry_patcher.stop()
def setup_email(self) -> None:
@self.registry.register(Action.Type.EMAIL)
@dataclass(frozen=True)
class EmailActionHandler(ActionHandler):
group = ActionHandler.Group.NOTIFICATION
config_schema = {}
data_schema = {}
def setup_integrations(self) -> None:
@self.registry.register(Action.Type.SLACK)
@dataclass(frozen=True)
class SlackActionHandler(IntegrationActionHandler):
group = ActionHandler.Group.NOTIFICATION
provider_slug = IntegrationProviderSlug.SLACK
config_schema = {}
data_schema = {}
token = "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"
self.slack_integration = self.create_integration(
organization=self.organization,
external_id="1",
name="My Slack Integration",
provider="slack",
metadata={"access_token": token, "installation_type": "born_as_bot"},
)
@self.registry.register(Action.Type.GITHUB)
@dataclass(frozen=True)
class GithubActionHandler(IntegrationActionHandler):
group = ActionHandler.Group.TICKET_CREATION
provider_slug = IntegrationProviderSlug.GITHUB
config_schema = {}
data_schema = {}
token = "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"
self.github_integration = self.create_integration(
organization=self.organization,
external_id="1",
name="My GitHub Integration",
provider="github",
metadata={"access_token": token, "installation_type": "born_as_bot"},
)
# should not return integrations that are not installed
@self.registry.register(Action.Type.MSTEAMS)
@dataclass(frozen=True)
class MSTeamsActionHandler(IntegrationActionHandler):
group = ActionHandler.Group.NOTIFICATION
provider_slug = IntegrationProviderSlug.MSTEAMS
config_schema = {}
data_schema = {}
def setup_integrations_with_services(self) -> None:
@self.registry.register(Action.Type.PAGERDUTY)
@dataclass(frozen=True)
class PagerdutyActionHandler(IntegrationActionHandler):
group = ActionHandler.Group.TICKET_CREATION
provider_slug = IntegrationProviderSlug.PAGERDUTY
config_schema = {}
data_schema = {}
services = [
{
"type": "service",
"integration_key": "PND4F9",
"service_id": "123",
"service_name": "moo-deng",
},
{
"type": "service",
"integration_key": "PND4F98",
"service_id": "234",
"service_name": "moo-waan",
},
]
self.pagerduty_integration, org_integration = self.create_provider_integration_for(
self.organization,
self.user,
provider="pagerduty",
name="Example PagerDuty",
external_id="example-pagerduty",
metadata={"services": services},
)
with assume_test_silo_mode(SiloMode.CONTROL):
self.pagerduty_service_1 = add_service(
org_integration,
service_name=services[0]["service_name"],
integration_key=services[0]["integration_key"],
)
self.pagerduty_service_2 = add_service(
org_integration,
service_name=services[1]["service_name"],
integration_key=services[1]["integration_key"],
)
@self.registry.register(Action.Type.OPSGENIE)
@dataclass(frozen=True)
class OpsgenieActionHandler(IntegrationActionHandler):
group = ActionHandler.Group.TICKET_CREATION
provider_slug = IntegrationProviderSlug.OPSGENIE
config_schema = {}
data_schema = {}
metadata = {
"api_key": "1234-ABCD",
"base_url": "https://api.opsgenie.com/",
"domain_name": "test-app.app.opsgenie.com",
}
self.og_team = {"id": "123-id", "team": "cool-team", "integration_key": "1234-5678"}
self.opsgenie_integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="opsgenie",
name="test-app",
external_id="test-app",
metadata=metadata,
)
with assume_test_silo_mode(SiloMode.CONTROL):
self.opsgenie_integration.add_organization(self.organization, self.user)
self.org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=self.opsgenie_integration.id
)
self.org_integration.config = {"team_table": [self.og_team]}
self.org_integration.save()
def setup_sentry_apps(self) -> None:
@self.registry.register(Action.Type.SENTRY_APP)
@dataclass(frozen=True)
class SentryAppActionHandler(ActionHandler):
group = ActionHandler.Group.OTHER
config_schema = {}
data_schema = {}
self.no_component_sentry_app = self.create_sentry_app(
name="Poppy's Fire Sentry App",
organization=self.organization,
is_alertable=True,
)
self.no_component_sentry_app_installation = self.create_sentry_app_installation(
slug=self.no_component_sentry_app.slug, organization=self.organization
)
self.sentry_app_settings_schema = self.create_alert_rule_action_schema()
self.sentry_app = self.create_sentry_app(
name="Moo Deng's Fire Sentry App",
organization=self.organization,
schema={
"elements": [
self.sentry_app_settings_schema,
]
},
is_alertable=True,
)
self.sentry_app_installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.organization
)
# should not return sentry apps that are not alertable
self.not_alertable_sentry_app = self.create_sentry_app(
name="Not Alertable Sentry App",
organization=self.organization,
is_alertable=False,
)
self.not_alertable_sentry_app_installation = self.create_sentry_app_installation(
slug=self.not_alertable_sentry_app.slug, organization=self.organization
)
self.not_alertable_sentry_app = self.create_sentry_app(
name="Not Alertable Sentry App With Component",
organization=self.organization,
schema={
"elements": [
self.sentry_app_settings_schema,
]
},
is_alertable=False,
)
self.not_alertable_sentry_app_with_component_installation = (
self.create_sentry_app_installation(
slug=self.not_alertable_sentry_app.slug, organization=self.organization
)
)
# should not return sentry apps that are not installed
self.create_sentry_app(
name="Bad Sentry App",
organization=self.organization,
is_alertable=True,
)
def setup_webhooks(self) -> None:
@self.registry.register(Action.Type.WEBHOOK)
@dataclass(frozen=True)
class WebhookActionHandler(ActionHandler):
group = ActionHandler.Group.OTHER
config_schema = {}
data_schema = {}
self.plugin_registry.register(WebHooksPlugin)
self.webhooks_plugin = self.plugin_registry.get(WebHooksPlugin.slug)
self.webhooks_plugin.enable(self.project)
self.plugin_registry.register(SlackPlugin)
self.slack_plugin = self.plugin_registry.get(SlackPlugin.slug)
self.slack_plugin.enable(self.project)
# each plugin should only be returned once, even if it's enabled for multiple projects
self.slack_plugin.enable(self.create_project())
# non notification plugins should not be returned
self.plugin_registry.register(TrelloPlugin)
self.trello_plugin = self.plugin_registry.get(TrelloPlugin.slug)
self.trello_plugin.enable(self.project)
# plugins that are not enabled should not be returned
self.plugin_registry.register(PagerDutyPlugin)
def test_simple(self) -> None:
self.setup_email()
response = self.get_success_response(
self.organization.slug,
status_code=200,
)
assert len(response.data) == 1
assert response.data == [
{
"type": Action.Type.EMAIL,
"handlerGroup": ActionHandler.Group.NOTIFICATION.value,
"configSchema": {},
"dataSchema": {},
}
]
def test_simple_integrations(self) -> None:
self.setup_integrations()
response = self.get_success_response(
self.organization.slug,
status_code=200,
)
assert len(response.data) == 2
assert response.data == [
# notification actions first
{
"type": Action.Type.SLACK,
"handlerGroup": ActionHandler.Group.NOTIFICATION.value,
"configSchema": {},
"dataSchema": {},
"integrations": [
{"id": str(self.slack_integration.id), "name": self.slack_integration.name}
],
},
# then ticket creation actions
{
"type": Action.Type.GITHUB,
"handlerGroup": ActionHandler.Group.TICKET_CREATION.value,
"configSchema": {},
"dataSchema": {},
"integrations": [
{"id": str(self.github_integration.id), "name": self.github_integration.name}
],
},
]
@with_feature({"organizations:integrations-ticket-rules": False})
def test_does_not_return_ticket_actions_without_feature(self) -> None:
self.setup_integrations()
response = self.get_success_response(
self.organization.slug,
status_code=200,
)
assert len(response.data) == 1
assert response.data == [
# only notification actions are returned
{
"type": Action.Type.SLACK,
"handlerGroup": ActionHandler.Group.NOTIFICATION.value,
"configSchema": {},
"dataSchema": {},
"integrations": [
{"id": str(self.slack_integration.id), "name": self.slack_integration.name}
],
}
]
def test_integrations_with_services(self) -> None:
self.setup_integrations_with_services()
response = self.get_success_response(
self.organization.slug,
status_code=200,
)
assert len(response.data) == 2
assert response.data == [
{
"type": Action.Type.OPSGENIE,
"handlerGroup": ActionHandler.Group.TICKET_CREATION.value,
"configSchema": {},
"dataSchema": {},
"integrations": [
{
"id": str(self.opsgenie_integration.id),
"name": self.opsgenie_integration.name,
"services": [
{
"id": str(self.og_team["id"]),
"name": self.og_team["team"],
},
],
}
],
},
{
"type": Action.Type.PAGERDUTY,
"handlerGroup": ActionHandler.Group.TICKET_CREATION.value,
"configSchema": {},
"dataSchema": {},
"integrations": [
{
"id": str(self.pagerduty_integration.id),
"name": self.pagerduty_integration.name,
"services": [
{
"id": str(self.pagerduty_service_1["id"]),
"name": self.pagerduty_service_1["service_name"],
},
{
"id": str(self.pagerduty_service_2["id"]),
"name": self.pagerduty_service_2["service_name"],
},
],
},
],
},
]
@patch("sentry.sentry_apps.components.SentryAppComponentPreparer.run")
def test_sentry_apps(self, mock_sentry_app_component_preparer: MagicMock) -> None:
self.setup_sentry_apps()
response = self.get_success_response(
self.organization.slug,
status_code=200,
)
# should only return the sentry app with a component
assert len(response.data) == 1
assert response.data == [
{
"type": Action.Type.SENTRY_APP,
"handlerGroup": ActionHandler.Group.OTHER.value,
"configSchema": {},
"dataSchema": {},
"sentryApp": {
"id": str(self.sentry_app.id),
"name": self.sentry_app.name,
"installationId": str(self.sentry_app_installation.id),
"installationUuid": str(self.sentry_app_installation.uuid),
"status": SentryAppStatus.as_str(self.sentry_app.status),
"settings": self.sentry_app_settings_schema["settings"],
"title": self.sentry_app_settings_schema["title"],
},
},
]
@patch(
"sentry.workflow_engine.endpoints.organization_available_action_index.prepare_ui_component"
)
def test_sentry_apps_filters_failed_component_preparation(
self, mock_prepare_ui_component: MagicMock
) -> None:
"""Test that sentry apps whose components fail to prepare are filtered out"""
self.setup_sentry_apps()
# make prepare_ui_component return None to simulate a broken app
mock_prepare_ui_component.return_value = None
response = self.get_success_response(
self.organization.slug,
status_code=200,
)
# verify prepare_ui_component was called
assert mock_prepare_ui_component.called
# should return no sentry apps since component preparation failed
assert len(response.data) == 0
def test_webhooks(self) -> None:
self.setup_webhooks()
response = self.get_success_response(
self.organization.slug,
status_code=200,
)
assert len(response.data) == 1
assert response.data == [
{
"type": Action.Type.WEBHOOK,
"handlerGroup": ActionHandler.Group.OTHER.value,
"configSchema": {},
"dataSchema": {},
"services": [
{"slug": "slack", "name": "(Legacy) Slack"},
{"slug": "webhooks", "name": "WebHooks"},
],
}
]
@patch("sentry.sentry_apps.components.SentryAppComponentPreparer.run")
def test_actions_sorting(self, mock_sentry_app_component_preparer: MagicMock) -> None:
self.setup_sentry_apps()
self.setup_integrations()
self.setup_integrations_with_services()
self.setup_webhooks()
self.setup_email()
@self.registry.register(Action.Type.PLUGIN)
@dataclass(frozen=True)
class PluginActionHandler(ActionHandler):
group = ActionHandler.Group.OTHER
config_schema = {}
data_schema = {}
response = self.get_success_response(
self.organization.slug,
status_code=200,
)
assert len(response.data) == 8
assert response.data == [
# notification actions, sorted alphabetically with email first
{
"type": Action.Type.EMAIL,
"handlerGroup": ActionHandler.Group.NOTIFICATION.value,
"configSchema": {},
"dataSchema": {},
},
{
"type": Action.Type.SLACK,
"handlerGroup": ActionHandler.Group.NOTIFICATION.value,
"configSchema": {},
"dataSchema": {},
"integrations": [
{"id": str(self.slack_integration.id), "name": self.slack_integration.name}
],
},
# other actions, non sentry app actions first then sentry apps sorted alphabetically by name
{
"type": Action.Type.PLUGIN,
"handlerGroup": ActionHandler.Group.OTHER.value,
"configSchema": {},
"dataSchema": {},
},
# webhook action should include sentry apps without components
{
"type": Action.Type.WEBHOOK,
"handlerGroup": ActionHandler.Group.OTHER.value,
"configSchema": {},
"dataSchema": {},
"services": [
{"slug": "slack", "name": "(Legacy) Slack"},
{
"slug": self.no_component_sentry_app.slug,
"name": self.no_component_sentry_app.name,
},
{"slug": "webhooks", "name": "WebHooks"},
],
},
{
"type": Action.Type.SENTRY_APP,
"handlerGroup": ActionHandler.Group.OTHER.value,
"configSchema": {},
"dataSchema": {},
"sentryApp": {
"id": str(self.sentry_app.id),
"name": self.sentry_app.name,
"installationId": str(self.sentry_app_installation.id),
"installationUuid": str(self.sentry_app_installation.uuid),
"status": SentryAppStatus.as_str(self.sentry_app.status),
"settings": self.sentry_app_settings_schema["settings"],
"title": self.sentry_app_settings_schema["title"],
},
},
# ticket creation actions, sorted alphabetically
{
"type": Action.Type.GITHUB,
"handlerGroup": ActionHandler.Group.TICKET_CREATION.value,
"configSchema": {},
"dataSchema": {},
"integrations": [
{"id": str(self.github_integration.id), "name": self.github_integration.name}
],
},
{
"type": Action.Type.OPSGENIE,
"handlerGroup": ActionHandler.Group.TICKET_CREATION.value,
"configSchema": {},
"dataSchema": {},
"integrations": [
{
"id": str(self.opsgenie_integration.id),
"name": self.opsgenie_integration.name,
"services": [
{
"id": str(self.og_team["id"]),
"name": self.og_team["team"],
},
],
}
],
},
{
"type": Action.Type.PAGERDUTY,
"handlerGroup": ActionHandler.Group.TICKET_CREATION.value,
"configSchema": {},
"dataSchema": {},
"integrations": [
{
"id": str(self.pagerduty_integration.id),
"name": self.pagerduty_integration.name,
"services": [
{
"id": str(self.pagerduty_service_1["id"]),
"name": self.pagerduty_service_1["service_name"],
},
{
"id": str(self.pagerduty_service_2["id"]),
"name": self.pagerduty_service_2["service_name"],
},
],
}
],
},
]
|
OrganizationAvailableActionAPITestCase
|
python
|
ray-project__ray
|
python/ray/serve/_private/logging_utils.py
|
{
"start": 1192,
"end": 1409
}
|
class ____(CoreContextFilter):
def filter(self, record: logging.LogRecord) -> bool:
if should_skip_context_filter(record):
return True
return super().filter(record)
|
ServeCoreContextFilter
|