| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
| python | huggingface__transformers | src/transformers/models/vit_msn/modeling_vit_msn.py | {"start": 15266, "end": 16674} |
class ____(PreTrainedModel):
config: ViTMSNConfig
base_model_prefix = "vit"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["ViTMSNAttention", "ViTMSNSdpaAttention"]
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": ViTMSNLayer,
"attentions": ViTMSNSelfAttention,
}
# todo: Resort to https://github.com/facebookresearch/msn/blob/main/src/deit.py#L200-#L211
# when creating pre-training scripts.
@torch.no_grad()
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, ViTMSNEmbeddings):
init.zeros_(module.cls_token)
init.zeros_(module.position_embeddings)
if module.mask_token is not None:
init.zeros_(module.mask_token)
@auto_docstring
| ViTMSNPreTrainedModel |
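The `_init_weights` hook is the only non-boilerplate logic in the record above. Below is a minimal standalone sketch of the same per-module initialization rules outside transformers; the `initializer_range` value and the toy model are assumptions for illustration, not taken from the ViTMSN config.

```python
# Minimal sketch of the initialization pattern shown above (not the transformers code).
import torch
from torch import nn
from torch.nn import init

initializer_range = 0.02  # assumed value; stands in for config.initializer_range


@torch.no_grad()
def init_weights(module: nn.Module) -> None:
    """Apply the same per-module rules as the _init_weights method above."""
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        init.normal_(module.weight, mean=0.0, std=initializer_range)
        if module.bias is not None:
            init.zeros_(module.bias)
    elif isinstance(module, nn.LayerNorm):
        init.zeros_(module.bias)
        init.ones_(module.weight)


model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
model.apply(init_weights)  # nn.Module.apply visits every submodule
```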
| python | pandas-dev__pandas | asv_bench/benchmarks/join_merge.py | {"start": 10963, "end": 11984} |
class ____:
params = [
[
("ns", "ns"),
("ms", "ms"),
("ns", "ms"),
],
[None, "Europe/Brussels"],
[True, False],
]
param_names = ["units", "tz", "monotonic"]
def setup(self, units, tz, monotonic):
unit_left, unit_right = units
N = 10_000
keys = Series(date_range("2012-01-01", freq="min", periods=N, tz=tz))
self.left = DataFrame(
{
"key": keys.sample(N * 10, replace=True).dt.as_unit(unit_left),
"value1": np.random.randn(N * 10),
}
)
self.right = DataFrame(
{
"key": keys[:8000].dt.as_unit(unit_right),
"value2": np.random.randn(8000),
}
)
if monotonic:
self.left = self.left.sort_values("key")
self.right = self.right.sort_values("key")
def time_merge(self, units, tz, monotonic):
merge(self.left, self.right)
| MergeDatetime |
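The `MergeDatetime` record follows the asv convention: every combination of `params` is fed to `setup`, and methods prefixed with `time_` are timed. A scaled-down standalone sketch of the operation being benchmarked, with the resolutions fixed to the `("ns", "ms")` case; the sizes and seeds here are arbitrary assumptions.

```python
# Standalone sketch of what the MergeDatetime benchmark above exercises (no asv harness).
import numpy as np
import pandas as pd

N = 1_000
keys = pd.Series(pd.date_range("2012-01-01", freq="min", periods=N, tz="Europe/Brussels"))

left = pd.DataFrame(
    {
        # left keys carry nanosecond resolution, sampled with replacement
        "key": keys.sample(N * 10, replace=True, random_state=0).dt.as_unit("ns"),
        "value1": np.random.default_rng(0).standard_normal(N * 10),
    }
)
right = pd.DataFrame(
    {
        # right keys carry millisecond resolution
        "key": keys[: N // 2].dt.as_unit("ms"),
        "value2": np.random.default_rng(1).standard_normal(N // 2),
    }
)

# Sorting first corresponds to the monotonic=True case in the benchmark;
# merge aligns the two datetime resolutions during the join.
merged = pd.merge(left.sort_values("key"), right.sort_values("key"), on="key")
print(len(merged), merged.dtypes.to_dict())
```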
| python | anthropics__anthropic-sdk-python | src/anthropic/lib/bedrock/_beta_messages.py | {"start": 496, "end": 1376} |
class ____(SyncAPIResource):
create = FirstPartyMessagesAPI.create
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return MessagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> MessagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return MessagesWithStreamingResponse(self)
| Messages |
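The `Messages` resource above exposes alternate response handling through `cached_property` wrappers rather than separate client objects. A generic sketch of that wrapper pattern follows; the names (`Resource`, `RawResponseWrapper`, `fetch`) are illustrative assumptions and not part of the SDK.

```python
# Generic sketch of the with_raw_response wrapper pattern used above.
from functools import cached_property


class RawResponseWrapper:
    def __init__(self, resource: "Resource") -> None:
        self._resource = resource

    def fetch(self, item_id: str) -> dict:
        # Return transport details alongside the parsed payload.
        payload = self._resource.fetch(item_id)
        return {"status": 200, "parsed": payload}


class Resource:
    def fetch(self, item_id: str) -> str:
        return f"parsed body for {item_id}"

    @cached_property
    def with_raw_response(self) -> RawResponseWrapper:
        # Cached so repeated access reuses one wrapper per resource instance.
        return RawResponseWrapper(self)


resource = Resource()
print(resource.fetch("abc"))                    # parsed content only
print(resource.with_raw_response.fetch("abc"))  # wrapped, with metadata
```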
| python | numba__numba | numba/tests/test_datamodel.py | {"start": 1782, "end": 1873} |
class ____(test_factory()):
fe_type = types.Array(types.int32, 0, 'C')
| Test0DArrayOfInt32 |
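`Test0DArrayOfInt32` relies on a test-factory idiom: `test_factory()` returns a base `TestCase` whose tests read the `fe_type` class attribute, so each subclass only declares the type it exercises. A simplified stand-in for that idiom, not numba's implementation:

```python
# Simplified stand-in for the test_factory() idiom in the numba snippet above.
import unittest


def test_factory():
    class DataModelTestBase(unittest.TestCase):
        fe_type = None  # concrete subclasses set the frontend type to exercise

        def test_type_is_declared(self):
            # Every concrete subclass must pin down fe_type.
            self.assertIsNotNone(self.fe_type)

    return DataModelTestBase


class TestInt32Case(test_factory()):
    fe_type = "int32"  # numba's version uses e.g. types.Array(types.int32, 0, 'C')


if __name__ == "__main__":
    unittest.main()
```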
| python | pandas-dev__pandas | pandas/tests/frame/test_arithmetic.py | {"start": 43314, "end": 74927} |
class ____:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="h", tz="US/Eastern")
df = DataFrame(
np.random.default_rng(2).standard_normal(len(rng)), index=rng, columns=["a"]
)
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is timezone.utc
result = df_moscow + df
assert result.index.tz is timezone.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="Y")
ts = DataFrame(
np.random.default_rng(2).standard_normal((len(rng), 3)), index=rng
)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.default_rng(2).permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[pd.isna(expected)] = np.nan
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[pd.isna(expected)] = np.nan
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
@pytest.mark.parametrize("op", ["add", "sub", "mul", "div", "truediv"])
def test_binary_ops_align(self, op):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
opa = getattr(operator, op, None)
if opa is None:
return
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
def test_binary_ops_align_series_dataframe(self):
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
# adding NAs to first 5 values of column "C"
frame_copy.loc[: frame_copy.index[4], "C"] = np.nan
added = float_frame + frame_copy
indexer = added["A"].dropna().index
exp = (float_frame["A"] * 2).copy()
tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added["D"]).all()
self_added = float_frame + float_frame
tm.assert_index_equal(self_added.index, float_frame.index)
added_rev = frame_copy + float_frame
assert np.isnan(added["D"]).all()
assert np.isnan(added_rev["D"]).all()
# corner cases
# empty
plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
reverse = float_frame.reindex(columns=float_frame.columns[::-1])
tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype="float64")
added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype="float64")
# mix vs mix
added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype={"C": None})
# with int
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
def test_combine_series(self, float_frame, mixed_float_frame, mixed_int_frame):
# Series
series = float_frame.xs(float_frame.index[0])
added = float_frame + series
for key, s in added.items():
tm.assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series["E"] = 1
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
for key, s in float_frame.items():
tm.assert_series_equal(larger_added[key], s + series[key])
assert "E" in larger_added
assert np.isnan(larger_added["E"]).all()
# no upcast needed
added = mixed_float_frame + series
assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
_check_mixed_float(added, dtype={"C": None})
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype={"C": None})
# these used to raise with numexpr as we are adding an int64 to an
# uint64....weird vs int
added = mixed_int_frame + (100 * series).astype("int64")
_check_mixed_int(
added, dtype={"A": "int64", "B": "float64", "C": "int64", "D": "int64"}
)
added = mixed_int_frame + (100 * series).astype("int32")
_check_mixed_int(
added, dtype={"A": "int32", "B": "float64", "C": "int32", "D": "int64"}
)
def test_combine_timeseries(self, datetime_frame):
# TimeSeries
ts = datetime_frame["A"]
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = datetime_frame.add(ts, axis="index")
for key, col in datetime_frame.items():
result = col + ts
tm.assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == "A"
else:
assert result.name is None
smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis="index")
tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
tm.assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = datetime_frame.add(ts[:0], axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# Frame is all-nan
result = datetime_frame[:0].add(ts, axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# empty but with non-empty index
frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
def test_combineFunc(self, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
result = mixed_float_frame * 2
for c, s in result.items():
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype={"C": None})
result = DataFrame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
@pytest.mark.parametrize(
"func",
[operator.eq, operator.ne, operator.lt, operator.gt, operator.ge, operator.le],
)
def test_comparisons(self, simple_frame, float_frame, func):
df1 = DataFrame(
np.random.default_rng(2).standard_normal((30, 4)),
columns=Index(list("ABCD"), dtype=object),
index=pd.date_range("2000-01-01", periods=30, freq="B"),
)
df2 = df1.copy()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
msg = (
"Unable to coerce to Series/DataFrame, "
"dimension must be <= 2: (30, 4, 1, 1, 1)"
)
with pytest.raises(ValueError, match=re.escape(msg)):
func(df1, ndim_5)
result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(
result2.values, func(simple_frame.values, row.values)
)
result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
msg = (
r"Can only compare identically-labeled \(both index and columns\) "
"DataFrame objects"
)
with pytest.raises(ValueError, match=msg):
func(simple_frame, simple_frame[:2])
def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
# GH 11565
df = DataFrame(
{x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
)
f = getattr(operator, compare_operators_no_eq_ne)
msg = "|".join(
[
"'[<>]=?' not supported between instances of 'str' and 'int'",
"Invalid comparison between dtype=str and int",
]
)
with pytest.raises(TypeError, match=msg):
f(df, 0)
def test_comparison_protected_from_errstate(self):
missing_df = DataFrame(
np.ones((10, 4), dtype=np.float64),
columns=Index(list("ABCD"), dtype=object),
)
missing_df.loc[missing_df.index[0], "A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
with np.errstate(invalid="raise"):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
tm.assert_frame_equal(result, expected)
result = df.values > b
tm.assert_numpy_array_equal(result, expected.values)
msg1d = "Unable to coerce to Series, length must be 2: given 3"
msg2d = "Unable to coerce to DataFrame, shape must be"
msg2db = "operands could not be broadcast together with shapes"
with pytest.raises(ValueError, match=msg1d):
# wrong shape
df > lst
with pytest.raises(ValueError, match=msg1d):
# wrong shape
df > tup
# broadcasts like ndarray (GH#23000)
result = df > b_r
tm.assert_frame_equal(result, expected)
result = df.values > b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df > b_c
with pytest.raises(ValueError, match=msg2db):
df.values > b_c
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match=msg1d):
df == lst
with pytest.raises(ValueError, match=msg1d):
df == tup
# broadcasts like ndarray (GH#23000)
result = df == b_r
tm.assert_frame_equal(result, expected)
result = df.values == b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df == b_c
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(
np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
)
expected.index = df.index
expected.columns = df.columns
with pytest.raises(ValueError, match=msg1d):
df == lst
with pytest.raises(ValueError, match=msg1d):
df == tup
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list("abcdefg")
X_orig = DataFrame(
np.arange(10 * len(columns)).reshape(-1, len(columns)),
columns=columns,
index=range(10),
)
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list("bedcf")
subs = list("bcdef")
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(
np.random.default_rng(2).integers(0, 5, size=10).reshape(-1, 5)
)
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._mgr is s2._mgr
df = df_orig.copy()
df2 = df
df += 1
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._mgr is df2._mgr
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._mgr is df2._mgr
# mixed dtype
arr = np.random.default_rng(2).integers(0, 10, size=5)
df_orig = DataFrame({"A": arr.copy(), "B": "foo"})
df = df_orig.copy()
df2 = df
df["A"] += 1
expected = DataFrame({"A": arr.copy() + 1, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
df = df_orig.copy()
df2 = df
df["A"] += 1.5
expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
@pytest.mark.parametrize(
"op",
[
"add",
"and",
pytest.param(
"div",
marks=pytest.mark.xfail(
raises=AttributeError, reason="__idiv__ not implemented"
),
),
"floordiv",
"mod",
"mul",
"or",
"pow",
"sub",
"truediv",
"xor",
],
)
def test_inplace_ops_identity2(self, op):
df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]})
operand = 2
if op in ("and", "or", "xor"):
# cannot use floats for boolean ops
df["a"] = [True, False, True]
df_copy = df.copy()
iop = f"__i{op}__"
op = f"__{op}__"
# no id change and value is correct
getattr(df, iop)(operand)
expected = getattr(df_copy, op)(operand)
tm.assert_frame_equal(df, expected)
expected = id(df)
assert id(df) == expected
@pytest.mark.parametrize(
"val",
[
[1, 2, 3],
(1, 2, 3),
np.array([1, 2, 3], dtype=np.int64),
range(1, 4),
],
)
def test_alignment_non_pandas(self, val):
index = ["A", "B", "C"]
columns = ["X", "Y", "Z"]
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
index=index,
columns=columns,
)
align = DataFrame._align_for_op
expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index)
tm.assert_frame_equal(align(df, val, axis=0)[1], expected)
expected = DataFrame(
{"X": [1, 1, 1], "Y": [2, 2, 2], "Z": [3, 3, 3]}, index=df.index
)
tm.assert_frame_equal(align(df, val, axis=1)[1], expected)
@pytest.mark.parametrize("val", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)])
def test_alignment_non_pandas_length_mismatch(self, val):
index = ["A", "B", "C"]
columns = ["X", "Y", "Z"]
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
index=index,
columns=columns,
)
align = DataFrame._align_for_op
# length mismatch
msg = "Unable to coerce to Series, length must be 3: given 2"
with pytest.raises(ValueError, match=msg):
align(df, val, axis=0)
with pytest.raises(ValueError, match=msg):
align(df, val, axis=1)
def test_alignment_non_pandas_index_columns(self):
index = ["A", "B", "C"]
columns = ["X", "Y", "Z"]
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
index=index,
columns=columns,
)
align = DataFrame._align_for_op
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(
align(df, val, axis=0)[1],
DataFrame(val, index=df.index, columns=df.columns),
)
tm.assert_frame_equal(
align(df, val, axis=1)[1],
DataFrame(val, index=df.index, columns=df.columns),
)
# shape mismatch
msg = "Unable to coerce to DataFrame, shape must be"
val = np.array([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=msg):
align(df, val, axis=0)
with pytest.raises(ValueError, match=msg):
align(df, val, axis=1)
val = np.zeros((3, 3, 3))
msg = re.escape(
"Unable to coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)"
)
with pytest.raises(ValueError, match=msg):
align(df, val, axis=0)
with pytest.raises(ValueError, match=msg):
align(df, val, axis=1)
def test_no_warning(self, all_arithmetic_operators):
df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
b = df["B"]
with tm.assert_produces_warning(None):
getattr(df, all_arithmetic_operators)(b)
def test_dunder_methods_binary(self, all_arithmetic_operators):
# GH#??? frame.__foo__ should only accept one argument
df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
b = df["B"]
with pytest.raises(TypeError, match="takes 2 positional arguments"):
getattr(df, all_arithmetic_operators)(b, 0)
def test_align_int_fill_bug(self):
# GH#910
X = np.arange(10 * 10, dtype="float64").reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1["0.X"] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
tm.assert_frame_equal(result, expected)
def test_pow_with_realignment():
# GH#32685 pow has special semantics for operating with null values
left = DataFrame({"A": [0, 1, 2]})
right = DataFrame(index=[0, 1, 2])
result = left**right
expected = DataFrame({"A": [np.nan, 1.0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_dataframe_series_extension_dtypes():
# https://github.com/pandas-dev/pandas/issues/34311
df = DataFrame(
np.random.default_rng(2).integers(0, 100, (10, 3)), columns=["a", "b", "c"]
)
ser = Series([1, 2, 3], index=["a", "b", "c"])
expected = df.to_numpy("int64") + ser.to_numpy("int64").reshape(-1, 3)
expected = DataFrame(expected, columns=df.columns, dtype="Int64")
df_ea = df.astype("Int64")
result = df_ea + ser
tm.assert_frame_equal(result, expected)
result = df_ea + ser.astype("Int64")
tm.assert_frame_equal(result, expected)
def test_dataframe_blockwise_slicelike():
# GH#34367
arr = np.random.default_rng(2).integers(0, 1000, (100, 10))
df1 = DataFrame(arr)
# Explicit cast to float to avoid implicit cast when setting nan
df2 = df1.copy().astype({1: "float", 3: "float", 7: "float"})
df2.iloc[0, [1, 3, 7]] = np.nan
# Explicit cast to float to avoid implicit cast when setting nan
df3 = df1.copy().astype({5: "float"})
df3.iloc[0, [5]] = np.nan
# Explicit cast to float to avoid implicit cast when setting nan
df4 = df1.copy().astype({2: "float", 3: "float", 4: "float"})
df4.iloc[0, np.arange(2, 5)] = np.nan
# Explicit cast to float to avoid implicit cast when setting nan
df5 = df1.copy().astype({4: "float", 5: "float", 6: "float"})
df5.iloc[0, np.arange(4, 7)] = np.nan
for left, right in [(df1, df2), (df2, df3), (df4, df5)]:
res = left + right
expected = DataFrame({i: left[i] + right[i] for i in left.columns})
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize(
"df, col_dtype",
[
(DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"),
(
DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")).astype(
{"b": object}
),
"object",
),
],
)
def test_dataframe_operation_with_non_numeric_types(df, col_dtype):
# GH #22663
expected = DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab"))
expected = expected.astype({"b": col_dtype})
result = df + Series([-1.0], index=list("a"))
tm.assert_frame_equal(result, expected)
def test_arith_reindex_with_duplicates():
# https://github.com/pandas-dev/pandas/issues/35194
df1 = DataFrame(data=[[0]], columns=["second"])
df2 = DataFrame(data=[[0, 0, 0]], columns=["first", "second", "second"])
result = df1 + df2
expected = DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("to_add", [[Series([1, 1])], [Series([1, 1]), Series([1, 1])]])
def test_arith_list_of_arraylike_raise(to_add):
# GH 36702. Raise when trying to add list of array-like to DataFrame
df = DataFrame({"x": [1, 2], "y": [1, 2]})
msg = f"Unable to coerce list of {type(to_add[0])} to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
df + to_add
with pytest.raises(ValueError, match=msg):
to_add + df
def test_inplace_arithmetic_series_update():
# https://github.com/pandas-dev/pandas/issues/36373
df = DataFrame({"A": [1, 2, 3]})
df_orig = df.copy()
series = df["A"]
vals = series._values
series += 1
assert series._values is not vals
tm.assert_frame_equal(df, df_orig)
def test_arithmetic_multiindex_align():
"""
Regression test for: https://github.com/pandas-dev/pandas/issues/33765
"""
df1 = DataFrame(
[[1]],
index=["a"],
columns=MultiIndex.from_product([[0], [1]], names=["a", "b"]),
)
df2 = DataFrame([[1]], index=["a"], columns=Index([0], name="a"))
expected = DataFrame(
[[0]],
index=["a"],
columns=MultiIndex.from_product([[0], [1]], names=["a", "b"]),
)
result = df1 - df2
tm.assert_frame_equal(result, expected)
def test_arithmetic_multiindex_column_align():
# GH#60498
df1 = DataFrame(
data=100,
columns=MultiIndex.from_product(
[["1A", "1B"], ["2A", "2B"]], names=["Lev1", "Lev2"]
),
index=["C1", "C2"],
)
df2 = DataFrame(
data=np.array([[0.1, 0.25], [0.2, 0.45]]),
columns=MultiIndex.from_product([["1A", "1B"]], names=["Lev1"]),
index=["C1", "C2"],
)
expected = DataFrame(
data=np.array([[10.0, 10.0, 25.0, 25.0], [20.0, 20.0, 45.0, 45.0]]),
columns=MultiIndex.from_product(
[["1A", "1B"], ["2A", "2B"]], names=["Lev1", "Lev2"]
),
index=["C1", "C2"],
)
result = df1 * df2
tm.assert_frame_equal(result, expected)
def test_arithmetic_multiindex_column_align_with_fillvalue():
# GH#60903
df1 = DataFrame(
data=[[1.0, 2.0]],
columns=MultiIndex.from_tuples([("A", "one"), ("A", "two")]),
)
df2 = DataFrame(
data=[[3.0, 4.0]],
columns=MultiIndex.from_tuples([("B", "one"), ("B", "two")]),
)
expected = DataFrame(
data=[[1.0, 2.0, 3.0, 4.0]],
columns=MultiIndex.from_tuples(
[("A", "one"), ("A", "two"), ("B", "one"), ("B", "two")]
),
)
result = df1.add(df2, fill_value=0)
tm.assert_frame_equal(result, expected)
def test_bool_frame_mult_float():
# GH 18549
df = DataFrame(True, list("ab"), list("cd"))
result = df * 1.0
expected = DataFrame(np.ones((2, 2)), list("ab"), list("cd"))
tm.assert_frame_equal(result, expected)
def test_frame_sub_nullable_int(any_int_ea_dtype):
# GH 32822
series1 = Series([1, 2, None], dtype=any_int_ea_dtype)
series2 = Series([1, 2, 3], dtype=any_int_ea_dtype)
expected = DataFrame([0, 0, None], dtype=any_int_ea_dtype)
result = series1.to_frame() - series2.to_frame()
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings(
"ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning"
)
def test_frame_op_subclass_nonclass_constructor():
# GH#43201 subclass._constructor is a function, not the subclass itself
class SubclassedSeries(Series):
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["my_extra_data"]
def __init__(self, my_extra_data, *args, **kwargs) -> None:
self.my_extra_data = my_extra_data
super().__init__(*args, **kwargs)
@property
def _constructor(self):
return functools.partial(type(self), self.my_extra_data)
@property
def _constructor_sliced(self):
return SubclassedSeries
sdf = SubclassedDataFrame("some_data", {"A": [1, 2, 3], "B": [4, 5, 6]})
result = sdf * 2
expected = SubclassedDataFrame("some_data", {"A": [2, 4, 6], "B": [8, 10, 12]})
tm.assert_frame_equal(result, expected)
result = sdf + sdf
tm.assert_frame_equal(result, expected)
def test_enum_column_equality():
Cols = Enum("Cols", "col1 col2")
q1 = DataFrame({Cols.col1: [1, 2, 3]})
q2 = DataFrame({Cols.col1: [1, 2, 3]})
result = q1[Cols.col1] == q2[Cols.col1]
expected = Series([True, True, True], name=Cols.col1)
tm.assert_series_equal(result, expected)
def test_mixed_col_index_dtype(string_dtype_no_object):
# GH 47382
df1 = DataFrame(columns=list("abc"), data=1.0, index=[0])
df2 = DataFrame(columns=list("abc"), data=0.0, index=[0])
df1.columns = df2.columns.astype(string_dtype_no_object)
result = df1 + df2
expected = DataFrame(columns=list("abc"), data=1.0, index=[0])
expected.columns = expected.columns.astype(string_dtype_no_object)
tm.assert_frame_equal(result, expected)
| TestFrameArithmeticUnsorted |
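Two of the alignment behaviors checked in the test class above, shown standalone: adding frames whose indexes carry different timezones yields a UTC index, and arithmetic between misaligned frames fills non-overlapping labels with NaN. A minimal sketch with arbitrary data values:

```python
# Small standalone illustration of two alignment behaviors tested above.
import numpy as np
import pandas as pd

# 1) tz-mismatched indexes: the result index is converted to UTC.
rng = pd.date_range("2011-01-01", periods=4, freq="h", tz="US/Eastern")
df = pd.DataFrame({"a": np.arange(4.0)}, index=rng)
df_moscow = df.tz_convert("Europe/Moscow")
print((df + df_moscow).index.tz)  # UTC

# 2) misaligned row labels: non-overlapping positions become NaN.
ts = pd.DataFrame(np.ones((4, 2)), columns=["x", "y"])
print(ts + ts[::2])  # rows 1 and 3 are NaN after alignment
```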
| python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/typing_extensions.py | {"start": 75379, "end": 134499} |
class ____(typing._Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return f'typing_extensions.{self._name}'
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return typing.Union[self, other]
def __ror__(self, other):
return typing.Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@typing._tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
if hasattr(typing, "LiteralString"): # 3.11+
LiteralString = typing.LiteralString
else:
@_SpecialForm
def LiteralString(self, params):
"""Represents an arbitrary literal string.
Example::
from pip._vendor.typing_extensions import LiteralString
def query(sql: LiteralString) -> ...:
...
query("SELECT * FROM table") # ok
query(f"SELECT * FROM {input()}") # not ok
See PEP 675 for details.
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Self"): # 3.11+
Self = typing.Self
else:
@_SpecialForm
def Self(self, params):
"""Used to spell the type of "self" in classes.
Example::
from typing import Self
class ReturnsSelf:
def parse(self, data: bytes) -> Self:
...
return self
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Never"): # 3.11+
Never = typing.Never
else:
@_SpecialForm
def Never(self, params):
"""The bottom type, a type that has no members.
This can be used to define a function that should never be
called, or a function that never returns::
from pip._vendor.typing_extensions import Never
def never_call_me(arg: Never) -> None:
pass
def int_or_str(arg: int | str) -> None:
never_call_me(arg) # type checker error
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
never_call_me(arg) # ok, arg is of type Never
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, 'Required'): # 3.11+
Required = typing.Required
NotRequired = typing.NotRequired
elif sys.version_info[:2] >= (3, 9): # 3.9-3.10
@_ExtensionsSpecialForm
def Required(self, parameters):
"""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
@_ExtensionsSpecialForm
def NotRequired(self, parameters):
"""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
else: # 3.8
class _RequiredForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
Required = _RequiredForm(
'Required',
doc="""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
""")
NotRequired = _RequiredForm(
'NotRequired',
doc="""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
""")
if hasattr(typing, 'ReadOnly'):
ReadOnly = typing.ReadOnly
elif sys.version_info[:2] >= (3, 9): # 3.9-3.12
@_ExtensionsSpecialForm
def ReadOnly(self, parameters):
"""A special typing construct to mark an item of a TypedDict as read-only.
For example:
class Movie(TypedDict):
title: ReadOnly[str]
year: int
def mutate_movie(m: Movie) -> None:
m["year"] = 1992 # allowed
m["title"] = "The Matrix" # typechecker error
There is no runtime checking for this property.
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
else: # 3.8
class _ReadOnlyForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
ReadOnly = _ReadOnlyForm(
'ReadOnly',
doc="""A special typing construct to mark a key of a TypedDict as read-only.
For example:
class Movie(TypedDict):
title: ReadOnly[str]
year: int
def mutate_movie(m: Movie) -> None:
m["year"] = 1992 # allowed
m["title"] = "The Matrix" # typechecker error
There is no runtime checking for this property.
""")
_UNPACK_DOC = """\
Type unpack operator.
The type unpack operator takes the child types from some container type,
such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. For
example:
# For some generic class `Foo`:
Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str]
Ts = TypeVarTuple('Ts')
# Specifies that `Bar` is generic in an arbitrary number of types.
# (Think of `Ts` as a tuple of an arbitrary number of individual
# `TypeVar`s, which the `Unpack` is 'pulling out' directly into the
# `Generic[]`.)
class Bar(Generic[Unpack[Ts]]): ...
Bar[int] # Valid
Bar[int, str] # Also valid
From Python 3.11, this can also be done using the `*` operator:
Foo[*tuple[int, str]]
class Bar(Generic[*Ts]): ...
The operator can also be used along with a `TypedDict` to annotate
`**kwargs` in a function signature. For instance:
class Movie(TypedDict):
name: str
year: int
# This function expects two keyword arguments - *name* of type `str` and
# *year* of type `int`.
def foo(**kwargs: Unpack[Movie]): ...
Note that there is only some runtime checking of this operator. Not
everything the runtime allows may be accepted by static type checkers.
For more information, see PEP 646 and PEP 692.
"""
if sys.version_info >= (3, 12): # PEP 692 changed the repr of Unpack[]
Unpack = typing.Unpack
def _is_unpack(obj):
return get_origin(obj) is Unpack
elif sys.version_info[:2] >= (3, 9): # 3.9+
class _UnpackSpecialForm(_ExtensionsSpecialForm, _root=True):
def __init__(self, getitem):
super().__init__(getitem)
self.__doc__ = _UNPACK_DOC
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
@property
def __typing_unpacked_tuple_args__(self):
assert self.__origin__ is Unpack
assert len(self.__args__) == 1
arg, = self.__args__
if isinstance(arg, (typing._GenericAlias, _types.GenericAlias)):
if arg.__origin__ is not tuple:
raise TypeError("Unpack[...] must be used with a tuple type")
return arg.__args__
return None
@_UnpackSpecialForm
def Unpack(self, parameters):
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
else: # 3.8
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
class _UnpackForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
Unpack = _UnpackForm('Unpack', doc=_UNPACK_DOC)
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
if _PEP_696_IMPLEMENTED:
from typing import TypeVarTuple
elif hasattr(typing, "TypeVarTuple"): # 3.11+
def _unpack_args(*args):
newargs = []
for arg in args:
subargs = getattr(arg, '__typing_unpacked_tuple_args__', None)
if subargs is not None and not (subargs and subargs[-1] is ...):
newargs.extend(subargs)
else:
newargs.append(arg)
return newargs
# Add default parameter - PEP 696
class TypeVarTuple(metaclass=_TypeVarLikeMeta):
"""Type variable tuple."""
_backported_typevarlike = typing.TypeVarTuple
def __new__(cls, name, *, default=NoDefault):
tvt = typing.TypeVarTuple(name)
_set_default(tvt, default)
_set_module(tvt)
def _typevartuple_prepare_subst(alias, args):
params = alias.__parameters__
typevartuple_index = params.index(tvt)
for param in params[typevartuple_index + 1:]:
if isinstance(param, TypeVarTuple):
raise TypeError(
f"More than one TypeVarTuple parameter in {alias}"
)
alen = len(args)
plen = len(params)
left = typevartuple_index
right = plen - typevartuple_index - 1
var_tuple_index = None
fillarg = None
for k, arg in enumerate(args):
if not isinstance(arg, type):
subargs = getattr(arg, '__typing_unpacked_tuple_args__', None)
if subargs and len(subargs) == 2 and subargs[-1] is ...:
if var_tuple_index is not None:
raise TypeError(
"More than one unpacked "
"arbitrary-length tuple argument"
)
var_tuple_index = k
fillarg = subargs[0]
if var_tuple_index is not None:
left = min(left, var_tuple_index)
right = min(right, alen - var_tuple_index - 1)
elif left + right > alen:
raise TypeError(f"Too few arguments for {alias};"
f" actual {alen}, expected at least {plen - 1}")
if left == alen - right and tvt.has_default():
replacement = _unpack_args(tvt.__default__)
else:
replacement = args[left: alen - right]
return (
*args[:left],
*([fillarg] * (typevartuple_index - left)),
replacement,
*([fillarg] * (plen - right - left - typevartuple_index - 1)),
*args[alen - right:],
)
tvt.__typing_prepare_subst__ = _typevartuple_prepare_subst
return tvt
def __init_subclass__(self, *args, **kwds):
raise TypeError("Cannot subclass special typing classes")
else: # <=3.10
class TypeVarTuple(_DefaultMixin):
"""Type variable tuple.
Usage::
Ts = TypeVarTuple('Ts')
In the same way that a normal type variable is a stand-in for a single
type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
type such as ``Tuple[int, str]``.
Type variable tuples can be used in ``Generic`` declarations.
Consider the following example::
class Array(Generic[*Ts]): ...
The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
where ``T1`` and ``T2`` are type variables. To use these type variables
as type parameters of ``Array``, we must *unpack* the type variable tuple using
the star operator: ``*Ts``. The signature of ``Array`` then behaves
as if we had simply written ``class Array(Generic[T1, T2]): ...``.
In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows
us to parameterise the class with an *arbitrary* number of type parameters.
Type variable tuples can be used anywhere a normal ``TypeVar`` can.
This includes class definitions, as shown above, as well as function
signatures and variable annotations::
class Array(Generic[*Ts]):
def __init__(self, shape: Tuple[*Ts]):
self._shape: Tuple[*Ts] = shape
def get_shape(self) -> Tuple[*Ts]:
return self._shape
shape = (Height(480), Width(640))
x: Array[Height, Width] = Array(shape)
y = abs(x) # Inferred type is Array[Height, Width]
z = x + x # ... is Array[Height, Width]
x.get_shape() # ... is tuple[Height, Width]
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
def __iter__(self):
yield self.__unpacked__
def __init__(self, name, *, default=NoDefault):
self.__name__ = name
_DefaultMixin.__init__(self, default)
# for pickling:
def_mod = _caller()
if def_mod != 'typing_extensions':
self.__module__ = def_mod
self.__unpacked__ = Unpack[self]
def __repr__(self):
return self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
def __init_subclass__(self, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
if hasattr(typing, "reveal_type"): # 3.11+
reveal_type = typing.reveal_type
else: # <=3.10
def reveal_type(obj: T, /) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr)
return obj
if hasattr(typing, "_ASSERT_NEVER_REPR_MAX_LENGTH"): # 3.11+
_ASSERT_NEVER_REPR_MAX_LENGTH = typing._ASSERT_NEVER_REPR_MAX_LENGTH
else: # <=3.10
_ASSERT_NEVER_REPR_MAX_LENGTH = 100
if hasattr(typing, "assert_never"): # 3.11+
assert_never = typing.assert_never
else: # <=3.10
def assert_never(arg: Never, /) -> Never:
"""Assert to the type checker that a line of code is unreachable.
Example::
def int_or_str(arg: int | str) -> None:
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
assert_never(arg)
If a type checker finds that a call to assert_never() is
reachable, it will emit an error.
At runtime, this throws an exception when called.
"""
value = repr(arg)
if len(value) > _ASSERT_NEVER_REPR_MAX_LENGTH:
value = value[:_ASSERT_NEVER_REPR_MAX_LENGTH] + '...'
raise AssertionError(f"Expected code to be unreachable, but got: {value}")
if sys.version_info >= (3, 12): # 3.12+
# dataclass_transform exists in 3.11 but lacks the frozen_default parameter
dataclass_transform = typing.dataclass_transform
else: # <=3.11
def dataclass_transform(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
frozen_default: bool = False,
field_specifiers: typing.Tuple[
typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
...
] = (),
**kwargs: typing.Any,
) -> typing.Callable[[T], T]:
"""Decorator that marks a function, class, or metaclass as providing
dataclass-like behavior.
Example:
from pip._vendor.typing_extensions import dataclass_transform
_T = TypeVar("_T")
# Used on a decorator function
@dataclass_transform()
def create_model(cls: type[_T]) -> type[_T]:
...
return cls
@create_model
class CustomerModel:
id: int
name: str
# Used on a base class
@dataclass_transform()
class ModelBase: ...
class CustomerModel(ModelBase):
id: int
name: str
# Used on a metaclass
@dataclass_transform()
class ModelMeta(type): ...
class ModelBase(metaclass=ModelMeta): ...
class CustomerModel(ModelBase):
id: int
name: str
Each of the ``CustomerModel`` classes defined in this example will now
behave similarly to a dataclass created with the ``@dataclasses.dataclass``
decorator. For example, the type checker will synthesize an ``__init__``
method.
The arguments to this decorator can be used to customize this behavior:
- ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
True or False if it is omitted by the caller.
- ``order_default`` indicates whether the ``order`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``kw_only_default`` indicates whether the ``kw_only`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``frozen_default`` indicates whether the ``frozen`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``field_specifiers`` specifies a static list of supported classes
or functions that describe fields, similar to ``dataclasses.field()``.
At runtime, this decorator records its arguments in the
``__dataclass_transform__`` attribute on the decorated object.
See PEP 681 for details.
"""
def decorator(cls_or_fn):
cls_or_fn.__dataclass_transform__ = {
"eq_default": eq_default,
"order_default": order_default,
"kw_only_default": kw_only_default,
"frozen_default": frozen_default,
"field_specifiers": field_specifiers,
"kwargs": kwargs,
}
return cls_or_fn
return decorator
if hasattr(typing, "override"): # 3.12+
override = typing.override
else: # <=3.11
_F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
def override(arg: _F, /) -> _F:
"""Indicate that a method is intended to override a method in a base class.
Usage:
class Base:
def method(self) -> None:
pass
class Child(Base):
@override
def method(self) -> None:
super().method()
When this decorator is applied to a method, the type checker will
validate that it overrides a method with the same name on a base class.
This helps prevent bugs that may occur when a base class is changed
without an equivalent change to a child class.
There is no runtime checking of these properties. The decorator
sets the ``__override__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
See PEP 698 for details.
"""
try:
arg.__override__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return arg
if hasattr(warnings, "deprecated"):
deprecated = warnings.deprecated
else:
_T = typing.TypeVar("_T")
class deprecated:
"""Indicate that a class, function or overload is deprecated.
When this decorator is applied to an object, the type checker
will generate a diagnostic on usage of the deprecated object.
Usage:
@deprecated("Use B instead")
class A:
pass
@deprecated("Use g instead")
def f():
pass
@overload
@deprecated("int support is deprecated")
def g(x: int) -> int: ...
@overload
def g(x: str) -> int: ...
The warning specified by *category* will be emitted at runtime
on use of deprecated objects. For functions, that happens on calls;
for classes, on instantiation and on creation of subclasses.
If the *category* is ``None``, no warning is emitted at runtime.
The *stacklevel* determines where the
warning is emitted. If it is ``1`` (the default), the warning
is emitted at the direct caller of the deprecated object; if it
is higher, it is emitted further up the stack.
Static type checker behavior is not affected by the *category*
and *stacklevel* arguments.
The deprecation message passed to the decorator is saved in the
``__deprecated__`` attribute on the decorated object.
If applied to an overload, the decorator
must be after the ``@overload`` decorator for the attribute to
exist on the overload as returned by ``get_overloads()``.
See PEP 702 for details.
"""
def __init__(
self,
message: str,
/,
*,
category: typing.Optional[typing.Type[Warning]] = DeprecationWarning,
stacklevel: int = 1,
) -> None:
if not isinstance(message, str):
raise TypeError(
"Expected an object of type str for 'message', not "
f"{type(message).__name__!r}"
)
self.message = message
self.category = category
self.stacklevel = stacklevel
def __call__(self, arg: _T, /) -> _T:
# Make sure the inner functions created below don't
# retain a reference to self.
msg = self.message
category = self.category
stacklevel = self.stacklevel
if category is None:
arg.__deprecated__ = msg
return arg
elif isinstance(arg, type):
import functools
from types import MethodType
original_new = arg.__new__
@functools.wraps(original_new)
def __new__(cls, *args, **kwargs):
if cls is arg:
warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
if original_new is not object.__new__:
return original_new(cls, *args, **kwargs)
# Mirrors a similar check in object.__new__.
elif cls.__init__ is object.__init__ and (args or kwargs):
raise TypeError(f"{cls.__name__}() takes no arguments")
else:
return original_new(cls)
arg.__new__ = staticmethod(__new__)
original_init_subclass = arg.__init_subclass__
# We need slightly different behavior if __init_subclass__
# is a bound method (likely if it was implemented in Python)
if isinstance(original_init_subclass, MethodType):
original_init_subclass = original_init_subclass.__func__
@functools.wraps(original_init_subclass)
def __init_subclass__(*args, **kwargs):
warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
return original_init_subclass(*args, **kwargs)
arg.__init_subclass__ = classmethod(__init_subclass__)
# Or otherwise, which likely means it's a builtin such as
# object's implementation of __init_subclass__.
else:
@functools.wraps(original_init_subclass)
def __init_subclass__(*args, **kwargs):
warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
return original_init_subclass(*args, **kwargs)
arg.__init_subclass__ = __init_subclass__
arg.__deprecated__ = __new__.__deprecated__ = msg
__init_subclass__.__deprecated__ = msg
return arg
elif callable(arg):
import functools
@functools.wraps(arg)
def wrapper(*args, **kwargs):
warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
return arg(*args, **kwargs)
arg.__deprecated__ = wrapper.__deprecated__ = msg
return wrapper
else:
raise TypeError(
"@deprecated decorator with non-None category must be applied to "
f"a class or callable, not {arg!r}"
)
# We have to do some monkey patching to deal with the dual nature of
# Unpack/TypeVarTuple:
# - We want Unpack to be a kind of TypeVar so it gets accepted in
# Generic[Unpack[Ts]]
# - We want it to *not* be treated as a TypeVar for the purposes of
# counting generic parameters, so that when we subscript a generic,
# the runtime doesn't try to substitute the Unpack with the subscripted type.
if not hasattr(typing, "TypeVarTuple"):
def _check_generic(cls, parameters, elen=_marker):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
if elen is _marker:
if not hasattr(cls, "__parameters__") or not cls.__parameters__:
raise TypeError(f"{cls} is not a generic class")
elen = len(cls.__parameters__)
alen = len(parameters)
if alen != elen:
expect_val = elen
if hasattr(cls, "__parameters__"):
parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
return
# deal with TypeVarLike defaults
# required TypeVarLikes cannot appear after a defaulted one.
if alen < elen:
# since we validate TypeVarLike default in _collect_type_vars
# or _collect_parameters we can safely check parameters[alen]
if (
getattr(parameters[alen], '__default__', NoDefault)
is not NoDefault
):
return
num_default_tv = sum(getattr(p, '__default__', NoDefault)
is not NoDefault for p in parameters)
elen -= num_default_tv
expect_val = f"at least {elen}"
things = "arguments" if sys.version_info >= (3, 10) else "parameters"
raise TypeError(f"Too {'many' if alen > elen else 'few'} {things}"
f" for {cls}; actual {alen}, expected {expect_val}")
else:
# Python 3.11+
def _check_generic(cls, parameters, elen):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
alen = len(parameters)
if alen != elen:
expect_val = elen
if hasattr(cls, "__parameters__"):
parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
# deal with TypeVarLike defaults
# required TypeVarLikes cannot appear after a defaulted one.
if alen < elen:
# since we validate TypeVarLike default in _collect_type_vars
# or _collect_parameters we can safely check parameters[alen]
if (
getattr(parameters[alen], '__default__', NoDefault)
is not NoDefault
):
return
num_default_tv = sum(getattr(p, '__default__', NoDefault)
is not NoDefault for p in parameters)
elen -= num_default_tv
expect_val = f"at least {elen}"
raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments"
f" for {cls}; actual {alen}, expected {expect_val}")
if not _PEP_696_IMPLEMENTED:
typing._check_generic = _check_generic
def _has_generic_or_protocol_as_origin() -> bool:
try:
frame = sys._getframe(2)
# - Catch AttributeError: not all Python implementations have sys._getframe()
# - Catch ValueError: maybe we're called from an unexpected module
# and the call stack isn't deep enough
except (AttributeError, ValueError):
return False # err on the side of leniency
else:
# If we somehow get invoked from outside typing.py,
# also err on the side of leniency
if frame.f_globals.get("__name__") != "typing":
return False
origin = frame.f_locals.get("origin")
# Cannot use "in" because origin may be an object with a buggy __eq__ that
# throws an error.
return origin is typing.Generic or origin is Protocol or origin is typing.Protocol
_TYPEVARTUPLE_TYPES = {TypeVarTuple, getattr(typing, "TypeVarTuple", None)}
def _is_unpacked_typevartuple(x) -> bool:
if get_origin(x) is not Unpack:
return False
args = get_args(x)
return (
bool(args)
and len(args) == 1
and type(args[0]) in _TYPEVARTUPLE_TYPES
)
# Python 3.11+ _collect_type_vars was renamed to _collect_parameters
if hasattr(typing, '_collect_type_vars'):
def _collect_type_vars(types, typevar_types=None):
"""Collect all type variable contained in types in order of
first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
"""
if typevar_types is None:
typevar_types = typing.TypeVar
tvars = []
# A required TypeVarLike cannot appear after a TypeVarLike with a default
# if it was a direct call to `Generic[]` or `Protocol[]`
enforce_default_ordering = _has_generic_or_protocol_as_origin()
default_encountered = False
# Also, a TypeVarLike with a default cannot appear after a TypeVarTuple
type_var_tuple_encountered = False
for t in types:
if _is_unpacked_typevartuple(t):
type_var_tuple_encountered = True
elif isinstance(t, typevar_types) and t not in tvars:
if enforce_default_ordering:
has_default = getattr(t, '__default__', NoDefault) is not NoDefault
if has_default:
if type_var_tuple_encountered:
raise TypeError('Type parameter with a default'
' follows TypeVarTuple')
default_encountered = True
elif default_encountered:
raise TypeError(f'Type parameter {t!r} without a default'
' follows type parameter with a default')
tvars.append(t)
if _should_collect_from_parameters(t):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
typing._collect_type_vars = _collect_type_vars
else:
def _collect_parameters(args):
"""Collect all type variables and parameter specifications in args
in order of first appearance (lexicographic order).
For example::
assert _collect_parameters((T, Callable[P, T])) == (T, P)
"""
parameters = []
# A required TypeVarLike cannot appear after a TypeVarLike with default
# if it was a direct call to `Generic[]` or `Protocol[]`
enforce_default_ordering = _has_generic_or_protocol_as_origin()
default_encountered = False
# Also, a TypeVarLike with a default cannot appear after a TypeVarTuple
type_var_tuple_encountered = False
for t in args:
if isinstance(t, type):
# We don't want __parameters__ descriptor of a bare Python class.
pass
elif isinstance(t, tuple):
# `t` might be a tuple, when `ParamSpec` is substituted with
# `[T, int]`, or `[int, *Ts]`, etc.
for x in t:
for collected in _collect_parameters([x]):
if collected not in parameters:
parameters.append(collected)
elif hasattr(t, '__typing_subst__'):
if t not in parameters:
if enforce_default_ordering:
has_default = (
getattr(t, '__default__', NoDefault) is not NoDefault
)
if type_var_tuple_encountered and has_default:
raise TypeError('Type parameter with a default'
' follows TypeVarTuple')
if has_default:
default_encountered = True
elif default_encountered:
raise TypeError(f'Type parameter {t!r} without a default'
' follows type parameter with a default')
parameters.append(t)
else:
if _is_unpacked_typevartuple(t):
type_var_tuple_encountered = True
for x in getattr(t, '__parameters__', ()):
if x not in parameters:
parameters.append(x)
return tuple(parameters)
if not _PEP_696_IMPLEMENTED:
typing._collect_parameters = _collect_parameters
# Backport typing.NamedTuple as it exists in Python 3.13.
# In 3.11, the ability to define generic `NamedTuple`s was supported.
# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
# On 3.12, we added __orig_bases__ to call-based NamedTuples
# On 3.13, we deprecated kwargs-based NamedTuples
if sys.version_info >= (3, 13):
NamedTuple = typing.NamedTuple
else:
def _make_nmtuple(name, types, module, defaults=()):
fields = [n for n, t in types]
annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
# The `_field_types` attribute was removed in 3.9;
# in earlier versions, it is the same as the `__annotations__` attribute
if sys.version_info < (3, 9):
nm_tpl._field_types = annotations
return nm_tpl
_prohibited_namedtuple_fields = typing._prohibited
_special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
class _NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
assert _NamedTuple in bases
for base in bases:
if base is not _NamedTuple and base is not typing.Generic:
raise TypeError(
'can only inherit from a NamedTuple type and Generic')
bases = tuple(tuple if base is _NamedTuple else base for base in bases)
if "__annotations__" in ns:
types = ns["__annotations__"]
elif "__annotate__" in ns:
# TODO: Use inspect.VALUE here, and make the annotations lazily evaluated
types = ns["__annotate__"](1)
else:
types = {}
default_names = []
for field_name in types:
if field_name in ns:
default_names.append(field_name)
elif default_names:
raise TypeError(f"Non-default namedtuple field {field_name} "
f"cannot follow default field"
f"{'s' if len(default_names) > 1 else ''} "
f"{', '.join(default_names)}")
nm_tpl = _make_nmtuple(
typename, types.items(),
defaults=[ns[n] for n in default_names],
module=ns['__module__']
)
nm_tpl.__bases__ = bases
if typing.Generic in bases:
if hasattr(typing, '_generic_class_getitem'): # 3.12+
nm_tpl.__class_getitem__ = classmethod(typing._generic_class_getitem)
else:
class_getitem = typing.Generic.__class_getitem__.__func__
nm_tpl.__class_getitem__ = classmethod(class_getitem)
# update from user namespace without overriding special namedtuple attributes
for key, val in ns.items():
if key in _prohibited_namedtuple_fields:
raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
elif key not in _special_namedtuple_fields:
if key not in nm_tpl._fields:
setattr(nm_tpl, key, ns[key])
try:
set_name = type(val).__set_name__
except AttributeError:
pass
else:
try:
set_name(val, nm_tpl, key)
except BaseException as e:
msg = (
f"Error calling __set_name__ on {type(val).__name__!r} "
f"instance {key!r} in {typename!r}"
)
# BaseException.add_note() existed on py311,
# but the __set_name__ machinery didn't start
# using add_note() until py312.
# Making sure exceptions are raised in the same way
# as in "normal" classes seems most important here.
if sys.version_info >= (3, 12):
e.add_note(msg)
raise
else:
raise RuntimeError(msg) from e
if typing.Generic in bases:
nm_tpl.__init_subclass__()
return nm_tpl
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
def _namedtuple_mro_entries(bases):
assert NamedTuple in bases
return (_NamedTuple,)
@_ensure_subclassable(_namedtuple_mro_entries)
def NamedTuple(typename, fields=_marker, /, **kwargs):
"""Typed version of namedtuple.
Usage::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has an extra __annotations__ attribute, giving a
dict that maps field names to types. (The field names are also in
the _fields attribute, which is part of the namedtuple API.)
An alternative equivalent functional syntax is also accepted::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
"""
if fields is _marker:
if kwargs:
deprecated_thing = "Creating NamedTuple classes using keyword arguments"
deprecation_msg = (
"{name} is deprecated and will be disallowed in Python {remove}. "
"Use the class-based or functional syntax instead."
)
else:
deprecated_thing = "Failing to pass a value for the 'fields' parameter"
example = f"`{typename} = NamedTuple({typename!r}, [])`"
deprecation_msg = (
"{name} is deprecated and will be disallowed in Python {remove}. "
"To create a NamedTuple class with 0 fields "
"using the functional syntax, "
"pass an empty list, e.g. "
) + example + "."
elif fields is None:
if kwargs:
raise TypeError(
"Cannot pass `None` as the 'fields' parameter "
"and also specify fields using keyword arguments"
)
else:
deprecated_thing = "Passing `None` as the 'fields' parameter"
example = f"`{typename} = NamedTuple({typename!r}, [])`"
deprecation_msg = (
"{name} is deprecated and will be disallowed in Python {remove}. "
"To create a NamedTuple class with 0 fields "
"using the functional syntax, "
"pass an empty list, e.g. "
) + example + "."
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
if fields is _marker or fields is None:
warnings.warn(
deprecation_msg.format(name=deprecated_thing, remove="3.15"),
DeprecationWarning,
stacklevel=2,
)
fields = kwargs.items()
nt = _make_nmtuple(typename, fields, module=_caller())
nt.__orig_bases__ = (NamedTuple,)
return nt
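# --- Editor's illustrative sketch (not part of typing_extensions). A hedged usage
# example of the NamedTuple defined above; `Employee` and `Point` are hypothetical
# names that exist only for this demonstration, and the function is never called here.
def _example_namedtuple_usage() -> None:
    # Class-based syntax: fields with defaults must follow fields without defaults.
    class Employee(NamedTuple):
        name: str
        id: int = 0

    # Functional syntax: pass the field list explicitly.
    Point = NamedTuple("Point", [("x", int), ("y", int)])

    assert Employee("bo").id == 0
    assert Point(1, 2)._fields == ("x", "y")
    assert Employee.__annotations__ == {"name": str, "id": int}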
if hasattr(collections.abc, "Buffer"):
Buffer = collections.abc.Buffer
else:
class Buffer(abc.ABC): # noqa: B024
"""Base class for classes that implement the buffer protocol.
The buffer protocol allows Python objects to expose a low-level
memory buffer interface. Before Python 3.12, it is not possible
to implement the buffer protocol in pure Python code, or even
to check whether a class implements the buffer protocol. In
Python 3.12 and higher, the ``__buffer__`` method allows access
to the buffer protocol from Python code, and the
``collections.abc.Buffer`` ABC allows checking whether a class
implements the buffer protocol.
To indicate support for the buffer protocol in earlier versions,
inherit from this ABC, either in a stub file or at runtime,
or use ABC registration. This ABC provides no methods, because
there are no Python-accessible methods shared by pre-3.12 buffer
classes. It is useful primarily for static checks.
"""
# As a courtesy, register the most common stdlib buffer classes.
Buffer.register(memoryview)
Buffer.register(bytearray)
Buffer.register(bytes)
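# --- Editor's illustrative sketch (not part of typing_extensions). A hedged example
# of the Buffer ABC above: on 3.12+ it is collections.abc.Buffer, and on older
# versions the registrations just above make the common stdlib buffer types pass
# isinstance checks. The function exists only for demonstration and is never called here.
def _example_buffer_usage() -> None:
    assert isinstance(b"raw", Buffer)
    assert isinstance(bytearray(b"raw"), Buffer)
    assert isinstance(memoryview(b"raw"), Buffer)
    assert not isinstance("text", Buffer)  # str does not implement the buffer protocol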
# Backport of types.get_original_bases, available on 3.12+ in CPython
if hasattr(_types, "get_original_bases"):
get_original_bases = _types.get_original_bases
else:
def get_original_bases(cls, /):
"""Return the class's "original" bases prior to modification by `__mro_entries__`.
Examples::
from typing import TypeVar, Generic
from pip._vendor.typing_extensions import NamedTuple, TypedDict
T = TypeVar("T")
class Foo(Generic[T]): ...
class Bar(Foo[int], float): ...
class Baz(list[str]): ...
Eggs = NamedTuple("Eggs", [("a", int), ("b", str)])
Spam = TypedDict("Spam", {"a": int, "b": str})
assert get_original_bases(Bar) == (Foo[int], float)
assert get_original_bases(Baz) == (list[str],)
assert get_original_bases(Eggs) == (NamedTuple,)
assert get_original_bases(Spam) == (TypedDict,)
assert get_original_bases(int) == (object,)
"""
try:
return cls.__dict__.get("__orig_bases__", cls.__bases__)
except AttributeError:
raise TypeError(
f'Expected an instance of type, not {type(cls).__name__!r}'
) from None
# NewType is a class on Python 3.10+, making it pickleable
# The error message for subclassing instances of NewType was improved on 3.11+
if sys.version_info >= (3, 11):
NewType = typing.NewType
else:
class NewType:
"""NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy callable that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
"""
def __call__(self, obj, /):
return obj
def __init__(self, name, tp):
self.__qualname__ = name
if '.' in name:
name = name.rpartition('.')[-1]
self.__name__ = name
self.__supertype__ = tp
def_mod = _caller()
if def_mod != 'typing_extensions':
self.__module__ = def_mod
def __mro_entries__(self, bases):
# We defined __mro_entries__ to get a better error message
# if a user attempts to subclass a NewType instance. bpo-46170
supercls_name = self.__name__
class Dummy:
def __init_subclass__(cls):
subcls_name = cls.__name__
raise TypeError(
f"Cannot subclass an instance of NewType. "
f"Perhaps you were looking for: "
f"`{subcls_name} = NewType({subcls_name!r}, {supercls_name})`"
)
return (Dummy,)
def __repr__(self):
return f'{self.__module__}.{self.__qualname__}'
def __reduce__(self):
return self.__qualname__
if sys.version_info >= (3, 10):
# PEP 604 methods
# It doesn't make sense to have these methods on Python <3.10
def __or__(self, other):
return typing.Union[self, other]
def __ror__(self, other):
return typing.Union[other, self]
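# --- Editor's illustrative sketch (not part of typing_extensions). A hedged usage
# example of the NewType defined above, mirroring its docstring; `UserId` is a
# hypothetical name, and the function is never called here.
def _example_newtype_usage() -> None:
    UserId = NewType("UserId", int)
    uid = UserId(42)              # at runtime NewType is just an identity callable
    assert uid == 42
    assert UserId.__supertype__ is int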
if hasattr(typing, "TypeAliasType"):
TypeAliasType = typing.TypeAliasType
else:
def _is_unionable(obj):
"""Corresponds to is_unionable() in unionobject.c in CPython."""
return obj is None or isinstance(obj, (
type,
_types.GenericAlias,
_types.UnionType,
TypeAliasType,
))
class TypeAliasType:
"""Create named, parameterized type aliases.
This provides a backport of the new `type` statement in Python 3.12:
type ListOrSet[T] = list[T] | set[T]
is equivalent to:
T = TypeVar("T")
ListOrSet = TypeAliasType("ListOrSet", list[T] | set[T], type_params=(T,))
The name ListOrSet can then be used as an alias for the type it refers to.
The type_params argument should contain all the type parameters used
in the value of the type alias. If the alias is not generic, this
argument is omitted.
Static type checkers should only support type aliases declared using
TypeAliasType that follow these rules:
- The first argument (the name) must be a string literal.
- The TypeAliasType instance must be immediately assigned to a variable
of the same name. (For example, 'X = TypeAliasType("Y", int)' is invalid,
as is 'X, Y = TypeAliasType("X", int), TypeAliasType("Y", int)').
"""
def __init__(self, name: str, value, *, type_params=()):
if not isinstance(name, str):
raise TypeError("TypeAliasType name must be a string")
self.__value__ = value
self.__type_params__ = type_params
parameters = []
for type_param in type_params:
if isinstance(type_param, TypeVarTuple):
parameters.extend(type_param)
else:
parameters.append(type_param)
self.__parameters__ = tuple(parameters)
def_mod = _caller()
if def_mod != 'typing_extensions':
self.__module__ = def_mod
# Setting this attribute closes the TypeAliasType from further modification
self.__name__ = name
def __setattr__(self, name: str, value: object, /) -> None:
if hasattr(self, "__name__"):
self._raise_attribute_error(name)
super().__setattr__(name, value)
def __delattr__(self, name: str, /) -> Never:
self._raise_attribute_error(name)
def _raise_attribute_error(self, name: str) -> Never:
# Match the Python 3.12 error messages exactly
if name == "__name__":
raise AttributeError("readonly attribute")
elif name in {"__value__", "__type_params__", "__parameters__", "__module__"}:
raise AttributeError(
f"attribute '{name}' of 'typing.TypeAliasType' objects "
"is not writable"
)
else:
raise AttributeError(
f"'typing.TypeAliasType' object has no attribute '{name}'"
)
def __repr__(self) -> str:
return self.__name__
def __getitem__(self, parameters):
if not isinstance(parameters, tuple):
parameters = (parameters,)
parameters = [
typing._type_check(
item, f'Subscripting {self.__name__} requires a type.'
)
for item in parameters
]
return typing._GenericAlias(self, tuple(parameters))
def __reduce__(self):
return self.__name__
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
"type 'typing_extensions.TypeAliasType' is not an acceptable base type"
)
# The presence of this method convinces typing._type_check
# that TypeAliasTypes are types.
def __call__(self):
raise TypeError("Type alias is not callable")
if sys.version_info >= (3, 10):
def __or__(self, right):
# For forward compatibility with 3.12, reject Unions
# that are not accepted by the built-in Union.
if not _is_unionable(right):
return NotImplemented
return typing.Union[self, right]
def __ror__(self, left):
if not _is_unionable(left):
return NotImplemented
return typing.Union[left, self]
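# --- Editor's illustrative sketch (not part of typing_extensions). A hedged example
# of the TypeAliasType backport above, following the ListOrSet example from its
# docstring; the alias and type variable are created only for this demonstration,
# and the function is never called here.
def _example_type_alias_type_usage() -> None:
    T = typing.TypeVar("T")
    ListOrSet = TypeAliasType(
        "ListOrSet", typing.Union[typing.List[T], typing.Set[T]], type_params=(T,)
    )
    assert ListOrSet.__name__ == "ListOrSet"
    assert ListOrSet.__type_params__ == (T,)
    # Subscripting the alias produces a generic alias carrying the type arguments.
    assert ListOrSet[int].__args__ == (int,)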
if hasattr(typing, "is_protocol"):
is_protocol = typing.is_protocol
get_protocol_members = typing.get_protocol_members
else:
def is_protocol(tp: type, /) -> bool:
"""Return True if the given type is a Protocol.
Example::
>>> from typing_extensions import Protocol, is_protocol
>>> class P(Protocol):
... def a(self) -> str: ...
... b: int
>>> is_protocol(P)
True
>>> is_protocol(int)
False
"""
return (
isinstance(tp, type)
and getattr(tp, '_is_protocol', False)
and tp is not Protocol
and tp is not typing.Protocol
)
def get_protocol_members(tp: type, /) -> typing.FrozenSet[str]:
"""Return the set of members defined in a Protocol.
Example::
>>> from typing_extensions import Protocol, get_protocol_members
>>> class P(Protocol):
... def a(self) -> str: ...
... b: int
>>> get_protocol_members(P)
frozenset({'a', 'b'})
Raise a TypeError for arguments that are not Protocols.
"""
if not is_protocol(tp):
raise TypeError(f'{tp!r} is not a Protocol')
if hasattr(tp, '__protocol_attrs__'):
return frozenset(tp.__protocol_attrs__)
return frozenset(_get_protocol_attrs(tp))
if hasattr(typing, "Doc"):
Doc = typing.Doc
else:
class Doc:
"""Define the documentation of a type annotation using ``Annotated``, to be
used in class attributes, function and method parameters, return values,
and variables.
The value should be a positional-only string literal to allow static tools
like editors and documentation generators to use it.
This complements docstrings.
The string value passed is available in the attribute ``documentation``.
Example::
>>> from typing_extensions import Annotated, Doc
>>> def hi(to: Annotated[str, Doc("Who to say hi to")]) -> None: ...
"""
def __init__(self, documentation: str, /) -> None:
self.documentation = documentation
def __repr__(self) -> str:
return f"Doc({self.documentation!r})"
def __hash__(self) -> int:
return hash(self.documentation)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Doc):
return NotImplemented
return self.documentation == other.documentation
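# --- Editor's illustrative sketch (not part of typing_extensions). A hedged example
# of Doc used inside Annotated (both provided by this module; Annotated is defined
# earlier in the file); the hint is built only for this demonstration, and the
# function is never called here.
def _example_doc_usage() -> None:
    hint = Annotated[str, Doc("Who to say hi to")]
    (doc_info,) = hint.__metadata__   # Annotated keeps its extra metadata here
    assert isinstance(doc_info, Doc)
    assert doc_info.documentation == "Who to say hi to"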
_CapsuleType = getattr(_types, "CapsuleType", None)
if _CapsuleType is None:
try:
import _socket
except ImportError:
pass
else:
_CAPI = getattr(_socket, "CAPI", None)
if _CAPI is not None:
_CapsuleType = type(_CAPI)
if _CapsuleType is not None:
CapsuleType = _CapsuleType
__all__.append("CapsuleType")
# Aliases for items that have always been in typing.
# Explicitly assign these (rather than using `from typing import *` at the top),
# so that we get a CI error if one of these is deleted from typing.py
# in a future version of Python
AbstractSet = typing.AbstractSet
AnyStr = typing.AnyStr
BinaryIO = typing.BinaryIO
Callable = typing.Callable
Collection = typing.Collection
Container = typing.Container
Dict = typing.Dict
ForwardRef = typing.ForwardRef
FrozenSet = typing.FrozenSet
Generic = typing.Generic
Hashable = typing.Hashable
IO = typing.IO
ItemsView = typing.ItemsView
Iterable = typing.Iterable
Iterator = typing.Iterator
KeysView = typing.KeysView
List = typing.List
Mapping = typing.Mapping
MappingView = typing.MappingView
Match = typing.Match
MutableMapping = typing.MutableMapping
MutableSequence = typing.MutableSequence
MutableSet = typing.MutableSet
Optional = typing.Optional
Pattern = typing.Pattern
Reversible = typing.Reversible
Sequence = typing.Sequence
Set = typing.Set
Sized = typing.Sized
TextIO = typing.TextIO
Tuple = typing.Tuple
Union = typing.Union
ValuesView = typing.ValuesView
cast = typing.cast
no_type_check = typing.no_type_check
no_type_check_decorator = typing.no_type_check_decorator
|
_SpecialForm
|
python
|
pypa__warehouse
|
tests/common/db/organizations.py
|
{
"start": 5658,
"end": 5791
}
|
class ____(WarehouseFactory):
class Meta:
model = Team.Event
source = factory.SubFactory(TeamFactory)
|
TeamEventFactory
|
python
|
viewflow__viewflow
|
viewflow/workflow/flow/views/mixins.py
|
{
"start": 2299,
"end": 2770
}
|
class ____(object):
"""List of templates for a process view."""
template_filename = None
def get_template_names(self):
if self.template_name is None:
opts = self.flow_class.instance
return (
f"{opts.app_label}/{opts.flow_label}/{self.template_filename}",
f"viewflow/workflow/{self.template_filename}",
)
else:
return [self.template_name]
|
ProcessViewTemplateNames
|
python
|
PyCQA__flake8
|
src/flake8/plugins/pyflakes.py
|
{
"start": 2174,
"end": 3894
}
|
class ____(pyflakes.checker.Checker):
"""Subclass the Pyflakes checker to conform with the flake8 API."""
with_doctest = False
def __init__(self, tree: ast.AST, filename: str) -> None:
"""Initialize the PyFlakes plugin with an AST tree and filename."""
super().__init__(
tree, filename=filename, withDoctest=self.with_doctest,
)
@classmethod
def add_options(cls, parser: OptionManager) -> None:
"""Register options for PyFlakes on the Flake8 OptionManager."""
parser.add_option(
"--builtins",
parse_from_config=True,
comma_separated_list=True,
help="define more built-ins, comma separated",
)
parser.add_option(
"--doctests",
default=False,
action="store_true",
parse_from_config=True,
help="also check syntax of the doctests",
)
@classmethod
def parse_options(cls, options: argparse.Namespace) -> None:
"""Parse option values from Flake8's OptionManager."""
if options.builtins:
cls.builtIns = cls.builtIns.union(options.builtins)
cls.with_doctest = options.doctests
def run(self) -> Generator[tuple[int, int, str, type[Any]]]:
"""Run the plugin."""
for message in self.messages:
col = getattr(message, "col", 0)
yield (
message.lineno,
col,
"{} {}".format(
FLAKE8_PYFLAKES_CODES.get(type(message).__name__, "F999"),
message.message % message.message_args,
),
message.__class__,
)
|
FlakesChecker
|
python
|
eventlet__eventlet
|
tests/hub_test.py
|
{
"start": 7976,
"end": 8780
}
|
class ____(tests.LimitedTestCase):
TEST_TIMEOUT = 10
def test_block_detect(self):
def look_im_blocking():
import time
time.sleep(2)
from eventlet import debug
debug.hub_blocking_detection(True)
gt = eventlet.spawn(look_im_blocking)
self.assertRaises(RuntimeError, gt.wait)
debug.hub_blocking_detection(False)
@tests.skip_if_no_itimer
def test_block_detect_with_itimer(self):
def look_im_blocking():
import time
time.sleep(0.5)
from eventlet import debug
debug.hub_blocking_detection(True, resolution=0.1)
gt = eventlet.spawn(look_im_blocking)
self.assertRaises(RuntimeError, gt.wait)
debug.hub_blocking_detection(False)
|
TestHubBlockingDetector
|
python
|
pytorch__pytorch
|
torch/_inductor/ops_handler.py
|
{
"start": 34563,
"end": 34947
}
|
class ____(NoopHandler):
def __init__(self, device: Optional[torch.device]):
self.device = device
def constant(self, value: Any, dtype: torch.dtype) -> torch._inductor.ir.Constant:
from torch._inductor import ir
return ir.Constant(
value=value, dtype=dtype, device=self.device or torch.get_default_device()
)
|
ExtractConstantsHandler
|
python
|
joke2k__faker
|
faker/providers/currency/de_AT/__init__.py
|
{
"start": 48,
"end": 293
}
|
class ____(CurrencyProvider):
price_formats = ["#,##", "%#,##", "%##,##", "%.###,##", "%#.###,##"]
def pricetag(self) -> str:
return self.numerify(self.random_element(self.price_formats)) + "\N{NO-BREAK SPACE}\N{EURO SIGN}"
|
Provider
|
python
|
tox-dev__tox
|
src/tox/report.py
|
{
"start": 739,
"end": 2980
}
|
class ____(local):
"""A thread local variable that inherits values from its parent."""
_ident_to_data: ClassVar[dict[int | None, str]] = {}
def __init__(self, out_err: OutErr) -> None:
self.name = self._ident_to_data.get(getattr(current_thread(), "parent_ident", None), "ROOT")
self.out_err = out_err
@staticmethod
@contextmanager
def patch_thread() -> Iterator[None]:
def new_start(self: Thread) -> None: # need to patch this
self.parent_ident = current_thread().ident # type: ignore[attr-defined]
old_start(self)
old_start, Thread.start = Thread.start, new_start # type: ignore[method-assign]
try:
yield
finally:
Thread.start = old_start # type: ignore[method-assign]
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, value: str) -> None:
self._name = value
for ident in self._ident_to_data.keys() - {t.ident for t in enumerate_threads()}:
self._ident_to_data.pop(ident)
self._ident_to_data[current_thread().ident] = value
@contextmanager
def with_name(self, name: str) -> Iterator[None]:
previous, self.name = self.name, name
try:
yield
finally:
self.name = previous
@contextmanager
def suspend_out_err(self, yes: bool, out_err: OutErr | None = None) -> Iterator[OutErr]: # noqa: FBT001
previous_out, previous_err = self.out_err
try:
if yes:
if out_err is None: # pragma: no branch
out = self._make(f"out-{self.name}", previous_out)
err = self._make(f"err-{self.name}", previous_err)
else:
out, err = out_err # pragma: no cover
self.out_err = out, err
yield self.out_err
finally:
if yes:
self.out_err = previous_out, previous_err
@staticmethod
def _make(prefix: str, based_of: TextIOWrapper) -> TextIOWrapper:
return TextIOWrapper(NamedBytesIO(f"{prefix}-{based_of.name}"), encoding=locale.getpreferredencoding(False)) # noqa: FBT003
|
_LogThreadLocal
|
python
|
ray-project__ray
|
python/ray/train/v2/_internal/execution/controller/state.py
|
{
"start": 5142,
"end": 5406
}
|
class ____(TrainControllerState):
def __init__(
self,
training_failed_error: TrainingFailedError,
):
super().__init__(state_type=TrainControllerStateType.ERRORED)
self.training_failed_error = training_failed_error
|
ErroredState
|
python
|
getsentry__sentry
|
tests/sentry/event_manager/test_event_manager.py
|
{
"start": 137311,
"end": 163816
}
|
class ____(TestCase):
def setUp(self) -> None:
self.environment1 = Environment.get_or_create(self.project, "prod")
self.environment2 = Environment.get_or_create(self.project, "staging")
self.timestamp = float(int(time() - 300))
self.redis_client = get_redis_client_for_ds()
def make_transaction_event(self, **kwargs: Any) -> dict[str, Any]:
result = {
"transaction": "wait",
"contexts": {
"trace": {
"parent_span_id": "bce14471e0e9654d",
"op": "foobar",
"trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
"span_id": "bf5be759039ede9a",
}
},
"spans": [],
"timestamp": self.timestamp + 0.23,
"start_timestamp": "2019-06-14T14:01:40Z",
"type": "transaction",
}
result.update(kwargs)
return result
def make_release_transaction(
self,
release_version: str = "1.0",
environment_name: str | None = "prod",
project_id: int = 1,
**kwargs: Any,
) -> Event:
transaction = (
self.make_transaction_event(
release=release_version, environment=environment_name, event_id=uuid.uuid1().hex
)
if environment_name is not None
else self.make_transaction_event(release=release_version, event_id=uuid.uuid1().hex)
)
transaction.update(kwargs)
manager = EventManager(transaction)
with self.tasks():
event = manager.save(project_id)
return event
@freeze_time("2022-11-03 10:00:00")
def test_boost_release_with_non_observed_release(self) -> None:
ts = timezone.now().timestamp()
project = self.create_project(platform="python")
release_1 = Release.get_or_create(project=project, version="1.0", date_added=timezone.now())
release_2 = Release.get_or_create(
project=project, version="2.0", date_added=timezone.now() + timedelta(hours=1)
)
release_3 = Release.get_or_create(
project=project, version="3.0", date_added=timezone.now() + timedelta(hours=2)
)
for release, environment in (
(release_1, None),
(release_2, "prod"),
(release_3, "dev"),
):
self.make_release_transaction(
release_version=release.version,
environment_name=environment,
project_id=project.id,
checksum="a" * 32,
timestamp=self.timestamp,
)
env_postfix = f":e:{environment}" if environment is not None else ""
assert self.redis_client.get(f"ds::p:{project.id}:r:{release.id}{env_postfix}") == "1"
assert self.redis_client.hgetall(f"ds::p:{project.id}:boosted_releases") == {
f"ds::r:{release_1.id}": str(ts),
f"ds::r:{release_2.id}:e:prod": str(ts),
f"ds::r:{release_3.id}:e:dev": str(ts),
}
assert ProjectBoostedReleases(project_id=project.id).get_extended_boosted_releases() == [
ExtendedBoostedRelease(
id=release_1.id,
timestamp=ts,
environment=None,
cache_key=f"ds::r:{release_1.id}",
version=release_1.version,
platform=Platform(project.platform),
),
ExtendedBoostedRelease(
id=release_2.id,
timestamp=ts,
environment="prod",
cache_key=f"ds::r:{release_2.id}:e:prod",
version=release_2.version,
platform=Platform(project.platform),
),
ExtendedBoostedRelease(
id=release_3.id,
timestamp=ts,
environment="dev",
cache_key=f"ds::r:{release_3.id}:e:dev",
version=release_3.version,
platform=Platform(project.platform),
),
]
@freeze_time("2022-11-03 10:00:00")
def test_boost_release_boosts_only_latest_release(self) -> None:
ts = timezone.now().timestamp()
project = self.create_project(platform="python")
release_1 = Release.get_or_create(project=project, version="1.0", date_added=timezone.now())
release_2 = Release.get_or_create(
project=project,
version="2.0",
# We must make sure the new release_2.date_added > release_1.date_added.
date_added=timezone.now() + timedelta(hours=1),
)
# We add a transaction for latest release release_2.
self.make_release_transaction(
release_version=release_2.version,
environment_name=self.environment1.name,
project_id=project.id,
checksum="a" * 32,
timestamp=self.timestamp,
)
# We add a transaction for release_1, which is no longer the latest release, so it should be skipped.
self.make_release_transaction(
release_version=release_1.version,
environment_name=self.environment1.name,
project_id=project.id,
checksum="a" * 32,
timestamp=self.timestamp,
)
assert (
self.redis_client.get(f"ds::p:{project.id}:r:{release_2.id}:e:{self.environment1.name}")
== "1"
)
assert self.redis_client.hgetall(f"ds::p:{project.id}:boosted_releases") == {
f"ds::r:{release_2.id}:e:{self.environment1.name}": str(ts),
}
assert ProjectBoostedReleases(project_id=project.id).get_extended_boosted_releases() == [
ExtendedBoostedRelease(
id=release_2.id,
timestamp=ts,
environment=self.environment1.name,
cache_key=f"ds::r:{release_2.id}:e:{self.environment1.name}",
version=release_2.version,
platform=Platform(project.platform),
)
]
@freeze_time("2022-11-03 10:00:00")
def test_boost_release_with_observed_release_and_different_environment(self) -> None:
project = self.create_project(platform="python")
release = Release.get_or_create(project=project, version="1.0", date_added=timezone.now())
self.make_release_transaction(
release_version=release.version,
environment_name=self.environment1.name,
project_id=project.id,
checksum="a" * 32,
timestamp=self.timestamp,
)
ts_1 = time()
assert (
self.redis_client.get(f"ds::p:{project.id}:r:{release.id}:e:{self.environment1.name}")
== "1"
)
assert self.redis_client.hgetall(f"ds::p:{project.id}:boosted_releases") == {
f"ds::r:{release.id}:e:{self.environment1.name}": str(ts_1)
}
assert ProjectBoostedReleases(project_id=project.id).get_extended_boosted_releases() == [
ExtendedBoostedRelease(
id=release.id,
timestamp=ts_1,
environment=self.environment1.name,
cache_key=f"ds::r:{release.id}:e:{self.environment1.name}",
version=release.version,
platform=Platform(project.platform),
)
]
# We simulate that a new transaction with same release but with a different environment value comes after
# 30 minutes to show that we expect the entry for that release-env to be added to the boosted releases.
with freeze_time("2022-11-03 10:30:00"):
self.make_release_transaction(
release_version=release.version,
environment_name=self.environment2.name,
project_id=project.id,
checksum="b" * 32,
timestamp=self.timestamp,
)
ts_2 = time()
assert (
self.redis_client.get(
f"ds::p:{project.id}:r:{release.id}:e:{self.environment2.name}"
)
== "1"
)
assert self.redis_client.hgetall(f"ds::p:{project.id}:boosted_releases") == {
f"ds::r:{release.id}:e:{self.environment1.name}": str(ts_1),
f"ds::r:{release.id}:e:{self.environment2.name}": str(ts_2),
}
assert ProjectBoostedReleases(
project_id=project.id
).get_extended_boosted_releases() == [
ExtendedBoostedRelease(
id=release.id,
timestamp=ts_1,
environment=self.environment1.name,
cache_key=f"ds::r:{release.id}:e:{self.environment1.name}",
version=release.version,
platform=Platform(project.platform),
),
ExtendedBoostedRelease(
id=release.id,
timestamp=ts_2,
environment=self.environment2.name,
cache_key=f"ds::r:{release.id}:e:{self.environment2.name}",
version=release.version,
platform=Platform(project.platform),
),
]
# We also test the case in which no environment is set, which can be the case as per
# https://docs.sentry.io/platforms/javascript/configuration/options/#environment.
with freeze_time("2022-11-03 11:00:00"):
self.make_release_transaction(
release_version=release.version,
environment_name=None,
project_id=project.id,
checksum="b" * 32,
timestamp=self.timestamp,
)
ts_3 = time()
assert self.redis_client.get(f"ds::p:{project.id}:r:{release.id}") == "1"
assert self.redis_client.hgetall(f"ds::p:{project.id}:boosted_releases") == {
f"ds::r:{release.id}:e:{self.environment1.name}": str(ts_1),
f"ds::r:{release.id}:e:{self.environment2.name}": str(ts_2),
f"ds::r:{release.id}": str(ts_3),
}
assert ProjectBoostedReleases(
project_id=project.id
).get_extended_boosted_releases() == [
ExtendedBoostedRelease(
id=release.id,
timestamp=ts_1,
environment=self.environment1.name,
cache_key=f"ds::r:{release.id}:e:{self.environment1.name}",
version=release.version,
platform=Platform(project.platform),
),
ExtendedBoostedRelease(
id=release.id,
timestamp=ts_2,
environment=self.environment2.name,
cache_key=f"ds::r:{release.id}:e:{self.environment2.name}",
version=release.version,
platform=Platform(project.platform),
),
ExtendedBoostedRelease(
id=release.id,
timestamp=ts_3,
environment=None,
cache_key=f"ds::r:{release.id}",
version=release.version,
platform=Platform(project.platform),
),
]
@freeze_time("2022-11-03 10:00:00")
def test_release_not_boosted_with_observed_release_and_same_environment(self) -> None:
project = self.create_project(platform="python")
release = Release.get_or_create(project=project, version="1.0", date_added=timezone.now())
for environment in (self.environment1.name, self.environment2.name):
self.redis_client.set(
f"ds::p:{project.id}:r:{release.id}:e:{environment}", 1, 60 * 60 * 24
)
self.make_release_transaction(
release_version=release.version,
environment_name=environment,
project_id=project.id,
checksum="b" * 32,
timestamp=self.timestamp,
)
assert self.redis_client.hgetall(f"ds::p:{project.id}:boosted_releases") == {}
assert ProjectBoostedReleases(project_id=project.id).get_extended_boosted_releases() == []
@freeze_time("2022-11-03 10:00:00")
def test_release_not_boosted_with_deleted_release_after_event_received(self) -> None:
ts = timezone.now().timestamp()
project = self.create_project(platform="python")
release_1 = Release.get_or_create(project=project, version="1.0", date_added=timezone.now())
release_2 = Release.get_or_create(
project=project, version="2.0", date_added=timezone.now() + timedelta(hours=1)
)
self.make_release_transaction(
release_version=release_1.version,
environment_name=None,
project_id=project.id,
checksum="a" * 32,
timestamp=self.timestamp,
)
assert self.redis_client.get(f"ds::p:{project.id}:r:{release_1.id}") == "1"
self.make_release_transaction(
release_version=release_2.version,
environment_name=None,
project_id=project.id,
checksum="a" * 32,
timestamp=self.timestamp,
)
assert self.redis_client.get(f"ds::p:{project.id}:r:{release_2.id}") == "1"
# We simulate that the release_2 is deleted after the boost has been inserted.
release_2_id = release_2.id
release_2.delete()
# We expect the boosted release to be kept in Redis, if not queried by the ProjectBoostedReleases.
assert self.redis_client.hgetall(f"ds::p:{project.id}:boosted_releases") == {
f"ds::r:{release_1.id}": str(ts),
f"ds::r:{release_2_id}": str(ts),
}
# We expect not to see release_2 because it is no longer in the database, so it is marked as
# expired.
assert ProjectBoostedReleases(project_id=project.id).get_extended_boosted_releases() == [
ExtendedBoostedRelease(
id=release_1.id,
timestamp=ts,
environment=None,
cache_key=f"ds::r:{release_1.id}",
version=release_1.version,
platform=Platform(project.platform),
),
]
@freeze_time("2022-11-03 10:00:00")
def test_get_boosted_releases_with_old_and_new_cache_keys(self) -> None:
ts = timezone.now().timestamp()
project = self.create_project(platform="python")
# Old cache key
release_1 = Release.get_or_create(project=project, version="1.0", date_added=timezone.now())
self.redis_client.hset(
f"ds::p:{project.id}:boosted_releases",
f"{release_1.id}",
ts,
)
# New cache key
release_2 = Release.get_or_create(
project=project, version="2.0", date_added=timezone.now() + timedelta(hours=1)
)
self.redis_client.hset(
f"ds::p:{project.id}:boosted_releases",
f"ds::r:{release_2.id}",
ts,
)
self.redis_client.hset(
f"ds::p:{project.id}:boosted_releases",
f"ds::r:{release_2.id}:e:{self.environment1.name}",
ts,
)
self.redis_client.hset(
f"ds::p:{project.id}:boosted_releases",
f"ds::r:{release_2.id}:e:{self.environment2.name}",
ts,
)
assert ProjectBoostedReleases(project_id=project.id).get_extended_boosted_releases() == [
ExtendedBoostedRelease(
id=release_1.id,
timestamp=ts,
environment=None,
# This item has the old cache key.
cache_key=f"{release_1.id}",
version=release_1.version,
platform=Platform(project.platform),
),
ExtendedBoostedRelease(
id=release_2.id,
timestamp=ts,
environment=None,
cache_key=f"ds::r:{release_2.id}",
version=release_2.version,
platform=Platform(project.platform),
),
ExtendedBoostedRelease(
id=release_2.id,
timestamp=ts,
environment=self.environment1.name,
cache_key=f"ds::r:{release_2.id}:e:{self.environment1.name}",
version=release_2.version,
platform=Platform(project.platform),
),
ExtendedBoostedRelease(
id=release_2.id,
timestamp=ts,
environment=self.environment2.name,
cache_key=f"ds::r:{release_2.id}:e:{self.environment2.name}",
version=release_2.version,
platform=Platform(project.platform),
),
]
@freeze_time("2022-11-03 10:00:00")
def test_expired_boosted_releases_are_removed(self) -> None:
ts = timezone.now().timestamp()
# We want to test with multiple platforms.
for platform in ("python", "java"):
project = self.create_project(platform=platform)
for index, (release_version, environment) in enumerate(
(
(f"1.0-{platform}", self.environment1.name),
(f"2.0-{platform}", self.environment2.name),
)
):
release = Release.get_or_create(
project=project,
version=release_version,
date_added=timezone.now() + timedelta(hours=index),
)
self.redis_client.set(
f"ds::p:{project.id}:r:{release.id}:e:{environment}", 1, 60 * 60 * 24
)
self.redis_client.hset(
f"ds::p:{project.id}:boosted_releases",
f"ds::r:{release.id}:e:{environment}",
# We set the creation time in order to expire it by 1 second.
ts - Platform(platform).time_to_adoption - 1,
)
# We add a new boosted release that is not expired.
release_3 = Release.get_or_create(
project=project,
version=f"3.0-{platform}",
date_added=timezone.now() + timedelta(hours=2),
)
self.make_release_transaction(
release_version=release_3.version,
environment_name=self.environment1.name,
project_id=project.id,
checksum="b" * 32,
timestamp=self.timestamp,
)
assert (
self.redis_client.get(
f"ds::p:{project.id}:r:{release_3.id}:e:{self.environment1.name}"
)
== "1"
)
assert self.redis_client.hgetall(f"ds::p:{project.id}:boosted_releases") == {
f"ds::r:{release_3.id}:e:{self.environment1.name}": str(ts)
}
assert ProjectBoostedReleases(
project_id=project.id
).get_extended_boosted_releases() == [
ExtendedBoostedRelease(
id=release_3.id,
timestamp=ts,
environment=self.environment1.name,
cache_key=f"ds::r:{release_3.id}:e:{self.environment1.name}",
version=release_3.version,
platform=Platform(project.platform),
)
]
@mock.patch(
"sentry.dynamic_sampling.rules.helpers.latest_releases.schedule_invalidate_project_config"
)
def test_project_config_invalidation_is_triggered_when_new_release_is_observed(
self, mocked_invalidate: mock.MagicMock
) -> None:
self.make_release_transaction(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum="a" * 32,
timestamp=self.timestamp,
)
assert any(
o.kwargs["trigger"] == "dynamic_sampling:boost_release"
for o in mocked_invalidate.mock_calls
)
@freeze_time("2022-11-03 10:00:00")
@mock.patch("sentry.dynamic_sampling.rules.helpers.latest_releases.BOOSTED_RELEASES_LIMIT", 2)
def test_least_recently_boosted_release_is_removed_if_limit_is_exceeded(self) -> None:
ts = timezone.now().timestamp()
project = self.create_project(platform="python")
release_1 = Release.get_or_create(
project=project,
version="1.0",
date_added=timezone.now(),
)
release_2 = Release.get_or_create(
project=project,
version="2.0",
date_added=timezone.now() + timedelta(hours=1),
)
# We boost with increasing timestamps, so that we know that the smallest will be evicted.
for release, boost_time in ((release_1, ts - 2), (release_2, ts - 1)):
self.redis_client.set(
f"ds::p:{project.id}:r:{release.id}",
1,
60 * 60 * 24,
)
self.redis_client.hset(
f"ds::p:{project.id}:boosted_releases",
f"ds::r:{release.id}",
boost_time,
)
release_3 = Release.get_or_create(
project=project,
version="3.0",
date_added=timezone.now() + timedelta(hours=2),
)
self.make_release_transaction(
release_version=release_3.version,
environment_name=self.environment1.name,
project_id=project.id,
checksum="b" * 32,
timestamp=self.timestamp,
)
assert (
self.redis_client.get(f"ds::p:{project.id}:r:{release_3.id}:e:{self.environment1.name}")
== "1"
)
assert self.redis_client.hgetall(f"ds::p:{project.id}:boosted_releases") == {
f"ds::r:{release_2.id}": str(ts - 1),
f"ds::r:{release_3.id}:e:{self.environment1.name}": str(ts),
}
assert ProjectBoostedReleases(project_id=project.id).get_extended_boosted_releases() == [
ExtendedBoostedRelease(
id=release_2.id,
timestamp=ts - 1,
environment=None,
cache_key=f"ds::r:{release_2.id}",
version=release_2.version,
platform=Platform(project.platform),
),
ExtendedBoostedRelease(
id=release_3.id,
timestamp=ts,
environment=self.environment1.name,
cache_key=f"ds::r:{release_3.id}:e:{self.environment1.name}",
version=release_3.version,
platform=Platform(project.platform),
),
]
@freeze_time()
@mock.patch("sentry.dynamic_sampling.rules.helpers.latest_releases.BOOSTED_RELEASES_LIMIT", 2)
def test_removed_boost_not_added_again_if_limit_is_exceeded(self) -> None:
ts = timezone.now().timestamp()
project = self.create_project(platform="python")
release_1 = Release.get_or_create(project=project, version="1.0", date_added=timezone.now())
# We want to test the case where the same release is sent with different environments that exceed the
# limit, one environment boost gets evicted, and then a transaction with the evicted environment arrives again.
#
# As an example, suppose the following history of transactions received in the form (release, env):
# (1, production) -> (1, staging) -> (1, None) -> (1, production)
#
# Once we receive the first two, we have reached maximum capacity. Then we receive (1, None) and evict boost
# for (1, production) which results in the following boosts (1, staging), (1, None). After that we receive
# (1, production) again but in this case we don't want to remove (1, staging) because we will end up in an
# infinite loop. Instead, we expect to mark (1, production) as observed and only un-observe it if it does
# not receive transactions within the next 24 hours.
environments_sequence = [
self.environment1.name,
self.environment2.name,
None,
self.environment1.name,
]
for environment in environments_sequence:
self.make_release_transaction(
release_version=release_1.version,
environment_name=environment,
project_id=project.id,
checksum="b" * 32,
timestamp=self.timestamp,
)
# We assert that all environments have been observed.
assert (
self.redis_client.get(f"ds::p:{project.id}:r:{release_1.id}:e:{self.environment1.name}")
== "1"
)
assert (
self.redis_client.get(f"ds::p:{project.id}:r:{release_1.id}:e:{self.environment2.name}")
== "1"
)
assert self.redis_client.get(f"ds::p:{project.id}:r:{release_1.id}") == "1"
# We assert that only the last 2 unseen (release, env) pairs are boosted.
assert self.redis_client.hgetall(f"ds::p:{project.id}:boosted_releases") == {
f"ds::r:{release_1.id}:e:{self.environment2.name}": str(ts),
f"ds::r:{release_1.id}": str(ts),
}
assert ProjectBoostedReleases(project_id=project.id).get_extended_boosted_releases() == [
ExtendedBoostedRelease(
id=release_1.id,
timestamp=ts,
environment=self.environment2.name,
cache_key=f"ds::r:{release_1.id}:e:{self.environment2.name}",
version=release_1.version,
platform=Platform(project.platform),
),
ExtendedBoostedRelease(
id=release_1.id,
timestamp=ts,
environment=None,
cache_key=f"ds::r:{release_1.id}",
version=release_1.version,
platform=Platform(project.platform),
),
]
|
DSLatestReleaseBoostTest
|
python
|
numpy__numpy
|
numpy/_core/code_generators/genapi.py
|
{
"start": 3934,
"end": 4601
}
|
class ____:
def __init__(self, version):
""" Version should be the normal NumPy version, e.g. "1.25" """
major, minor = version.split(".")
self.version = f"NPY_{major}_{minor}_API_VERSION"
def __str__(self):
# Used by version hashing:
return self.version
def add_guard(self, name, normal_define):
"""Wrap a definition behind a version guard"""
wrap = textwrap.dedent(f"""
#if NPY_FEATURE_VERSION >= {self.version}
{{define}}
#endif""")
# we only insert `define` later to avoid confusing dedent:
return wrap.format(define=normal_define)
|
MinVersion
|
python
|
django__django
|
tests/template_tests/syntax_tests/test_partials.py
|
{
"start": 1384,
"end": 24053
}
|
class ____(SimpleTestCase):
libraries = {"bad_tag": "template_tests.templatetags.bad_tag"}
@setup({name: gen_partial_template(name) for name in valid_partialdef_names})
def test_valid_partialdef_names(self):
for template_name in valid_partialdef_names:
with self.subTest(template_name=template_name):
output = self.engine.render_to_string(template_name)
self.assertEqual(output, f"TEST with {template_name}!")
@setup(
{
"basic": (
"{% partialdef testing-name %}"
"HERE IS THE TEST CONTENT"
"{% endpartialdef %}"
"{% partial testing-name %}"
),
"basic_inline": (
"{% partialdef testing-name inline %}"
"HERE IS THE TEST CONTENT"
"{% endpartialdef %}"
),
"inline_inline": (
"{% partialdef inline inline %}"
"HERE IS THE TEST CONTENT"
"{% endpartialdef %}"
),
"with_newlines": (
"{% partialdef testing-name %}\n"
"HERE IS THE TEST CONTENT\n"
"{% endpartialdef testing-name %}\n"
"{% partial testing-name %}"
),
}
)
def test_basic_usage(self):
for template_name in (
"basic",
"basic_inline",
"inline_inline",
"with_newlines",
):
with self.subTest(template_name=template_name):
output = self.engine.render_to_string(template_name)
self.assertEqual(output.strip(), "HERE IS THE TEST CONTENT")
@setup(
{
"inline_partial_with_context": (
"BEFORE\n"
"{% partialdef testing-name inline %}"
"HERE IS THE TEST CONTENT"
"{% endpartialdef %}\n"
"AFTER"
)
}
)
def test_partial_inline_only_with_before_and_after_content(self):
output = self.engine.render_to_string("inline_partial_with_context")
self.assertEqual(output.strip(), "BEFORE\nHERE IS THE TEST CONTENT\nAFTER")
@setup(
{
"inline_partial_explicit_end": (
"{% partialdef testing-name inline %}"
"HERE IS THE TEST CONTENT"
"{% endpartialdef testing-name %}\n"
"{% partial testing-name %}"
)
}
)
def test_partial_inline_and_used_once(self):
output = self.engine.render_to_string("inline_partial_explicit_end")
self.assertEqual(output, "HERE IS THE TEST CONTENT\nHERE IS THE TEST CONTENT")
@setup(
{
"inline_partial_with_usage": (
"BEFORE\n"
"{% partialdef content_snippet inline %}"
"HERE IS THE TEST CONTENT"
"{% endpartialdef %}\n"
"AFTER\n"
"{% partial content_snippet %}"
)
}
)
def test_partial_inline_and_used_once_with_before_and_after_content(self):
output = self.engine.render_to_string("inline_partial_with_usage")
self.assertEqual(
output.strip(),
"BEFORE\nHERE IS THE TEST CONTENT\nAFTER\nHERE IS THE TEST CONTENT",
)
@setup(
{
"partial_used_before_definition": (
"TEMPLATE START\n"
"{% partial testing-name %}\n"
"MIDDLE CONTENT\n"
"{% partialdef testing-name %}\n"
"THIS IS THE PARTIAL CONTENT\n"
"{% endpartialdef %}\n"
"TEMPLATE END"
),
}
)
def test_partial_used_before_definition(self):
output = self.engine.render_to_string("partial_used_before_definition")
expected = (
"TEMPLATE START\n\nTHIS IS THE PARTIAL CONTENT\n\n"
"MIDDLE CONTENT\n\nTEMPLATE END"
)
self.assertEqual(output, expected)
@setup(
{
"partial_with_extends": (
"{% extends 'partial_base.html' %}"
"{% partialdef testing-name %}Inside Content{% endpartialdef %}"
"{% block main %}"
"Main content with {% partial testing-name %}"
"{% endblock %}"
),
},
partial_templates,
)
def test_partial_defined_outside_main_block(self):
output = self.engine.render_to_string("partial_with_extends")
self.assertIn("<main>Main content with Inside Content</main>", output)
@setup(
{
"partial_with_extends_and_block_super": (
"{% extends 'partial_base.html' %}"
"{% partialdef testing-name %}Inside Content{% endpartialdef %}"
"{% block main %}{{ block.super }} "
"Main content with {% partial testing-name %}"
"{% endblock %}"
),
},
partial_templates,
)
def test_partial_used_with_block_super(self):
output = self.engine.render_to_string("partial_with_extends_and_block_super")
self.assertIn(
"<main>Default main content. Main content with Inside Content</main>",
output,
)
@setup(
{
"partial_with_include": (
"MAIN TEMPLATE START\n"
"{% include 'partial_included.html' %}\n"
"MAIN TEMPLATE END"
)
},
partial_templates,
)
def test_partial_in_included_template(self):
output = self.engine.render_to_string("partial_with_include")
expected = (
"MAIN TEMPLATE START\nINCLUDED TEMPLATE START\n\n\n"
"Now using the partial: \n"
"THIS IS CONTENT FROM THE INCLUDED PARTIAL\n\n"
"INCLUDED TEMPLATE END\n\nMAIN TEMPLATE END"
)
self.assertEqual(output, expected)
@setup(
{
"partial_as_include_in_other_template": (
"MAIN TEMPLATE START\n"
"{% include 'partial_included.html#included-partial' %}\n"
"MAIN TEMPLATE END"
)
},
partial_templates,
)
def test_partial_as_include_in_template(self):
output = self.engine.render_to_string("partial_as_include_in_other_template")
expected = (
"MAIN TEMPLATE START\n\n"
"THIS IS CONTENT FROM THE INCLUDED PARTIAL\n\n"
"MAIN TEMPLATE END"
)
self.assertEqual(output, expected)
@setup(
{
"nested_simple": (
"{% extends 'base.html' %}"
"{% block content %}"
"This is my main page."
"{% partialdef outer inline %}"
" It hosts a couple of partials.\n"
" {% partialdef inner inline %}"
" And an inner one."
" {% endpartialdef inner %}"
"{% endpartialdef outer %}"
"{% endblock content %}"
),
"use_outer": "{% include 'nested_simple#outer' %}",
"use_inner": "{% include 'nested_simple#inner' %}",
}
)
def test_nested_partials(self):
with self.subTest(template_name="use_outer"):
output = self.engine.render_to_string("use_outer")
self.assertEqual(
[line.strip() for line in output.split("\n")],
["It hosts a couple of partials.", "And an inner one."],
)
with self.subTest(template_name="use_inner"):
output = self.engine.render_to_string("use_inner")
self.assertEqual(output.strip(), "And an inner one.")
@setup(
{
"partial_undefined_name": "{% partial undefined %}",
"partial_missing_name": "{% partial %}",
"partial_closing_tag": (
"{% partialdef testing-name %}TEST{% endpartialdef %}"
"{% partial testing-name %}{% endpartial %}"
),
"partialdef_missing_name": "{% partialdef %}{% endpartialdef %}",
"partialdef_missing_close_tag": "{% partialdef name %}TEST",
"partialdef_opening_closing_name_mismatch": (
"{% partialdef testing-name %}TEST{% endpartialdef invalid %}"
),
"partialdef_invalid_name": gen_partial_template("with\nnewline"),
"partialdef_extra_params": (
"{% partialdef testing-name inline extra %}TEST{% endpartialdef %}"
),
"partialdef_duplicated_names": (
"{% partialdef testing-name %}TEST{% endpartialdef %}"
"{% partialdef testing-name %}TEST{% endpartialdef %}"
"{% partial testing-name %}"
),
"partialdef_duplicated_nested_names": (
"{% partialdef testing-name %}"
"TEST"
"{% partialdef testing-name %}TEST{% endpartialdef %}"
"{% endpartialdef %}"
"{% partial testing-name %}"
),
},
)
def test_basic_parse_errors(self):
for template_name, error_msg in (
(
"partial_undefined_name",
"Partial 'undefined' is not defined in the current template.",
),
("partial_missing_name", "'partial' tag requires a single argument"),
("partial_closing_tag", "Invalid block tag on line 1: 'endpartial'"),
("partialdef_missing_name", "'partialdef' tag requires a name"),
("partialdef_missing_close_tag", "Unclosed tag on line 1: 'partialdef'"),
(
"partialdef_opening_closing_name_mismatch",
"expected 'endpartialdef' or 'endpartialdef testing-name'.",
),
("partialdef_invalid_name", "Invalid block tag on line 3: 'endpartialdef'"),
("partialdef_extra_params", "'partialdef' tag takes at most 2 arguments"),
(
"partialdef_duplicated_names",
"Partial 'testing-name' is already defined in the "
"'partialdef_duplicated_names' template.",
),
(
"partialdef_duplicated_nested_names",
"Partial 'testing-name' is already defined in the "
"'partialdef_duplicated_nested_names' template.",
),
):
with (
self.subTest(template_name=template_name),
self.assertRaisesMessage(TemplateSyntaxError, error_msg),
):
self.engine.render_to_string(template_name)
@setup(
{
"with_params": (
"{% partialdef testing-name inline=true %}TEST{% endpartialdef %}"
),
"uppercase": "{% partialdef testing-name INLINE %}TEST{% endpartialdef %}",
}
)
def test_partialdef_invalid_inline(self):
error_msg = "The 'inline' argument does not have any parameters"
for template_name in ("with_params", "uppercase"):
with (
self.subTest(template_name=template_name),
self.assertRaisesMessage(TemplateSyntaxError, error_msg),
):
self.engine.render_to_string(template_name)
@setup(
{
"partial_broken_unclosed": (
"<div>Before partial</div>"
"{% partialdef unclosed_partial %}"
"<p>This partial has no closing tag</p>"
"<div>After partial content</div>"
)
}
)
def test_broken_partial_unclosed_exception_info(self):
with self.assertRaises(TemplateSyntaxError) as cm:
self.engine.get_template("partial_broken_unclosed")
self.assertIn("endpartialdef", str(cm.exception))
self.assertIn("Unclosed tag", str(cm.exception))
reporter = ExceptionReporter(None, cm.exception.__class__, cm.exception, None)
traceback_data = reporter.get_traceback_data()
exception_value = str(traceback_data.get("exception_value", ""))
self.assertIn("Unclosed tag", exception_value)
@setup(
{
"partial_with_variable_error": (
"<h1>Title</h1>\n"
"{% partialdef testing-name %}\n"
"<p>{{ nonexistent|default:alsonotthere }}</p>\n"
"{% endpartialdef %}\n"
"<h2>Sub Title</h2>\n"
"{% partial testing-name %}\n"
),
}
)
def test_partial_runtime_exception_has_debug_info(self):
template = self.engine.get_template("partial_with_variable_error")
context = Context({})
if hasattr(self.engine, "string_if_invalid") and self.engine.string_if_invalid:
output = template.render(context)
# The variable should be replaced with INVALID
self.assertIn("INVALID", output)
else:
with self.assertRaises(VariableDoesNotExist) as cm:
template.render(context)
if self.engine.debug:
exc_info = cm.exception.template_debug
self.assertEqual(
exc_info["during"], "{{ nonexistent|default:alsonotthere }}"
)
self.assertEqual(exc_info["line"], 3)
self.assertEqual(exc_info["name"], "partial_with_variable_error")
self.assertIn("Failed lookup", exc_info["message"])
@setup(
{
"partial_exception_info_test": (
"<h1>Title</h1>\n"
"{% partialdef testing-name %}\n"
"<p>Content</p>\n"
"{% endpartialdef %}\n"
),
}
)
def test_partial_template_get_exception_info_delegation(self):
if self.engine.debug:
template = self.engine.get_template("partial_exception_info_test")
partial_template = template.extra_data["partials"]["testing-name"]
test_exc = Exception("Test exception")
token = Token(
token_type=TokenType.VAR,
contents="test",
position=(0, 4),
)
exc_info = partial_template.get_exception_info(test_exc, token)
self.assertIn("message", exc_info)
self.assertIn("line", exc_info)
self.assertIn("name", exc_info)
self.assertEqual(exc_info["name"], "partial_exception_info_test")
self.assertEqual(exc_info["message"], "Test exception")
@setup(
{
"partial_with_undefined_reference": (
"<h1>Header</h1>\n"
"{% partial undefined %}\n"
"<p>After undefined partial</p>\n"
),
}
)
def test_undefined_partial_exception_info(self):
template = self.engine.get_template("partial_with_undefined_reference")
with self.assertRaises(TemplateSyntaxError) as cm:
template.render(Context())
self.assertIn("undefined", str(cm.exception))
self.assertIn("is not defined", str(cm.exception))
if self.engine.debug:
exc_debug = cm.exception.template_debug
self.assertEqual(exc_debug["during"], "{% partial undefined %}")
self.assertEqual(exc_debug["line"], 2)
self.assertEqual(exc_debug["name"], "partial_with_undefined_reference")
self.assertIn("undefined", exc_debug["message"])
@setup(
{
"existing_template": (
"<h1>Header</h1><p>This template has no partials defined</p>"
),
}
)
def test_undefined_partial_exception_info_template_does_not_exist(self):
with self.assertRaises(TemplateDoesNotExist) as cm:
self.engine.get_template("existing_template#undefined")
self.assertIn("undefined", str(cm.exception))
@setup(
{
"partial_with_syntax_error": (
"<h1>Title</h1>\n"
"{% partialdef syntax_error_partial %}\n"
" {% if user %}\n"
" <p>User: {{ user.name }}</p>\n"
" {% endif\n"
" <p>Missing closing tag above</p>\n"
"{% endpartialdef %}\n"
"{% partial syntax_error_partial %}\n"
),
}
)
def test_partial_with_syntax_error_exception_info(self):
with self.assertRaises(TemplateSyntaxError) as cm:
self.engine.get_template("partial_with_syntax_error")
self.assertIn("endif", str(cm.exception).lower())
if self.engine.debug:
exc_debug = cm.exception.template_debug
self.assertIn("endpartialdef", exc_debug["during"])
self.assertEqual(exc_debug["name"], "partial_with_syntax_error")
self.assertIn("endif", exc_debug["message"].lower())
@setup(
{
"partial_with_runtime_error": (
"<h1>Title</h1>\n"
"{% load bad_tag %}\n"
"{% partialdef runtime_error_partial %}\n"
" <p>This will raise an error:</p>\n"
" {% badsimpletag %}\n"
"{% endpartialdef %}\n"
"{% partial runtime_error_partial %}\n"
),
}
)
def test_partial_runtime_error_exception_info(self):
template = self.engine.get_template("partial_with_runtime_error")
context = Context()
with self.assertRaises(RuntimeError) as cm:
template.render(context)
if self.engine.debug:
exc_debug = cm.exception.template_debug
self.assertIn("badsimpletag", exc_debug["during"])
self.assertEqual(exc_debug["line"], 5) # Line 5 is where badsimpletag is
self.assertEqual(exc_debug["name"], "partial_with_runtime_error")
self.assertIn("bad simpletag", exc_debug["message"])
@setup(
{
"nested_partial_with_undefined_var": (
"<h1>Title</h1>\n"
"{% partialdef outer_partial %}\n"
' <div class="outer">\n'
" {% partialdef inner_partial %}\n"
" <p>{{ undefined_var }}</p>\n"
" {% endpartialdef %}\n"
" {% partial inner_partial %}\n"
" </div>\n"
"{% endpartialdef %}\n"
"{% partial outer_partial %}\n"
),
}
)
def test_nested_partial_error_exception_info(self):
template = self.engine.get_template("nested_partial_with_undefined_var")
context = Context()
output = template.render(context)
# When string_if_invalid is set, it will show INVALID
# When not set, undefined variables just render as empty string
if hasattr(self.engine, "string_if_invalid") and self.engine.string_if_invalid:
self.assertIn("INVALID", output)
else:
self.assertIn("<p>", output)
self.assertIn("</p>", output)
@setup(
{
"parent.html": (
"<!DOCTYPE html>\n"
"<html>\n"
"<head>{% block title %}Default Title{% endblock %}</head>\n"
"<body>\n"
" {% block content %}{% endblock %}\n"
"</body>\n"
"</html>\n"
),
"child.html": (
"{% extends 'parent.html' %}\n"
"{% block content %}\n"
" {% partialdef content_partial %}\n"
" <p>{{ missing_variable|undefined_filter }}</p>\n"
" {% endpartialdef %}\n"
" {% partial content_partial %}\n"
"{% endblock %}\n"
),
}
)
def test_partial_in_extended_template_error(self):
with self.assertRaises(TemplateSyntaxError) as cm:
self.engine.get_template("child.html")
self.assertIn("undefined_filter", str(cm.exception))
if self.engine.debug:
exc_debug = cm.exception.template_debug
self.assertIn("undefined_filter", exc_debug["during"])
self.assertEqual(exc_debug["name"], "child.html")
self.assertIn("undefined_filter", exc_debug["message"])
@setup(
{
"partial_broken_nesting": (
"<div>Before partial</div>\n"
"{% partialdef outer %}\n"
"{% partialdef inner %}...{% endpartialdef outer %}\n"
"{% endpartialdef inner %}\n"
"<div>After partial content</div>"
)
}
)
def test_broken_partial_nesting(self):
with self.assertRaises(TemplateSyntaxError) as cm:
self.engine.get_template("partial_broken_nesting")
self.assertIn("endpartialdef", str(cm.exception))
self.assertIn("Invalid block tag", str(cm.exception))
self.assertIn("'endpartialdef inner'", str(cm.exception))
reporter = ExceptionReporter(None, cm.exception.__class__, cm.exception, None)
traceback_data = reporter.get_traceback_data()
exception_value = str(traceback_data.get("exception_value", ""))
self.assertIn("Invalid block tag", exception_value)
self.assertIn("'endpartialdef inner'", str(cm.exception))
@setup(
{
"partial_broken_nesting_mixed": (
"<div>Before partial</div>\n"
"{% partialdef outer %}\n"
"{% partialdef inner %}...{% endpartialdef %}\n"
"{% endpartialdef inner %}\n"
"<div>After partial content</div>"
)
}
)
def test_broken_partial_nesting_mixed(self):
with self.assertRaises(TemplateSyntaxError) as cm:
self.engine.get_template("partial_broken_nesting_mixed")
self.assertIn("endpartialdef", str(cm.exception))
self.assertIn("Invalid block tag", str(cm.exception))
self.assertIn("'endpartialdef outer'", str(cm.exception))
reporter = ExceptionReporter(None, cm.exception.__class__, cm.exception, None)
traceback_data = reporter.get_traceback_data()
exception_value = str(traceback_data.get("exception_value", ""))
self.assertIn("Invalid block tag", exception_value)
self.assertIn("'endpartialdef outer'", str(cm.exception))
|
PartialTagTests
|
python
|
pandas-dev__pandas
|
pandas/io/pytables.py
|
{
"start": 163323,
"end": 165471
}
|
class ____(AppendableFrameTable):
"""a table that read/writes the generic pytables table format"""
pandas_kind = "frame_table"
table_type = "generic_table"
ndim = 2
obj_type = DataFrame
levels: list[Hashable]
@property
def pandas_type(self) -> str:
return self.pandas_kind
@property
def storable(self):
return getattr(self.group, "table", None) or self.group
def get_attrs(self) -> None:
"""retrieve our attributes"""
self.non_index_axes = []
self.nan_rep = None
self.levels = []
self.index_axes = [a for a in self.indexables if a.is_an_indexable]
self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
self.data_columns = [a.name for a in self.values_axes]
@cache_readonly
def indexables(self):
"""create the indexables from the table description"""
d = self.description
# TODO: can we get a typ for this? AFAICT it is the only place
# where we aren't passing one
# the index columns is just a simple index
md = self.read_metadata("index")
meta = "category" if md is not None else None
index_col = GenericIndexCol(
name="index", axis=0, table=self.table, meta=meta, metadata=md
)
_indexables: list[GenericIndexCol | GenericDataIndexableCol] = [index_col]
for i, n in enumerate(d._v_names):
assert isinstance(n, str)
atom = getattr(d, n)
md = self.read_metadata(n)
meta = "category" if md is not None else None
dc = GenericDataIndexableCol(
name=n,
pos=i,
values=[n],
typ=atom,
table=self.table,
meta=meta,
metadata=md,
)
_indexables.append(dc)
return _indexables
# error: Signature of "write" incompatible with supertype "AppendableTable"
def write(self, **kwargs) -> None: # type: ignore[override]
raise NotImplementedError("cannot write on a generic table")
|
GenericTable
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/chains/query_constructor/parser.py
|
{
"start": 1799,
"end": 8560
}
|
class ____(Transformer):
"""Transform a query string into an intermediate representation."""
def __init__(
self,
*args: Any,
allowed_comparators: Sequence[Comparator] | None = None,
allowed_operators: Sequence[Operator] | None = None,
allowed_attributes: Sequence[str] | None = None,
**kwargs: Any,
):
"""Initialize the QueryTransformer.
Args:
*args: Positional arguments.
allowed_comparators: Optional sequence of allowed comparators.
allowed_operators: Optional sequence of allowed operators.
allowed_attributes: Optional sequence of allowed attributes for comparators.
**kwargs: Additional keyword arguments.
"""
super().__init__(*args, **kwargs)
self.allowed_comparators = allowed_comparators
self.allowed_operators = allowed_operators
self.allowed_attributes = allowed_attributes
def program(self, *items: Any) -> tuple:
"""Transform the items into a tuple."""
return items
def func_call(self, func_name: Any, args: list) -> FilterDirective:
"""Transform a function name and args into a FilterDirective.
Args:
func_name: The name of the function.
args: The arguments passed to the function.
Returns:
The filter directive.
Raises:
ValueError: If the function is a comparator and the first arg is not in the
allowed attributes.
"""
func = self._match_func_name(str(func_name))
if isinstance(func, Comparator):
if self.allowed_attributes and args[0] not in self.allowed_attributes:
msg = (
f"Received invalid attributes {args[0]}. Allowed attributes are "
f"{self.allowed_attributes}"
)
raise ValueError(msg)
return Comparison(comparator=func, attribute=args[0], value=args[1])
if len(args) == 1 and func in (Operator.AND, Operator.OR):
return args[0]
return Operation(operator=func, arguments=args)
def _match_func_name(self, func_name: str) -> Operator | Comparator:
if func_name in set(Comparator):
if (
self.allowed_comparators is not None
and func_name not in self.allowed_comparators
):
msg = (
f"Received disallowed comparator {func_name}. Allowed "
f"comparators are {self.allowed_comparators}"
)
raise ValueError(msg)
return Comparator(func_name)
if func_name in set(Operator):
if (
self.allowed_operators is not None
and func_name not in self.allowed_operators
):
msg = (
f"Received disallowed operator {func_name}. Allowed operators"
f" are {self.allowed_operators}"
)
raise ValueError(msg)
return Operator(func_name)
msg = (
f"Received unrecognized function {func_name}. Valid functions are "
f"{list(Operator) + list(Comparator)}"
)
raise ValueError(msg)
def args(self, *items: Any) -> tuple:
"""Transforms items into a tuple.
Args:
items: The items to transform.
"""
return items
def false(self) -> bool:
"""Returns false."""
return False
def true(self) -> bool:
"""Returns true."""
return True
def list(self, item: Any) -> list:
"""Transforms an item into a list.
Args:
item: The item to transform.
"""
if item is None:
return []
return list(item)
def int(self, item: Any) -> int:
"""Transforms an item into an int.
Args:
item: The item to transform.
"""
return int(item)
def float(self, item: Any) -> float:
"""Transforms an item into a float.
Args:
item: The item to transform.
"""
return float(item)
def date(self, item: Any) -> ISO8601Date:
"""Transforms an item into a ISO8601Date object.
Args:
item: The item to transform.
Raises:
ValueError: If the item is not in ISO 8601 date format.
"""
item = str(item).strip("\"'")
try:
datetime.datetime.strptime(item, "%Y-%m-%d") # noqa: DTZ007
except ValueError:
warnings.warn(
"Dates are expected to be provided in ISO 8601 date format "
"(YYYY-MM-DD).",
stacklevel=3,
)
return {"date": item, "type": "date"}
def datetime(self, item: Any) -> ISO8601DateTime:
"""Transforms an item into a ISO8601DateTime object.
Args:
item: The item to transform.
Raises:
ValueError: If the item is not in ISO 8601 datetime format.
"""
item = str(item).strip("\"'")
try:
# Parse full ISO 8601 datetime format
datetime.datetime.strptime(item, "%Y-%m-%dT%H:%M:%S%z")
except ValueError:
try:
datetime.datetime.strptime(item, "%Y-%m-%dT%H:%M:%S") # noqa: DTZ007
except ValueError as e:
msg = "Datetime values are expected to be in ISO 8601 format."
raise ValueError(msg) from e
return {"datetime": item, "type": "datetime"}
def string(self, item: Any) -> str:
"""Transforms an item into a string.
Removes escaped quotes.
Args:
item: The item to transform.
"""
return str(item).strip("\"'")
def get_parser(
allowed_comparators: Sequence[Comparator] | None = None,
allowed_operators: Sequence[Operator] | None = None,
allowed_attributes: Sequence[str] | None = None,
) -> Lark:
"""Return a parser for the query language.
Args:
allowed_comparators: The allowed comparators.
allowed_operators: The allowed operators.
allowed_attributes: The allowed attributes.
Returns:
Lark parser for the query language.
"""
if not _HAS_LARK:
msg = "Cannot import lark, please install it with 'pip install lark'."
raise ImportError(msg)
transformer = QueryTransformer(
allowed_comparators=allowed_comparators,
allowed_operators=allowed_operators,
allowed_attributes=allowed_attributes,
)
return Lark(GRAMMAR, parser="lalr", transformer=transformer, start="program")
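A brief usage sketch of the parser assembled by get_parser above. The import path mirrors this module's location and the query string is a made-up example of the comparator/operator call syntax; treat both as assumptions.

# Assumed import path, mirroring this module's location in the repo.
from langchain_classic.chains.query_constructor.parser import get_parser

parser = get_parser()  # raises ImportError unless `lark` is installed
# The LALR parser runs QueryTransformer while parsing, so parse() yields the
# intermediate representation (Operation / Comparison objects) directly.
result = parser.parse('and(eq("genre", "fiction"), gt("year", 2000))')
print(result)  # parsed filter directive(s) built by QueryTransformer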
|
QueryTransformer
|
python
|
ray-project__ray
|
python/ray/train/v2/_internal/execution/checkpoint/checkpoint_manager.py
|
{
"start": 2346,
"end": 16381
}
|
class ____(_CheckpointManager, ReportCallback, WorkerGroupCallback):
def __init__(
self,
checkpoint_config: CheckpointConfig,
storage_context: StorageContext,
):
self._storage_context = storage_context
self._checkpoint_config = checkpoint_config
# This tracks the number of report calls that have been processed
# for the current worker group.
self._current_report_index = 0
# Map from checkpoint to training result
self._pending_training_results = {}
# Map from checkpoint to report index. Used to order checkpoints.
self._checkpoint_to_report_index = {}
self._condition = asyncio.Condition()
super().__init__(checkpoint_config)
# If the snapshot is found, the checkpoint manager will restore its state.
# TODO(xgui): CheckpointManager is used to save or restore the checkpoint manager state.
# We should sanity check if we should see old state in the storage folder.
self._maybe_load_state_from_storage()
def register_checkpoint(
self,
checkpoint_result: _TrainingResult,
is_result_pending: bool,
):
"""Register new checkpoint and add to bookkeeping.
This method will register a new checkpoint and add it to the internal
bookkeeping logic. This means the checkpoint manager will decide if
this checkpoint should be kept, and if older or worse performing
checkpoints should be deleted.
Args:
checkpoint_result: Tracked checkpoint and associated metrics to add to bookkeeping.
is_result_pending: Whether the result is pending or fully ready.
"""
self._latest_checkpoint_result = checkpoint_result
self._checkpoint_to_report_index[
checkpoint_result.checkpoint
] = self._current_report_index
if self._checkpoint_config.checkpoint_score_attribute is not None:
# If we're ordering by a score, insert the checkpoint
# so that the list remains sorted.
_insert_into_sorted_list(
self._checkpoint_results,
checkpoint_result,
key=self._get_checkpoint_score,
checkpoint_to_report_index=self._checkpoint_to_report_index,
)
else:
# If no metric is provided, just append (ordering by time of registration).
self._checkpoint_results.append(checkpoint_result)
if is_result_pending:
self._pending_training_results[
checkpoint_result.checkpoint
] = checkpoint_result
self._save_state_and_delete_old_checkpoints()
self._current_report_index += 1
self._notify()
def update_checkpoints_with_metrics(
self, checkpoint_to_metrics: Dict[Checkpoint, Dict[str, Any]]
):
"""Update the checkpoints with the metrics."""
for checkpoint, metrics in checkpoint_to_metrics.items():
if checkpoint not in self._pending_training_results:
logger.warning(
f"Checkpoint {checkpoint} not found in pending training results. "
)
continue
checkpoint_result = self._pending_training_results[checkpoint]
checkpoint_result.metrics.update(metrics)
if checkpoint_result not in self._checkpoint_results:
raise ValueError(
f"Checkpoint {checkpoint} was in pending training results but not "
"checkpoint results. "
)
self._checkpoint_results.remove(checkpoint_result)
_insert_into_sorted_list(
self._checkpoint_results,
checkpoint_result,
key=self._get_checkpoint_score,
checkpoint_to_report_index=self._checkpoint_to_report_index,
)
self._pending_training_results.pop(checkpoint)
self._save_state_and_delete_old_checkpoints()
self._notify()
def _notify(self):
"""Notify condition so all listeners know state has changed."""
async def async_notify():
async with self._condition:
self._condition.notify_all()
asyncio.create_task(async_notify())
def _save_state_and_delete_old_checkpoints(self):
"""Delete the old checkpoints."""
# Get checkpoints to delete
results_to_delete = set()
if self._checkpoint_config.num_to_keep is not None:
# Delete the bottom (N - K) checkpoints
worst_results = set(
self._checkpoint_results[: -self._checkpoint_config.num_to_keep]
)
# Except for the latest checkpoint and pending checkpoints
results_to_delete = worst_results - {self._latest_checkpoint_result}
results_to_delete = results_to_delete - set(
self._pending_training_results.values()
)
# Update internal state before actually deleting them.
self._checkpoint_results = [
checkpoint_result
for checkpoint_result in self._checkpoint_results
if checkpoint_result not in results_to_delete
]
# Save the checkpoint manager state to storage.
# Note: We save the state before deleting the old checkpoints.
# If deletion happens first and the process crashes, our snapshot
# may point to some stale checkpoints that are already deleted.
# TODO: Make this writing operation non-blocking.
self._write_state_to_storage()
# Delete the old checkpoints.
for checkpoint_result in results_to_delete:
checkpoint = checkpoint_result.checkpoint
logger.debug("Deleting checkpoint: ", checkpoint)
delete_fs_path(fs=checkpoint.filesystem, fs_path=checkpoint.path)
# --------------------------
# CheckpointManager state
# --------------------------
def _save_state(self) -> str:
"""Save the checkpoint manager state to a JSON str."""
checkpoint_results = [
_get_state_from_training_result(checkpoint_result, self._storage_context)
for checkpoint_result in self._checkpoint_results
]
latest_checkpoint_result = (
_get_state_from_training_result(
self._latest_checkpoint_result, self._storage_context
)
if self._latest_checkpoint_result is not None
else None
)
manager_snapshot = _CheckpointManagerState(
checkpoint_results=checkpoint_results,
latest_checkpoint_result=latest_checkpoint_result,
)
return manager_snapshot.json()
def _load_state(self, json_state: str):
"""Load the checkpoint manager state from a JSON str."""
try:
json_dict = json.loads(json_state)
manager_snapshot = _CheckpointManagerState.parse_obj(json_dict)
except Exception as e:
raise CheckpointManagerInitializationError(repr(e)) from e
self._assert_checkpoints_exist()
self._checkpoint_results = [
_get_training_result_from_state(
training_result_state, self._storage_context
)
for training_result_state in manager_snapshot.checkpoint_results
]
self._latest_checkpoint_result = (
_get_training_result_from_state(
manager_snapshot.latest_checkpoint_result, self._storage_context
)
if manager_snapshot.latest_checkpoint_result is not None
else None
)
def _maybe_load_state_from_storage(self):
"""Load the checkpoint manager state from storage.
If no snapshot is found, start with a clean state.
"""
if not _exists_at_fs_path(
fs=self._storage_context.storage_filesystem,
fs_path=self._storage_context.checkpoint_manager_snapshot_path,
):
logger.debug(
"No checkpoint manager snapshot found. "
"No checkpoint will be available via `ray.train.get_checkpoint`, "
"so training will start from scratch."
)
return
with self._storage_context.storage_filesystem.open_input_stream(
self._storage_context.checkpoint_manager_snapshot_path
) as f:
logger.info(
"A run snapshot was found in storage folder at: "
f"'{self._storage_context.experiment_fs_path}'\n"
"This snapshot contains a list of checkpoints reported via "
"`ray.train.report` and will be loaded. "
"This allows the latest checkpoint found in the snapshot to be "
"accessible within your training function via "
"`ray.train.get_checkpoint`.\n"
"If you meant to start a brand new training job without any "
"information about previous checkpoints found in this directory, "
"please configure a new, unique `RunConfig(name)` or delete the "
f"existing folder at '{self._storage_context.experiment_fs_path}'."
)
json_state = f.read().decode("utf-8")
self._load_state(json_state)
def _write_state_to_storage(self):
"""Write the checkpoint manager state to storage."""
checkpoint_manager_snapshot = self._save_state()
with self._storage_context.storage_filesystem.open_output_stream(
self._storage_context.checkpoint_manager_snapshot_path
) as f:
f.write(checkpoint_manager_snapshot.encode("utf-8"))
def _assert_checkpoints_exist(self):
"""Validate the checkpoint manager state.
This method will validate the checkpoint manager state by checking if
the checkpoints specified in manager snapshot is compatible with the
checkpoint folders of the experiment storage filesystem.
Raises:
CheckpointManagerInitializationError: If the checkpoint manager snapshot
is not consistent with the stored checkpoints.
"""
for checkpoint_result in self._checkpoint_results:
checkpoint = checkpoint_result.checkpoint
assert checkpoint is not None
if not _exists_at_fs_path(
fs=checkpoint.filesystem, fs_path=checkpoint.path
):
raise CheckpointManagerInitializationError(
message=(
"The run snapshot contains a reference to a checkpoint "
f"that does not exist anymore ({checkpoint}). You are "
"running in a corrupted run directory `experiment_fs_path`."
"Please configure a new, unique `RunConfig(name)` "
"or delete the existing folder at "
f"`{self._storage_context.experiment_fs_path}`."
)
)
# --------------------------
# ReportCallback
# --------------------------
def after_report(
self,
training_report: _TrainingReport,
metrics: List[Dict[str, Any]],
):
if not training_report.checkpoint:
self._current_report_index += 1
self._notify()
return
self.register_checkpoint(
_TrainingResult(
checkpoint=training_report.checkpoint, metrics=training_report.metrics
),
bool(training_report.validation_spec),
)
# --------------------------
# WorkerGroupCallback
# --------------------------
def before_init_train_context(self, workers: List[Worker]) -> Dict[str, List[Any]]:
self._current_report_index = 0
latest_checkpoint = (
self.latest_checkpoint_result.checkpoint
if self.latest_checkpoint_result
else None
)
train_context_args = {
"checkpoint": [latest_checkpoint] * len(workers),
}
return train_context_args
# --------------------------------
# Get all reported checkpoints API
# --------------------------------
async def get_all_reported_checkpoints(
self,
current_report_index: int,
consistency_mode: CheckpointConsistencyMode = CheckpointConsistencyMode.VALIDATED,
) -> List[ReportedCheckpoint]:
"""Get all the reported checkpoints so far.
Args:
current_report_index: The current report index.
consistency_mode: Read semantics for checkpoint retrieval. Defaults to VALIDATED.
Returns:
A list of ReportedCheckpoint objects that represent the checkpoints and
corresponding metrics reported by the workers.
"""
if consistency_mode == CheckpointConsistencyMode.COMMITTED:
async with self._condition:
await self._condition.wait_for(
lambda: self._current_report_index == current_report_index
)
elif consistency_mode == CheckpointConsistencyMode.VALIDATED:
async with self._condition:
await self._condition.wait_for(
lambda: self._current_report_index == current_report_index
and not self._pending_training_results
)
else:
raise ValueError(
f"Unexpected CheckpointConsistencyMode: {consistency_mode}"
)
# TODO: might be nice for CheckpointManager to manage ReportedCheckpoint
# instead of _TrainingResult but that is a large refactor.
return [
ReportedCheckpoint(
checkpoint=tr.checkpoint,
metrics=tr.metrics,
)
for tr in self._checkpoint_results
]
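A minimal standalone sketch of the retention policy that _save_state_and_delete_old_checkpoints implements: keep the best num_to_keep results while never dropping the latest or still-pending ones. The helper name and the float stand-ins for checkpoint results are illustrative, not Ray Train APIs.

def select_results_to_delete(sorted_results, num_to_keep, latest, pending):
    """Return the results the manager would delete (input list is worst-first)."""
    if num_to_keep is None:
        return set()
    worst = set(sorted_results[:-num_to_keep])   # everything below the top K
    return (worst - {latest}) - set(pending)     # protect latest and pending results

# Floats stand in for scored checkpoint results, ordered worst-first as in
# self._checkpoint_results.
results = sorted([0.1, 0.5, 0.9, 0.3])
print(select_results_to_delete(results, num_to_keep=2, latest=0.3, pending=[]))
# -> {0.1}: 0.5 and 0.9 are the top two, and 0.3 survives because it is the latest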
|
CheckpointManager
|
python
|
pytorch__pytorch
|
test/functorch/dim/test_getsetitem.py
|
{
"start": 171,
"end": 8171
}
|
class ____(TestCase):
"""Comprehensive tests for first-class dimension indexing operations."""
def setUp(self):
super().setUp()
"""Set up common test fixtures."""
self.batch, self.height, self.width = dims(3)
def test_basic_dim_indexing(self):
"""Test basic indexing with a single Dim."""
tensor = torch.randn(3, 4, 5)
x, y, z = dims(3)
# Test indexing with each dim
result1 = tensor[x]
self.assertIsInstance(result1, Tensor)
result2 = tensor[y]
self.assertIsInstance(result2, Tensor)
result3 = tensor[z]
self.assertIsInstance(result3, Tensor)
def test_multiple_dim_indexing(self):
"""Test indexing with multiple Dims."""
tensor = torch.randn(3, 4, 5)
x, y, z = dims(3)
# Test multiple dims in one indexing operation
result = tensor[x, y]
self.assertIsInstance(result, Tensor)
result = tensor[x, y, z]
self.assertIsInstance(result, Tensor)
def test_mixed_indexing(self):
"""Test mixing Dims with regular indexing."""
tensor = torch.randn(3, 4, 5)
x, y, z = dims(3)
# Mix dim with slice
result1 = tensor[x, :]
self.assertIsInstance(result1, Tensor)
result2 = tensor[:, y]
self.assertIsInstance(result2, Tensor)
# Mix dim with integer
result3 = tensor[x, 0]
self.assertIsInstance(result3, Tensor)
result4 = tensor[0, y]
self.assertIsInstance(result4, Tensor)
def test_ellipsis_indexing(self):
"""Test indexing with ellipsis (...)."""
tensor = torch.randn(3, 4, 5, 6)
x, y, z, w = dims(4)
# Test ellipsis with dims
result1 = tensor[x, ...]
self.assertIsInstance(result1, Tensor)
result2 = tensor[..., y]
self.assertIsInstance(result2, Tensor)
result3 = tensor[x, ..., y]
self.assertIsInstance(result3, Tensor)
def test_none_indexing(self):
"""Test indexing with None (newaxis)."""
tensor = torch.randn(3, 4)
x, y = dims(2)
# Test None with dims
result1 = tensor[x, None, y]
self.assertIsInstance(result1, Tensor)
result2 = tensor[None, x]
self.assertIsInstance(result2, Tensor)
def test_slice_indexing(self):
"""Test indexing with slices mixed with dims."""
tensor = torch.randn(6, 8, 10)
x, y, z = dims(3)
# Test various slice patterns with dims
result1 = tensor[x, 1:5]
self.assertIsInstance(result1, Tensor)
result2 = tensor[1:3, y]
self.assertIsInstance(result2, Tensor)
result3 = tensor[x, 1:5, z]
self.assertIsInstance(result3, Tensor)
def test_tensor_indexing(self):
"""Test indexing with tensor indices."""
tensor = torch.randn(5, 6, 7)
x, y, z = dims(3)
# Create index tensors
idx = torch.tensor([0, 2, 4])
# Test tensor indexing with dims
result1 = tensor[x, idx]
self.assertIsInstance(result1, Tensor)
result2 = tensor[idx, y]
self.assertIsInstance(result2, Tensor)
def test_boolean_indexing(self):
"""Test boolean indexing with dims."""
tensor = torch.randn(4, 5)
x, y = dims(2)
# Create boolean mask
mask = torch.tensor([True, False, True, False, True])
# Test boolean indexing
result = tensor[x, mask]
self.assertIsInstance(result, Tensor)
def test_dim_pack_indexing(self):
"""Test indexing with dimension packs (tuples/lists of dims)."""
tensor = torch.randn(3, 4) # Need 2D tensor for 2 dims
# Create dims for dim pack
a, b = dims(2)
# Test dim pack indexing - using separate dimensions
result = tensor[a, b]
self.assertIsInstance(result, Tensor)
def test_unbound_dim_binding(self):
"""Test automatic binding of unbound dimensions during indexing."""
tensor = torch.randn(6, 8)
x = Dim("x") # unbound
y = Dim("y") # unbound
# Should automatically bind dimensions
result = tensor[x, y]
self.assertIsInstance(result, Tensor)
self.assertEqual(x.size, 6)
self.assertEqual(y.size, 8)
def test_dimlist_indexing(self):
"""Test indexing with DimList objects."""
tensor = torch.randn(3, 4, 5)
# Create a bound dimlist
dl = DimList(dims(2))
# Test dimlist indexing
result = tensor[dl, :]
self.assertIsInstance(result, Tensor)
def test_unbound_dimlist_indexing(self):
"""Test indexing with unbound DimList."""
tensor = torch.randn(3, 4, 5)
# Create unbound dimlist
dl = DimList()
# Should bind to remaining dimensions
result = tensor[0, dl]
self.assertIsInstance(result, Tensor)
def test_repeated_dim_usage(self):
"""Test using the same dim multiple times in indexing."""
tensor = torch.randn(4, 4, 4)
x, y, z = dims(3)
# This should trigger advanced indexing for repeated dims
result = tensor[x, x]
self.assertIsInstance(result, Tensor)
def test_complex_mixed_indexing(self):
"""Test complex combinations of different indexing types."""
tensor = torch.randn(3, 4, 5, 6, 7)
a, b, c, d, e = dims(5)
# Complex mixed indexing
idx = torch.tensor([0, 2])
result1 = tensor[a, 1:3, None, idx, :]
self.assertIsInstance(result1, Tensor)
# Use mask with correct shape
correct_mask = torch.tensor([True, False, True, False, False, True, True])
result2 = tensor[..., correct_mask]
self.assertIsInstance(result2, torch.Tensor)
def test_edge_cases(self):
"""Test edge cases and boundary conditions."""
x, y, z = dims(3)
# Single dimension tensor
vec = torch.randn(5)
a = Dim("a")
result1 = vec[a]
self.assertIsInstance(result1, Tensor)
self.assertEqual(a.size, 5) # Should bind to tensor size
# Empty tensor indexing
empty = torch.empty(0, 3, 4)
result2 = empty[x, :]
self.assertIsInstance(result2, Tensor)
def test_error_conditions(self):
"""Test conditions that should raise errors."""
tensor = torch.randn(3, 4)
x, y, z = dims(3)
# Too many indices
with self.assertRaises(ValueError):
_ = tensor[x, y, z] # 3 indices for 2D tensor
# Multiple unbound dim lists
dl1 = DimList()
dl2 = DimList()
with self.assertRaises(Exception): # Should raise DimensionBindError
_ = tensor[dl1, dl2]
# Multiple ellipsis
with self.assertRaises(Exception):
_ = tensor[..., x, ...]
def test_inferred_dimension_binding(self):
"""Test dimension binding inference with dim packs."""
# Skip this test for now as it requires more complex dim pack functionality
def test_stride_calculation(self):
"""Test that stride calculations work correctly with dim packs."""
tensor = torch.randn(6, 8)
# Test basic indexing instead of complex dim packs
a, b = dims(2)
result1 = tensor[a, b]
self.assertIsInstance(result1, Tensor)
# Test with different tensor
tensor2 = torch.randn(2, 3, 4)
c, d, e = dims(3)
result2 = tensor2[c, d, e]
self.assertIsInstance(result2, Tensor)
def test_device_handling_cpu(self):
"""Test indexing behavior with CPU tensors."""
# CPU tensor
cpu_tensor = torch.randn(3, 4)
x, y = dims(2)
result_cpu = cpu_tensor[x, y]
self.assertIsInstance(result_cpu, Tensor)
self.assertEqual(result_cpu.device, torch.device("cpu"))
if __name__ == "__main__":
run_tests()
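A standalone sketch of the automatic binding behaviour these tests rely on, assuming the functorch first-class dims API (functorch.dim); the order call is included only to show how a first-class dim tensor converts back to a plain tensor.

import torch
from functorch.dim import dims  # assumed import path for first-class dims

t = torch.randn(3, 4)
i, j = dims(2)                 # unbound dims
ft = t[i, j]                   # indexing binds i to size 3 and j to size 4
print(i.size, j.size)          # -> 3 4
print(ft.order(i, j).shape)    # reassemble positional dims -> torch.Size([3, 4])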
|
TestGetSetItem
|
python
|
scipy__scipy
|
scipy/io/_idl.py
|
{
"start": 17887,
"end": 27000
}
|
class ____(dict):
'''
A case-insensitive dictionary with access via item, attribute, and call
notations:
>>> from scipy.io._idl import AttrDict
>>> d = AttrDict()
>>> d['Variable'] = 123
>>> d['Variable']
123
>>> d.Variable
123
>>> d.variable
123
>>> d('VARIABLE')
123
>>> d['missing']
    Traceback (most recent call last):
...
KeyError: 'missing'
>>> d.missing
    Traceback (most recent call last):
...
AttributeError: 'AttrDict' object has no attribute 'missing'
'''
def __init__(self, init=None):
if init is None:
init = {}
dict.__init__(self, init)
def __getitem__(self, name):
return super().__getitem__(name.lower())
def __setitem__(self, key, value):
return super().__setitem__(key.lower(), value)
def __getattr__(self, name):
try:
return self.__getitem__(name)
except KeyError:
raise AttributeError(
f"'{type(self)}' object has no attribute '{name}'") from None
__setattr__ = __setitem__
__call__ = __getitem__
def readsav(file_name, idict=None, python_dict=False,
uncompressed_file_name=None, verbose=False):
"""
Read an IDL .sav file.
Parameters
----------
file_name : str
Name of the IDL save file.
idict : dict, optional
Dictionary in which to insert .sav file variables.
python_dict : bool, optional
        By default, the object returned is not a Python dictionary, but a
case-insensitive dictionary with item, attribute, and call access
to variables. To get a standard Python dictionary, set this option
to True.
uncompressed_file_name : str, optional
This option only has an effect for .sav files written with the
/compress option. If a file name is specified, compressed .sav
files are uncompressed to this file. Otherwise, readsav will use
the `tempfile` module to determine a temporary filename
automatically, and will remove the temporary file upon successfully
reading it in.
verbose : bool, optional
Whether to print out information about the save file, including
the records read, and available variables.
Returns
-------
idl_dict : AttrDict or dict
If `python_dict` is set to False (default), this function returns a
case-insensitive dictionary with item, attribute, and call access
to variables. If `python_dict` is set to True, this function
returns a Python dictionary with all variable names in lowercase.
If `idict` was specified, then variables are written to the
dictionary specified, and the updated dictionary is returned.
Examples
--------
>>> from os.path import dirname, join as pjoin
>>> import scipy.io as sio
>>> from scipy.io import readsav
Get the filename for an example .sav file from the tests/data directory.
>>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
>>> sav_fname = pjoin(data_dir, 'array_float32_1d.sav')
Load the .sav file contents.
>>> sav_data = readsav(sav_fname)
Get keys of the .sav file contents.
>>> print(sav_data.keys())
dict_keys(['array1d'])
Access a content with a key.
>>> print(sav_data['array1d'])
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0.]
"""
# Initialize record and variable holders
records = []
if python_dict or idict:
variables = {}
else:
variables = AttrDict()
# Open the IDL file
f = open(file_name, 'rb')
# Read the signature, which should be 'SR'
signature = _read_bytes(f, 2)
if signature != b'SR':
raise Exception(f"Invalid SIGNATURE: {signature}")
# Next, the record format, which is '\x00\x04' for normal .sav
# files, and '\x00\x06' for compressed .sav files.
recfmt = _read_bytes(f, 2)
if recfmt == b'\x00\x04':
pass
elif recfmt == b'\x00\x06':
if verbose:
print("IDL Save file is compressed")
if uncompressed_file_name:
fout = open(uncompressed_file_name, 'w+b')
else:
fout = tempfile.NamedTemporaryFile(suffix='.sav')
if verbose:
print(f" -> expanding to {fout.name}")
# Write header
fout.write(b'SR\x00\x04')
# Cycle through records
while True:
# Read record type
rectype = _read_long(f)
fout.write(struct.pack('>l', int(rectype)))
# Read position of next record and return as int
nextrec = _read_uint32(f)
nextrec += _read_uint32(f).astype(np.int64) * 2**32
# Read the unknown 4 bytes
unknown = f.read(4)
# Check if the end of the file has been reached
if RECTYPE_DICT[rectype] == 'END_MARKER':
modval = np.int64(2**32)
fout.write(struct.pack('>I', int(nextrec) % modval))
fout.write(
struct.pack('>I', int((nextrec - (nextrec % modval)) / modval))
)
fout.write(unknown)
break
# Find current position
pos = f.tell()
# Decompress record
rec_string = zlib.decompress(f.read(nextrec-pos))
# Find new position of next record
nextrec = fout.tell() + len(rec_string) + 12
# Write out record
fout.write(struct.pack('>I', int(nextrec % 2**32)))
fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
fout.write(unknown)
fout.write(rec_string)
# Close the original compressed file
f.close()
# Set f to be the decompressed file, and skip the first four bytes
f = fout
f.seek(4)
else:
raise Exception(f"Invalid RECFMT: {recfmt}")
# Loop through records, and add them to the list
while True:
r = _read_record(f)
records.append(r)
if 'end' in r:
if r['end']:
break
# Close the file
f.close()
# Find heap data variables
heap = {}
for r in records:
if r['rectype'] == "HEAP_DATA":
heap[r['heap_index']] = r['data']
# Find all variables
for r in records:
if r['rectype'] == "VARIABLE":
replace, new = _replace_heap(r['data'], heap)
if replace:
r['data'] = new
variables[r['varname'].lower()] = r['data']
if verbose:
# Print out timestamp info about the file
for record in records:
if record['rectype'] == "TIMESTAMP":
print("-"*50)
print(f"Date: {record['date']}")
print(f"User: {record['user']}")
print(f"Host: {record['host']}")
break
# Print out version info about the file
for record in records:
if record['rectype'] == "VERSION":
print("-"*50)
print(f"Format: {record['format']}")
print(f"Architecture: {record['arch']}")
print(f"Operating System: {record['os']}")
print(f"IDL Version: {record['release']}")
break
# Print out identification info about the file
for record in records:
if record['rectype'] == "IDENTIFICATON":
print("-"*50)
print(f"Author: {record['author']}")
print(f"Title: {record['title']}")
print(f"ID Code: {record['idcode']}")
break
# Print out descriptions saved with the file
for record in records:
if record['rectype'] == "DESCRIPTION":
print("-"*50)
print(f"Description: {record['description']}")
break
print("-"*50)
print(f"Successfully read {len(records)} records of which:")
# Create convenience list of record types
rectypes = [r['rectype'] for r in records]
for rt in set(rectypes):
if rt != 'END_MARKER':
print(f" - {rectypes.count(rt)} are of type {rt}")
print("-"*50)
if 'VARIABLE' in rectypes:
print("Available variables:")
for var in variables:
print(f" - {var} [{type(variables[var])}]")
print("-"*50)
if idict:
for var in variables:
idict[var] = variables[var]
return idict
else:
return variables
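A short illustration of the two access styles described above, reusing the example file path from the readsav docstring; nothing here goes beyond what the docstring already documents.

from os.path import dirname, join as pjoin
import scipy.io as sio
from scipy.io import readsav

sav_fname = pjoin(dirname(sio.__file__), 'tests', 'data', 'array_float32_1d.sav')

attr_style = readsav(sav_fname)                      # AttrDict: item/attribute/call access
print(attr_style.ARRAY1D is attr_style['array1d'])   # case-insensitive -> True

plain = readsav(sav_fname, python_dict=True)         # plain dict with lowercase keys
print(sorted(plain))                                 # -> ['array1d']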
|
AttrDict
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_io.py
|
{
"start": 2084,
"end": 3113
}
|
class ____(Benchmark):
# benchmarks for np.loadtxt comment handling
# when reading in CSV files
params = [10, int(1e2), int(1e4), int(1e5)]
param_names = ['num_lines']
def setup(self, num_lines):
data = ['1,2,3 # comment'] * num_lines
# unfortunately, timeit will only run setup()
# between repeat events, but not for iterations
# within repeats, so the StringIO object
# will have to be rewound in the benchmark proper
self.data_comments = StringIO('\n'.join(data))
def time_comment_loadtxt_csv(self, num_lines):
# benchmark handling of lines with comments
# when loading in from csv files
# inspired by similar benchmark in pandas
# for read_csv
# need to rewind StringIO object (unfortunately
# confounding timing result somewhat) for every
# call to timing test proper
np.loadtxt(self.data_comments,
delimiter=',')
self.data_comments.seek(0)
|
LoadtxtCSVComments
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/beta_error_response.py
|
{
"start": 255,
"end": 378
}
|
class ____(BaseModel):
error: BetaError
request_id: Optional[str] = None
type: Literal["error"]
|
BetaErrorResponse
|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_superfences.py
|
{
"start": 4125,
"end": 5507
}
|
class ____(util.MdCase):
"""Test title cases."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {
'pymdownx.highlight': {
'auto_title': True,
"auto_title_map": {
"Python Console Session": "Python"
}
}
}
def test_auto_tile(self):
"""Test auto title."""
self.check_markdown(
r'''
```{.python title="My Title"}
import test
```
''',
r'''
<div class="highlight"><span class="filename">My Title</span><pre><span></span><code><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</code></pre></div>
''', # noqa: E501
True
)
def test_auto_tile_map(self):
"""Test auto title."""
self.check_markdown(
r'''
```{.pycon title="My Title"}
>>> import test
```
''',
r'''
<div class="highlight"><span class="filename">My Title</span><pre><span></span><code><span class="gp">>>> </span><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</code></pre></div>
''', # noqa: E501
True
)
|
TestHighlightAutoTitleOverride
|
python
|
apache__airflow
|
airflow-core/tests/unit/always/test_connection.py
|
{
"start": 2983,
"end": 32728
}
|
class ____:
def setup_method(self):
self.patcher = mock.patch("airflow.models.connection.mask_secret", autospec=True)
self.mask_secret = self.patcher.start()
def teardown_method(self):
self.patcher.stop()
@conf_vars({("core", "fernet_key"): ""})
def test_connection_extra_no_encryption(self):
"""
Tests extras on a new connection without encryption. The fernet key
is set to a non-base64-encoded string and the extra is stored without
encryption.
"""
crypto.get_fernet.cache_clear()
test_connection = Connection(extra='{"apache": "airflow"}')
assert not test_connection.is_extra_encrypted
assert test_connection.extra == '{"apache": "airflow"}'
@conf_vars({("core", "fernet_key"): Fernet.generate_key().decode()})
def test_connection_extra_with_encryption(self):
"""
Tests extras on a new connection with encryption.
"""
crypto.get_fernet.cache_clear()
test_connection = Connection(extra='{"apache": "airflow"}')
assert test_connection.is_extra_encrypted
assert test_connection.extra == '{"apache": "airflow"}'
def test_connection_extra_with_encryption_rotate_fernet_key(self):
"""
Tests rotating encrypted extras.
"""
key1 = Fernet.generate_key()
key2 = Fernet.generate_key()
with conf_vars({("core", "fernet_key"): key1.decode()}):
crypto.get_fernet.cache_clear()
test_connection = Connection(extra='{"apache": "airflow"}')
assert test_connection.is_extra_encrypted
assert test_connection.extra == '{"apache": "airflow"}'
assert Fernet(key1).decrypt(test_connection._extra.encode()) == b'{"apache": "airflow"}'
# Test decrypt of old value with new key
with conf_vars({("core", "fernet_key"): f"{key2.decode()},{key1.decode()}"}):
crypto.get_fernet.cache_clear()
assert test_connection.extra == '{"apache": "airflow"}'
# Test decrypt of new value with new key
test_connection.rotate_fernet_key()
assert test_connection.is_extra_encrypted
assert test_connection.extra == '{"apache": "airflow"}'
assert Fernet(key2).decrypt(test_connection._extra.encode()) == b'{"apache": "airflow"}'
test_from_uri_params = [
UriTestCaseConfig(
test_conn_uri="scheme://user:password@host%2Flocation:1234/schema",
test_conn_attributes=dict(
conn_type="scheme",
host="host/location",
schema="schema",
login="user",
password="password",
port=1234,
extra=None,
),
description="without extras",
),
UriTestCaseConfig(
test_conn_uri="scheme://user:password@host%2Flocation:1234/schema?"
"extra1=a%20value&extra2=%2Fpath%2F",
test_conn_attributes=dict(
conn_type="scheme",
host="host/location",
schema="schema",
login="user",
password="password",
port=1234,
extra_dejson={"extra1": "a value", "extra2": "/path/"},
),
description="with extras",
),
UriTestCaseConfig(
test_conn_uri="scheme://user:password@host%2Flocation:1234/schema?"
"__extra__=%7B%22my_val%22%3A+%5B%22list%22%2C+%22of%22%2C+%22values%22%5D%2C+%22extra%22%3A+%7B%22nested%22%3A+%7B%22json%22%3A+%22val%22%7D%7D%7D",
test_conn_attributes=dict(
conn_type="scheme",
host="host/location",
schema="schema",
login="user",
password="password",
port=1234,
extra_dejson={"my_val": ["list", "of", "values"], "extra": {"nested": {"json": "val"}}},
),
description="with nested json",
),
UriTestCaseConfig(
test_conn_uri="scheme://user:password@host%2Flocation:1234/schema?extra1=a%20value&extra2=",
test_conn_attributes=dict(
conn_type="scheme",
host="host/location",
schema="schema",
login="user",
password="password",
port=1234,
extra_dejson={"extra1": "a value", "extra2": ""},
),
description="with empty extras",
),
UriTestCaseConfig(
test_conn_uri="scheme://user:password@host%2Flocation%3Ax%3Ay:1234/schema?"
"extra1=a%20value&extra2=%2Fpath%2F",
test_conn_attributes=dict(
conn_type="scheme",
host="host/location:x:y",
schema="schema",
login="user",
password="password",
port=1234,
extra_dejson={"extra1": "a value", "extra2": "/path/"},
),
description="with colon in hostname",
),
UriTestCaseConfig(
test_conn_uri="scheme://user:password%20with%20space@host%2Flocation%3Ax%3Ay:1234/schema",
test_conn_attributes=dict(
conn_type="scheme",
host="host/location:x:y",
schema="schema",
login="user",
password="password with space",
port=1234,
),
description="with encoded password",
),
UriTestCaseConfig(
test_conn_uri="scheme://domain%2Fuser:password@host%2Flocation%3Ax%3Ay:1234/schema",
test_conn_attributes=dict(
conn_type="scheme",
host="host/location:x:y",
schema="schema",
login="domain/user",
password="password",
port=1234,
),
description="with encoded user",
),
UriTestCaseConfig(
test_conn_uri="scheme://user:password%20with%20space@host:1234/schema%2Ftest",
test_conn_attributes=dict(
conn_type="scheme",
host="host",
schema="schema/test",
login="user",
password="password with space",
port=1234,
),
description="with encoded schema",
),
UriTestCaseConfig(
test_conn_uri="scheme://user:password%20with%20space@host:1234",
test_conn_attributes=dict(
conn_type="scheme",
host="host",
schema="",
login="user",
password="password with space",
port=1234,
),
description="no schema",
),
UriTestCaseConfig(
test_conn_uri="google-cloud-platform://?key_path=%2Fkeys%2Fkey.json&scope="
"https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcloud-platform&project=airflow",
test_conn_attributes=dict(
conn_type="google_cloud_platform",
host="",
schema="",
login=None,
password=None,
port=None,
extra_dejson=dict(
key_path="/keys/key.json",
scope="https://www.googleapis.com/auth/cloud-platform",
project="airflow",
),
),
description="with underscore",
),
UriTestCaseConfig(
test_conn_uri="scheme://host:1234",
test_conn_attributes=dict(
conn_type="scheme",
host="host",
schema="",
login=None,
password=None,
port=1234,
),
description="without auth info",
),
UriTestCaseConfig(
test_conn_uri="scheme://%2FTmP%2F:1234",
test_conn_attributes=dict(
conn_type="scheme",
host="/TmP/",
schema="",
login=None,
password=None,
port=1234,
),
description="with path",
),
UriTestCaseConfig(
test_conn_uri="scheme:///airflow",
test_conn_attributes=dict(
conn_type="scheme",
schema="airflow",
),
description="schema only",
),
UriTestCaseConfig(
test_conn_uri="scheme://@:1234",
test_conn_attributes=dict(
conn_type="scheme",
port=1234,
),
description="port only",
),
UriTestCaseConfig(
test_conn_uri="scheme://:password%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@",
test_conn_attributes=dict(
conn_type="scheme",
password="password/!@#$%^&*(){}",
),
description="password only",
),
UriTestCaseConfig(
test_conn_uri="scheme://login%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@",
test_conn_attributes=dict(
conn_type="scheme",
login="login/!@#$%^&*(){}",
),
description="login only",
),
]
@pytest.mark.parametrize("test_config", test_from_uri_params)
def test_connection_from_uri(self, test_config: UriTestCaseConfig):
connection = Connection(uri=test_config.test_uri)
for conn_attr, expected_val in test_config.test_conn_attributes.items():
actual_val = getattr(connection, conn_attr)
if expected_val is None:
                assert actual_val is None
if isinstance(expected_val, dict):
assert expected_val == actual_val
else:
assert expected_val == actual_val
expected_calls = []
if test_config.test_conn_attributes.get("password"):
expected_calls.append(mock.call(test_config.test_conn_attributes["password"]))
expected_calls.append(mock.call(quote(test_config.test_conn_attributes["password"])))
if test_config.test_conn_attributes.get("extra_dejson"):
expected_calls.append(mock.call(test_config.test_conn_attributes["extra_dejson"]))
self.mask_secret.assert_has_calls(expected_calls)
@pytest.mark.parametrize("test_config", test_from_uri_params)
def test_connection_get_uri_from_uri(self, test_config: UriTestCaseConfig):
"""
This test verifies that when we create a conn_1 from URI, and we generate a URI from that conn, that
when we create a conn_2 from the generated URI, we get an equivalent conn.
1. Parse URI to create `Connection` object, `connection`.
        2. Using this connection, generate URI `generated_uri`.
        3. Using this `generated_uri`, parse and create new Connection `new_conn`.
4. Verify that `new_conn` has same attributes as `connection`.
"""
connection = Connection(uri=test_config.test_uri)
generated_uri = connection.get_uri()
new_conn = Connection(uri=generated_uri)
assert connection.conn_type == new_conn.conn_type
assert connection.login == new_conn.login
assert connection.password == new_conn.password
assert connection.host == new_conn.host
assert connection.port == new_conn.port
assert connection.schema == new_conn.schema
assert connection.extra_dejson == new_conn.extra_dejson
@pytest.mark.parametrize("test_config", test_from_uri_params)
def test_connection_get_uri_from_conn(self, test_config: UriTestCaseConfig):
"""
This test verifies that if we create conn_1 from attributes (rather than from URI), and we generate a
URI, that when we create conn_2 from this URI, we get an equivalent conn.
1. Build conn init params using `test_conn_attributes` and store in `conn_kwargs`
2. Instantiate conn `connection` from `conn_kwargs`.
3. Generate uri `get_uri` from this conn.
4. Create conn `new_conn` from this uri.
5. Verify `new_conn` has same attributes as `connection`.
"""
conn_kwargs = {}
for k, v in test_config.test_conn_attributes.items():
if k == "extra_dejson":
conn_kwargs.update({"extra": json.dumps(v)})
else:
conn_kwargs.update({k: v})
connection = Connection(conn_id="test_conn", **conn_kwargs) # type: ignore
gen_uri = connection.get_uri()
new_conn = Connection(conn_id="test_conn", uri=gen_uri)
for conn_attr, expected_val in test_config.test_conn_attributes.items():
actual_val = getattr(new_conn, conn_attr)
if expected_val is None:
assert actual_val is None
else:
assert actual_val == expected_val
@pytest.mark.parametrize(
("uri", "uri_parts"),
[
(
"http://:password@host:80/database",
ConnectionParts(
conn_type="http", login="", password="password", host="host", port=80, schema="database"
),
),
(
"http://user:@host:80/database",
ConnectionParts(
conn_type="http", login="user", password=None, host="host", port=80, schema="database"
),
),
(
"http://user:password@/database",
ConnectionParts(
conn_type="http", login="user", password="password", host="", port=None, schema="database"
),
),
(
"http://user:password@host:80/",
ConnectionParts(
conn_type="http", login="user", password="password", host="host", port=80, schema=""
),
),
(
"http://user:password@/",
ConnectionParts(
conn_type="http", login="user", password="password", host="", port=None, schema=""
),
),
(
"postgresql://user:password@%2Ftmp%2Fz6rqdzqh%2Fexample%3Awest1%3Atestdb/testdb",
ConnectionParts(
conn_type="postgres",
login="user",
password="password",
host="/tmp/z6rqdzqh/example:west1:testdb",
port=None,
schema="testdb",
),
),
(
"postgresql://user@%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb/testdb",
ConnectionParts(
conn_type="postgres",
login="user",
password=None,
host="/tmp/z6rqdzqh/example:europe-west1:testdb",
port=None,
schema="testdb",
),
),
(
"postgresql://%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb",
ConnectionParts(
conn_type="postgres",
login=None,
password=None,
host="/tmp/z6rqdzqh/example:europe-west1:testdb",
port=None,
schema="",
),
),
(
"spark://k8s%3a%2F%2F100.68.0.1:443?deploy-mode=cluster",
ConnectionParts(
conn_type="spark",
login=None,
password=None,
host="k8s://100.68.0.1",
port=443,
schema="",
),
),
(
"spark://user:password@k8s%3a%2F%2F100.68.0.1:443?deploy-mode=cluster",
ConnectionParts(
conn_type="spark",
login="user",
password="password",
host="k8s://100.68.0.1",
port=443,
schema="",
),
),
(
"spark://user@k8s%3a%2F%2F100.68.0.1:443?deploy-mode=cluster",
ConnectionParts(
conn_type="spark",
login="user",
password=None,
host="k8s://100.68.0.1",
port=443,
schema="",
),
),
(
"spark://k8s%3a%2F%2Fno.port.com?deploy-mode=cluster",
ConnectionParts(
conn_type="spark",
login=None,
password=None,
host="k8s://no.port.com",
port=None,
schema="",
),
),
],
)
def test_connection_from_with_auth_info(self, uri, uri_parts):
connection = Connection(uri=uri)
assert connection.conn_type == uri_parts.conn_type
assert connection.login == uri_parts.login
assert connection.password == uri_parts.password
assert connection.host == uri_parts.host
assert connection.port == uri_parts.port
assert connection.schema == uri_parts.schema
@pytest.mark.parametrize(
("extra", "expected"),
[
('{"extra": null}', None),
('{"extra": {"yo": "hi"}}', '{"yo": "hi"}'),
('{"extra": "{\\"yo\\": \\"hi\\"}"}', '{"yo": "hi"}'),
],
)
def test_from_json_extra(self, extra, expected):
"""Json serialization should support extra stored as object _or_ as object string representation"""
assert Connection.from_json(extra).extra == expected
@pytest.mark.parametrize(
("val", "expected"),
[
('{"conn_type": "abc-abc"}', "abc_abc"),
('{"conn_type": "abc_abc"}', "abc_abc"),
('{"conn_type": "postgresql"}', "postgres"),
],
)
def test_from_json_conn_type(self, val, expected):
"""Two conn_type normalizations are applied: replace - with _ and postgresql with postgres"""
assert Connection.from_json(val).conn_type == expected
@pytest.mark.parametrize(
("val", "expected"),
[
('{"port": 1}', 1),
('{"port": "1"}', 1),
('{"port": null}', None),
],
)
def test_from_json_port(self, val, expected):
"""Two conn_type normalizations are applied: replace - with _ and postgresql with postgres"""
assert Connection.from_json(val).port == expected
@pytest.mark.parametrize(
("val", "expected"),
[
('pass :/!@#$%^&*(){}"', 'pass :/!@#$%^&*(){}"'), # these are the same
(None, None),
("", None), # this is a consequence of the password getter
],
)
def test_from_json_special_characters(self, val, expected):
"""Two conn_type normalizations are applied: replace - with _ and postgresql with postgres"""
json_val = json.dumps(dict(password=val))
assert Connection.from_json(json_val).password == expected
@mock.patch.dict(
"os.environ",
{
"AIRFLOW_CONN_TEST_URI": "postgresql://username:password%21@ec2.compute.com:5432/the_database",
},
)
def test_using_env_var(self):
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
conn = SqliteHook.get_connection(conn_id="test_uri")
assert conn.host == "ec2.compute.com"
assert conn.schema == "the_database"
assert conn.login == "username"
assert conn.password == "password!"
assert conn.port == 5432
self.mask_secret.assert_has_calls([mock.call("password!"), mock.call(quote("password!"))])
@mock.patch.dict(
"os.environ",
{
"AIRFLOW_CONN_TEST_URI_NO_CREDS": "postgresql://ec2.compute.com/the_database",
},
)
def test_using_unix_socket_env_var(self):
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
conn = SqliteHook.get_connection(conn_id="test_uri_no_creds")
assert conn.host == "ec2.compute.com"
assert conn.schema == "the_database"
assert conn.login is None
assert conn.password is None
assert conn.port is None
def test_param_setup(self):
conn = Connection(
conn_id="local_mysql",
conn_type="mysql",
host="localhost",
login="airflow",
password="airflow",
schema="airflow",
)
assert conn.host == "localhost"
assert conn.schema == "airflow"
assert conn.login == "airflow"
assert conn.password == "airflow"
assert conn.port is None
@pytest.mark.db_test
def test_env_var_priority(self):
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
conn = SqliteHook.get_connection(conn_id="airflow_db")
assert conn.host != "ec2.compute.com"
with mock.patch.dict(
"os.environ",
{
"AIRFLOW_CONN_AIRFLOW_DB": "postgresql://username:password@ec2.compute.com:5432/the_database",
},
):
conn = SqliteHook.get_connection(conn_id="airflow_db")
assert conn.host == "ec2.compute.com"
assert conn.schema == "the_database"
assert conn.login == "username"
assert conn.password == "password"
assert conn.port == 5432
@mock.patch.dict(
"os.environ",
{
"AIRFLOW_CONN_TEST_URI": "postgresql://username:password@ec2.compute.com:5432/the_database",
"AIRFLOW_CONN_TEST_URI_NO_CREDS": "postgresql://ec2.compute.com/the_database",
},
)
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id="test_uri")
hook = conn.get_hook()
ppg3_mode: bool = SQLALCHEMY_V_2_0 and "psycopg" in hook.get_uri()
if ppg3_mode:
assert (
hook.get_uri() == "postgresql+psycopg://username:password@ec2.compute.com:5432/the_database"
)
else:
assert hook.get_uri() == "postgresql://username:password@ec2.compute.com:5432/the_database"
conn2 = BaseHook.get_connection(conn_id="test_uri_no_creds")
hook2 = conn2.get_hook()
if ppg3_mode:
assert hook2.get_uri() == "postgresql+psycopg://ec2.compute.com/the_database"
else:
assert hook2.get_uri() == "postgresql://ec2.compute.com/the_database"
@mock.patch.dict(
"os.environ",
{
"AIRFLOW_CONN_TEST_URI": "postgresql://username:password@ec2.compute.com:5432/the_database",
"AIRFLOW_CONN_TEST_URI_NO_CREDS": "postgresql://ec2.compute.com/the_database",
},
)
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id="test_uri")
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
if SQLALCHEMY_V_2_0 and "psycopg" in hook.get_uri():
expected = "postgresql+psycopg://username:password@ec2.compute.com:5432/the_database"
else:
expected = "postgresql://username:password@ec2.compute.com:5432/the_database"
assert isinstance(engine, sqlalchemy.engine.Engine)
if SQLALCHEMY_V_1_4:
assert str(engine.url) == expected
else:
assert engine.url.render_as_string(hide_password=False) == expected
@mock.patch.dict(
"os.environ",
{
"AIRFLOW_CONN_TEST_URI": "postgresql://username:password@ec2.compute.com:5432/the_database",
"AIRFLOW_CONN_TEST_URI_NO_CREDS": "postgresql://ec2.compute.com/the_database",
},
)
def test_get_connections_env_var(self):
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
conns = SqliteHook.get_connection(conn_id="test_uri")
assert conns.host == "ec2.compute.com"
assert conns.schema == "the_database"
assert conns.login == "username"
assert conns.password == "password"
assert conns.port == 5432
def test_connection_mixed(self):
with pytest.raises(
AirflowException,
match=re.escape(
"You must create an object using the URI or individual values (conn_type, host, login, "
"password, schema, port or extra).You can't mix these two ways to create this object."
),
):
Connection(conn_id="TEST_ID", uri="mysql://", schema="AAA")
@pytest.mark.db_test
def test_masking_from_db(self):
"""Test secrets are masked when loaded directly from the DB"""
from airflow.settings import Session
session = Session()
try:
conn = Connection(
conn_id=f"test-{os.getpid()}",
conn_type="http",
password="s3cr3t!",
extra='{"apikey":"masked too"}',
)
session.add(conn)
session.flush()
# Make sure we re-load it, not just get the cached object back
session.expunge(conn)
self.mask_secret.reset_mock()
from_db = session.get(Connection, conn.id)
from_db.extra_dejson
assert self.mask_secret.mock_calls == [
# We should have called it _again_ when loading from the DB
mock.call("s3cr3t!"),
mock.call(quote("s3cr3t!")),
mock.call({"apikey": "masked too"}),
]
finally:
session.rollback()
@mock.patch.dict(
"os.environ",
{
"AIRFLOW_CONN_TEST_URI": "sqlite://",
},
)
def test_connection_test_success(self):
conn = Connection(conn_id="test_uri", conn_type="sqlite")
res = conn.test_connection()
assert res[0] is True
assert res[1] == "Connection successfully tested"
@mock.patch.dict(
"os.environ",
{
"AIRFLOW_CONN_TEST_URI_NO_HOOK": "unknown://",
},
)
def test_connection_test_no_hook(self):
conn = Connection(conn_id="test_uri_no_hook", conn_type="unknown")
res = conn.test_connection()
assert res[0] is False
assert res[1] == 'Unknown hook type "unknown"'
@mock.patch.dict(
"os.environ",
{
"AIRFLOW_CONN_TEST_URI_HOOK_METHOD_MISSING": "grpc://",
},
)
def test_connection_test_hook_method_missing(self):
conn = Connection(conn_id="test_uri_hook_method_missing", conn_type="grpc")
res = conn.test_connection()
assert res[0] is False
assert res[1] == "Hook GrpcHook doesn't implement or inherit test_connection method"
def test_extra_warnings_non_json(self):
with pytest.raises(ValueError, match="non-JSON"):
Connection(conn_id="test_extra", conn_type="none", extra="hi")
def test_extra_warnings_non_dict_json(self):
with pytest.raises(ValueError, match="not parse as a dictionary"):
Connection(conn_id="test_extra", conn_type="none", extra='"hi"')
def test_get_uri_no_conn_type(self):
# no conn type --> scheme-relative URI
assert Connection().get_uri() == "//"
# with host, still works
assert Connection(host="abc").get_uri() == "//abc"
# parsing back as conn still works
assert Connection(uri="//abc").host == "abc"
@pytest.mark.parametrize(
("conn", "expected_json"),
[
pytest.param("get_connection1", "{}", id="empty"),
pytest.param("get_connection2", '{"host": "apache.org"}', id="empty-extra"),
pytest.param(
"get_connection3",
'{"conn_type": "foo", "login": "", "password": "p@$$"}',
id="some-fields",
),
pytest.param(
"get_connection4",
json.dumps(
{
"conn_type": "bar",
"description": "Sample Description",
"host": "example.org",
"login": "user",
"password": "p@$$",
"schema": "schema",
"port": 777,
"extra": {"foo": "bar", "answer": 42},
}
),
id="all-fields",
),
pytest.param(
"get_connection5",
# During URI parsing some of the fields are evaluated as empty strings
'{"conn_type": "aws", "host": "", "schema": ""}',
id="uri",
),
],
)
def test_as_json_from_connection(self, conn: Connection, expected_json, request):
conn = request.getfixturevalue(conn)
result = conn.as_json()
assert result == expected_json
restored_conn = Connection.from_json(result)
assert restored_conn.conn_type == conn.conn_type
assert restored_conn.description == conn.description
assert restored_conn.host == conn.host
assert restored_conn.password == conn.password
assert restored_conn.schema == conn.schema
assert restored_conn.port == conn.port
assert restored_conn.extra_dejson == conn.extra_dejson
|
TestConnection
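A brief standalone sketch of the URI-to-fields parsing these tests rely on; it assumes apache-airflow is installed and uses only the Connection constructor shown in the tests (the connection values are illustrative):

from airflow.models.connection import Connection

conn = Connection(uri="postgresql://username:password@ec2.compute.com:5432/the_database")
# The URI components map onto the individual Connection fields checked above.
assert conn.host == "ec2.compute.com"
assert conn.schema == "the_database"
assert conn.login == "username"
assert conn.password == "password"
assert conn.port == 5432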
|
python
|
getsentry__sentry
|
src/sentry/flags/endpoints/secrets.py
|
{
"start": 741,
"end": 919
}
|
class ____(TypedDict):
createdAt: str
createdBy: int
id: int
provider: str
secret: str
@register(FlagWebHookSigningSecretModel)
|
FlagWebhookSigningSecretResponse
|
python
|
uqfoundation__dill
|
dill/_shims.py
|
{
"start": 3549,
"end": 6635
}
|
class ____(Reduce):
# A version of Reduce for functions. Used to trick pickler.save_reduce into
# thinking that Reduce objects of functions are themselves meaningful functions.
def __call__(self, *args, **kwargs):
reduction = self.__reduce__()
func = reduction[0]
f_args = reduction[1]
obj = func(*f_args)
return obj(*args, **kwargs)
__NO_DEFAULT = _dill.Sentinel('Getattr.NO_DEFAULT')
def Getattr(object, name, default=__NO_DEFAULT):
"""
A Reduce object that represents the getattr operation. When unpickled, the
Getattr will access an attribute 'name' of 'object' and return the value
stored there. If the attribute doesn't exist, the default value will be
returned if present.
The following statements are equivalent:
Getattr(collections, 'OrderedDict')
Getattr(collections, 'spam', None)
Getattr(*args)
Reduce(getattr, (collections, 'OrderedDict'))
Reduce(getattr, (collections, 'spam', None))
Reduce(getattr, args)
During unpickling, the first two will result in collections.OrderedDict and
None respectively because the first attribute exists and the second one does
not, forcing it to use the default value given in the third argument.
"""
if default is Getattr.NO_DEFAULT:
reduction = (getattr, (object, name))
else:
reduction = (getattr, (object, name, default))
return Reduce(*reduction, is_callable=callable(default))
Getattr.NO_DEFAULT = __NO_DEFAULT
del __NO_DEFAULT
def move_to(module, name=None):
def decorator(func):
if name is None:
fname = func.__name__
else:
fname = name
module.__dict__[fname] = func
func.__module__ = module.__name__
return func
return decorator
def register_shim(name, default):
"""
An easier-to-understand and more compact way of "softly" defining a function.
These two pieces of code are equivalent:
if _dill.OLD3X:
def _create_class():
...
_create_class = register_shim('_create_class', types.new_class)
if _dill.OLD3X:
@move_to(_dill)
def _create_class():
...
_create_class = Getattr(_dill, '_create_class', types.new_class)
Intuitively, it creates a function or object in the versions of dill/python
that require special reimplementations, and uses a core library or default
implementation if that function or object does not exist.
"""
func = globals().get(name)
if func is not None:
_dill.__dict__[name] = func
func.__module__ = _dill.__name__
if default is Getattr.NO_DEFAULT:
reduction = (getattr, (_dill, name))
else:
reduction = (getattr, (_dill, name, default))
return Reduce(*reduction, is_callable=callable(default))
######################
## Compatibility Shims are defined below
######################
_CELL_EMPTY = register_shim('_CELL_EMPTY', None)
_setattr = register_shim('_setattr', setattr)
_delattr = register_shim('_delattr', delattr)
|
_CallableReduce
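A minimal, self-contained sketch (not dill's own code) of the idea behind Getattr/Reduce above: an object whose __reduce__ defers to getattr, so unpickling it yields the looked-up attribute. It assumes dill is installed, since the standard pickle module cannot serialize the module object passed to getattr.

import collections
import dill


class DeferredGetattr:
    """Unpickles to getattr(collections, 'OrderedDict')."""

    def __reduce__(self):
        return (getattr, (collections, "OrderedDict"))


restored = dill.loads(dill.dumps(DeferredGetattr()))
assert restored is collections.OrderedDict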
|
python
|
getsentry__sentry
|
src/sentry/snuba/metrics/query.py
|
{
"start": 1267,
"end": 3158
}
|
class ____:
op: MetricOperationType | None
metric_mri: str
params: dict[str, None | str | int | float | Sequence[tuple[str | int, ...]]] | None = None
alias: str = ""
def __post_init__(self) -> None:
# Validate that it is a valid MRI format
parsed_mri = parse_mri(self.metric_mri)
if parsed_mri is None:
raise InvalidParams(f"Invalid Metric MRI: {self.metric_mri}")
# We compute the metric name before the alias, since we want to make sure it's a public facing metric.
metric_name = self._metric_name
if not self.alias:
key = f"{self.op}({metric_name})" if self.op is not None else metric_name
object.__setattr__(self, "alias", key)
@property
def _metric_name(self) -> str:
return get_public_name_from_mri(self.metric_mri)
def __str__(self) -> str:
return f"{self.op}({self._metric_name})" if self.op else self._metric_name
def __eq__(self, other: object) -> bool:
# The __eq__ method is called after the __hash__ method to verify equality of objects being inserted
# into a set. Because the default "__eq__()" uses the "is" operator, we want to override it and
# model MetricField's equivalence as having the same hash value, in order to reuse the comparison logic defined
# in the "__hash__()" method.
return bool(self.__hash__() == other.__hash__())
def __hash__(self) -> int:
hashable_list: list[MetricOperationType | str] = []
if self.op is not None:
hashable_list.append(self.op)
hashable_list.append(self.metric_mri)
if self.params is not None:
hashable_list.append(
",".join(sorted(":".join((x, str(y))) for x, y in self.params.items()))
)
return hash(tuple(hashable_list))
@dataclass(frozen=True)
|
MetricField
|
python
|
numpy__numpy
|
tools/swig/test/testFortran.py
|
{
"start": 1674,
"end": 1941
}
|
class ____(FortranTestCase):
def __init__(self, methodName="runTest"):
FortranTestCase.__init__(self, methodName)
self.typeStr = "uchar"
self.typeCode = "B"
######################################################################
|
ucharTestCase
|
python
|
networkx__networkx
|
networkx/algorithms/tests/test_euler.py
|
{
"start": 1334,
"end": 3712
}
|
class ____:
def test_eulerian_circuit_cycle(self):
G = nx.cycle_graph(4)
edges = list(nx.eulerian_circuit(G, source=0))
nodes = [u for u, v in edges]
assert nodes == [0, 3, 2, 1]
assert edges == [(0, 3), (3, 2), (2, 1), (1, 0)]
edges = list(nx.eulerian_circuit(G, source=1))
nodes = [u for u, v in edges]
assert nodes == [1, 2, 3, 0]
assert edges == [(1, 2), (2, 3), (3, 0), (0, 1)]
G = nx.complete_graph(3)
edges = list(nx.eulerian_circuit(G, source=0))
nodes = [u for u, v in edges]
assert nodes == [0, 2, 1]
assert edges == [(0, 2), (2, 1), (1, 0)]
edges = list(nx.eulerian_circuit(G, source=1))
nodes = [u for u, v in edges]
assert nodes == [1, 2, 0]
assert edges == [(1, 2), (2, 0), (0, 1)]
def test_eulerian_circuit_digraph(self):
G = nx.DiGraph()
nx.add_cycle(G, [0, 1, 2, 3])
edges = list(nx.eulerian_circuit(G, source=0))
nodes = [u for u, v in edges]
assert nodes == [0, 1, 2, 3]
assert edges == [(0, 1), (1, 2), (2, 3), (3, 0)]
edges = list(nx.eulerian_circuit(G, source=1))
nodes = [u for u, v in edges]
assert nodes == [1, 2, 3, 0]
assert edges == [(1, 2), (2, 3), (3, 0), (0, 1)]
def test_multigraph(self):
G = nx.MultiGraph()
nx.add_cycle(G, [0, 1, 2, 3])
G.add_edge(1, 2)
G.add_edge(1, 2)
edges = list(nx.eulerian_circuit(G, source=0))
nodes = [u for u, v in edges]
assert nodes == [0, 3, 2, 1, 2, 1]
assert edges == [(0, 3), (3, 2), (2, 1), (1, 2), (2, 1), (1, 0)]
def test_multigraph_with_keys(self):
G = nx.MultiGraph()
nx.add_cycle(G, [0, 1, 2, 3])
G.add_edge(1, 2)
G.add_edge(1, 2)
edges = list(nx.eulerian_circuit(G, source=0, keys=True))
nodes = [u for u, v, k in edges]
assert nodes == [0, 3, 2, 1, 2, 1]
assert edges[:2] == [(0, 3, 0), (3, 2, 0)]
assert collections.Counter(edges[2:5]) == collections.Counter(
[(2, 1, 0), (1, 2, 1), (2, 1, 2)]
)
assert edges[5:] == [(1, 0, 0)]
def test_not_eulerian(self):
with pytest.raises(nx.NetworkXError):
f = list(nx.eulerian_circuit(nx.complete_graph(4)))
|
TestEulerianCircuit
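A tiny standalone check of the behaviour exercised above (assumes networkx is installed): an Eulerian circuit of a cycle graph uses every edge exactly once and returns to the source node.

import networkx as nx

G = nx.cycle_graph(4)
circuit = list(nx.eulerian_circuit(G, source=0))
assert len(circuit) == G.number_of_edges()            # every edge appears exactly once
assert circuit[0][0] == 0 and circuit[-1][1] == 0     # starts and ends at the source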
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/both_link_and_build_dep_c/package.py
|
{
"start": 217,
"end": 689
}
|
class ____(Package):
"""
Structure where c occurs as a build dep down the line and as a direct
link dep. Useful for testing situations where you copy the parent spec
just with link deps, and you want to make sure b is not part of that.
a <--build-- b <-link-- c
a <--link--- c
"""
homepage = "http://www.example.com"
url = "http://www.example.com/1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
|
BothLinkAndBuildDepC
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-shared/dagster_shared_tests/test_check.py
|
{
"start": 33930,
"end": 52286
}
|
class ____: ...
def test_sequence_param():
assert check.sequence_param([], "sequence_param") == []
assert check.sequence_param(tuple(), "sequence_param") == tuple()
assert check.sequence_param(["foo"], "sequence_param", of_type=str) == ["foo"]
with pytest.raises(ParameterCheckError):
check.sequence_param(None, "sequence_param") # pyright: ignore[reportArgumentType]
with pytest.raises(CheckError):
check.sequence_param(1, "sequence_param", of_type=int) # pyright: ignore[reportArgumentType]
with pytest.raises(CheckError):
check.sequence_param(["foo"], "sequence_param", of_type=int)
with pytest.raises(CheckError):
check.sequence_param("foo", "sequence_param")
with pytest.raises(CheckError, match="str is a disallowed Sequence type"):
check.sequence_param("foo", "sequence_param", of_type=str)
with pytest.raises(CheckError):
check.sequence_param(SomeRecord(), "sequence_param") # pyright: ignore[reportArgumentType]
def test_opt_sequence_param():
assert check.opt_sequence_param([], "sequence_param") == []
assert check.opt_sequence_param(tuple(), "sequence_param") == tuple()
assert check.opt_sequence_param(["foo"], "sequence_param", of_type=str) == ["foo"]
assert check.opt_sequence_param(None, "sequence_param") == []
with pytest.raises(CheckError):
check.opt_sequence_param(1, "sequence_param", of_type=int) # pyright: ignore[reportArgumentType]
with pytest.raises(CheckError):
check.opt_sequence_param(["foo"], "sequence_param", of_type=int)
with pytest.raises(CheckError):
check.opt_sequence_param("foo", "sequence_param")
with pytest.raises(CheckError, match="str is a disallowed Sequence type"):
check.opt_sequence_param("foo", "sequence_param", of_type=str)
with pytest.raises(CheckError):
check.opt_sequence_param(SomeRecord(), "sequence_param") # pyright: ignore[reportArgumentType]
def test_opt_nullable_sequence_param():
assert check.opt_nullable_sequence_param([], "sequence_param") == []
assert check.opt_nullable_sequence_param(tuple(), "sequence_param") == tuple()
assert check.opt_nullable_sequence_param(["foo"], "sequence_param", of_type=str) == ["foo"]
assert check.opt_nullable_sequence_param(None, "sequence_param") is None
with pytest.raises(CheckError):
check.opt_nullable_sequence_param(1, "sequence_param", of_type=int) # pyright: ignore[reportCallIssue,reportArgumentType]
with pytest.raises(CheckError):
check.opt_nullable_sequence_param(["foo"], "sequence_param", of_type=int)
with pytest.raises(CheckError, match="str is a disallowed Sequence type"):
assert check.opt_nullable_sequence_param("foo", "sequence_param", of_type=str)
with pytest.raises(CheckError):
check.opt_nullable_sequence_param(SomeRecord(), "sequence_param") # pyright: ignore[reportCallIssue,reportArgumentType]
# ########################
# ##### STR
# ########################
def test_str_param():
assert check.str_param("a", "str_param") == "a"
assert check.str_param("", "str_param") == ""
assert check.str_param("a", "unicode_param") == "a"
with pytest.raises(ParameterCheckError):
check.str_param(None, "str_param")
with pytest.raises(ParameterCheckError):
check.str_param(0, "str_param")
with pytest.raises(ParameterCheckError):
check.str_param(1, "str_param")
def test_opt_str_param():
assert check.opt_str_param("a", "str_param") == "a"
assert check.opt_str_param("", "str_param") == ""
assert check.opt_str_param("a", "unicode_param") == "a"
assert check.opt_str_param(None, "str_param") is None
assert check.opt_str_param(None, "str_param", "foo") == "foo"
with pytest.raises(ParameterCheckError):
check.opt_str_param(0, "str_param")
with pytest.raises(ParameterCheckError):
check.opt_str_param(1, "str_param")
def test_opt_nonempty_str_param():
assert check.opt_nonempty_str_param("a", "str_param") == "a"
assert check.opt_nonempty_str_param("", "str_param") is None
assert check.opt_nonempty_str_param("", "str_param", "foo") == "foo"
assert check.opt_nonempty_str_param("a", "unicode_param") == "a"
assert check.opt_nonempty_str_param(None, "str_param") is None
assert check.opt_nonempty_str_param(None, "str_param", "foo") == "foo"
with pytest.raises(ParameterCheckError):
check.opt_nonempty_str_param(0, "str_param")
with pytest.raises(ParameterCheckError):
check.opt_nonempty_str_param(1, "str_param")
def test_string_elem():
ddict = {"a_str": "a", "a_num": 1, "a_none": None}
assert check.str_elem(ddict, "a_str") == "a"
with pytest.raises(ElementCheckError):
assert check.str_elem(ddict, "a_none")
with pytest.raises(ElementCheckError):
check.str_elem(ddict, "a_num")
def test_opt_string_elem():
ddict = {"a_str": "a", "a_num": 1, "a_none": None}
assert check.opt_str_elem(ddict, "a_str") == "a"
assert check.opt_str_elem(ddict, "a_none") is None
assert check.opt_str_elem(ddict, "nonexistentkey") is None
with pytest.raises(ElementCheckError):
check.opt_str_elem(ddict, "a_num")
# ########################
# ##### TUPLE
# ########################
def test_tuple_param():
assert check.tuple_param((1, 2), "something")
with pytest.raises(CheckError):
assert check.tuple_param(None, "something") # pyright: ignore[reportArgumentType]
with pytest.raises(CheckError):
assert check.tuple_param(1, "something") # pyright: ignore[reportArgumentType]
with pytest.raises(CheckError):
assert check.tuple_param([1], "something") # pyright: ignore[reportArgumentType]
with pytest.raises(CheckError):
assert check.tuple_param({1: 2}, "something") # pyright: ignore[reportArgumentType]
with pytest.raises(CheckError):
assert check.tuple_param("kdjfkd", "something") # pyright: ignore[reportArgumentType]
assert check.tuple_param((3, 4), "something", of_type=int)
assert check.tuple_param(("foo", "bar"), "something", of_type=str)
assert check.tuple_param((3, 4), "something", of_type=(int,))
assert check.tuple_param((3, 4), "something", of_type=(int, str))
assert check.tuple_param((3, "bar"), "something", of_type=(int, str))
with pytest.raises(CheckError):
check.tuple_param((3, 4, 5), "something", of_type=str)
with pytest.raises(CheckError):
check.tuple_param((3, 4), "something", of_type=(str,))
assert check.tuple_param((3, "a"), "something", of_shape=(int, str))
with pytest.raises(CheckError):
check.tuple_param((3, "a"), "something", of_shape=(int, str, int))
with pytest.raises(CheckError):
check.tuple_param((3, "a"), "something", of_shape=(str, int))
with pytest.raises(CheckError):
check.is_tuple((3, 4), of_shape=(int, int), of_type=int)
def test_opt_tuple_param():
assert check.opt_tuple_param((1, 2), "something")
assert check.opt_tuple_param(None, "something") == tuple()
with pytest.raises(CheckError):
check.opt_tuple_param(1, "something")
with pytest.raises(CheckError):
check.opt_tuple_param([1], "something")
with pytest.raises(CheckError):
check.opt_tuple_param({1: 2}, "something")
with pytest.raises(CheckError):
check.opt_tuple_param("kdjfkd", "something")
assert check.opt_tuple_param((3, 4), "something", of_type=int)
assert check.opt_tuple_param(("foo", "bar"), "something", of_type=str)
assert check.opt_tuple_param((3, 4), "something", of_type=(int,))
assert check.opt_tuple_param((3, 4), "something", of_type=(int, str))
assert check.opt_tuple_param((3, "bar"), "something", of_type=(int, str))
with pytest.raises(CheckError):
check.opt_tuple_param((3, 4, 5), "something", of_type=str)
with pytest.raises(CheckError):
check.opt_tuple_param((3, 4), "something", of_type=(str,))
assert check.opt_tuple_param((3, "a"), "something", of_shape=(int, str))
with pytest.raises(CheckError):
check.opt_tuple_param((3, "a"), "something", of_shape=(int, str, int))
with pytest.raises(CheckError):
check.opt_tuple_param((3, "a"), "something", of_shape=(str, int))
with pytest.raises(CheckError):
check.opt_tuple_param((3, 4), "something", of_shape=(int, int), of_type=int)
def test_opt_nullable_tuple_param():
assert check.opt_nullable_tuple_param((1, 2), "something")
assert check.opt_nullable_tuple_param(None, "something") is None
with pytest.raises(CheckError):
check.opt_nullable_tuple_param([3, 4], "something", of_shape=(int, int), of_type=int) # pyright: ignore[reportCallIssue,reportArgumentType]
def test_is_tuple():
assert check.is_tuple(()) == ()
with pytest.raises(CheckError):
check.is_tuple(None)
with pytest.raises(CheckError):
check.is_tuple("3u4")
with pytest.raises(CheckError, match="Did you pass a class"):
check.is_tuple((str,), of_type=int)
with pytest.raises(CheckError):
check.is_tuple(SomeRecord())
def test_tuple_elem():
tuple_value = ("blah", "blahblah")
ddict = {"tuplekey": tuple_value, "stringkey": "A", "nonekey": None, "reckey": SomeRecord()}
assert check.tuple_elem(ddict, "tuplekey") == tuple_value
assert check.tuple_elem(ddict, "tuplekey", of_type=str) == tuple_value
with pytest.raises(CheckError):
check.tuple_elem(ddict, "nonekey")
with pytest.raises(CheckError):
check.tuple_elem(ddict, "nonexistantkey")
with pytest.raises(CheckError):
check.tuple_elem(ddict, "stringkey")
with pytest.raises(CheckError):
check.tuple_elem(ddict, "tuplekey", of_type=int)
with pytest.raises(CheckError):
check.tuple_elem(ddict, "reckey")
def test_opt_tuple_elem():
tuple_value = ("blah", "blahblah")
ddict = {"tuplekey": tuple_value, "stringkey": "A", "nonekey": None, "reckey": SomeRecord()}
assert check.opt_tuple_elem(ddict, "tuplekey") == tuple_value
assert check.opt_tuple_elem(ddict, "tuplekey", of_type=str) == tuple_value
assert check.opt_tuple_elem(ddict, "nonekey") == tuple()
assert check.opt_tuple_elem(ddict, "nonexistantkey") == tuple()
with pytest.raises(CheckError):
check.opt_tuple_elem(ddict, "stringkey")
with pytest.raises(CheckError):
check.opt_tuple_elem(ddict, "tuplekey", of_type=int)
with pytest.raises(CheckError):
check.opt_tuple_elem(ddict, "reckey")
def test_typed_is_tuple():
class Foo:
pass
class Bar:
pass
assert check.is_tuple((), Foo) == ()
foo_tuple = (Foo(),)
assert check.is_tuple(foo_tuple, Foo) == foo_tuple
assert check.is_tuple(foo_tuple, (Foo, Bar))
with pytest.raises(CheckError):
check.is_tuple((Bar(),), Foo)
with pytest.raises(CheckError):
check.is_tuple((None,), Foo)
assert check.is_tuple((Foo(), Bar()), of_shape=(Foo, Bar))
with pytest.raises(CheckError):
check.is_tuple((Foo(),), of_shape=(Foo, Bar))
with pytest.raises(CheckError):
check.is_tuple((Foo(), Foo()), of_shape=(Foo, Bar))
with pytest.raises(CheckError):
check.is_tuple((Foo(), Foo()), of_shape=(Foo, Foo), of_type=Foo)
# ###################################################################################################
# ##### OTHER CHECKS
# ###################################################################################################
def test_param_invariant():
check.param_invariant(True, "some_param")
num_to_check = 1
check.param_invariant(num_to_check == 1, "some_param")
with pytest.raises(ParameterCheckError):
check.param_invariant(num_to_check == 2, "some_param")
with pytest.raises(ParameterCheckError):
check.param_invariant(False, "some_param")
with pytest.raises(ParameterCheckError):
check.param_invariant(0, "some_param")
check.param_invariant(1, "some_param")
with pytest.raises(ParameterCheckError):
check.param_invariant("", "some_param")
check.param_invariant("1kjkjsf", "some_param")
with pytest.raises(ParameterCheckError):
check.param_invariant({}, "some_param")
check.param_invariant({234: "1kjkjsf"}, "some_param")
with pytest.raises(ParameterCheckError):
check.param_invariant([], "some_param")
check.param_invariant([234], "some_param")
def test_invariant():
assert check.invariant(True)
with pytest.raises(CheckError):
check.invariant(False)
with pytest.raises(CheckError, match="Some Unique String"):
check.invariant(False, "Some Unique String")
empty_list = []
with pytest.raises(CheckError, match="Invariant failed"):
check.invariant(empty_list)
def test_failed():
with pytest.raises(CheckError, match="some desc"):
check.failed("some desc")
with pytest.raises(CheckError, match="must be a string"):
check.failed(0) # pyright: ignore[reportArgumentType]
def test_not_implemented():
with pytest.raises(NotImplementedCheckError, match="some string"):
check.not_implemented("some string")
with pytest.raises(CheckError, match="desc argument must be a string"):
check.not_implemented(None) # pyright: ignore[reportArgumentType]
def test_iterable():
assert check.iterable_param([], "thisisfine") == []
assert check.iterable_param([1], "thisisfine") == [1]
assert check.iterable_param([1], "thisisfine", of_type=int) == [1]
assert check.iterable_param((i for i in [1, 2]), "thisisfine")
# assert that it does not coerce generator to list
assert check.iterable_param((i for i in [1, 2]), "thisisfine") != [1, 2]
assert list(check.iterable_param((i for i in [1, 2]), "thisisfine")) == [1, 2]
with pytest.raises(CheckError, match="Iterable.*str"):
check.iterable_param("lkjsdkf", "stringisiterable")
with pytest.raises(CheckError, match="Iterable.*None"):
check.iterable_param(None, "nonenotallowed") # pyright: ignore[reportArgumentType]
with pytest.raises(CheckError, match="Iterable.*int"):
check.iterable_param(1, "intnotallowed") # pyright: ignore[reportArgumentType]
with pytest.raises(CheckError, match="Member of iterable mismatches type"):
check.iterable_param([1], "typemismatch", of_type=str)
with pytest.raises(CheckError, match="Member of iterable mismatches type"):
check.iterable_param(["atr", 2], "typemismatchmixed", of_type=str)
with pytest.raises(CheckError, match="Member of iterable mismatches type"):
check.iterable_param(["atr", None], "nonedoesntcount", of_type=str)
with pytest.raises(CheckError):
check.iterable_param(SomeRecord(), "nonenotallowed") # pyright: ignore[reportArgumentType]
def test_opt_iterable():
assert check.opt_iterable_param(None, "thisisfine") == []
assert check.opt_iterable_param([], "thisisfine") == []
assert check.opt_iterable_param([1], "thisisfine") == [1]
assert check.opt_iterable_param((i for i in [1, 2]), "thisisfine")
# assert that it does not coerce generator to list
assert check.opt_iterable_param((i for i in [1, 2]), "thisisfine") != [1, 2]
# not_none narrows Optional[Iterable[T]] to Iterable[T], so the result can be listed safely
assert list(check.not_none(check.opt_iterable_param((i for i in [1, 2]), "thisisfine"))) == [
1,
2,
]
check.opt_iterable_param(None, "noneisallowed")
with pytest.raises(CheckError, match="Iterable.*str"):
check.opt_iterable_param("lkjsdkf", "stringisiterable")
with pytest.raises(CheckError, match="Iterable.*int"):
check.opt_iterable_param(1, "intnotallowed") # pyright: ignore[reportArgumentType]
with pytest.raises(CheckError, match="Member of iterable mismatches type"):
check.opt_iterable_param([1], "typemismatch", of_type=str)
with pytest.raises(CheckError, match="Member of iterable mismatches type"):
check.opt_iterable_param(["atr", 2], "typemismatchmixed", of_type=str)
with pytest.raises(CheckError, match="Member of iterable mismatches type"):
check.opt_iterable_param(["atr", None], "nonedoesntcount", of_type=str)
with pytest.raises(CheckError):
check.opt_iterable_param(SomeRecord(), "nonenotallowed") # pyright: ignore[reportArgumentType]
def test_is_iterable() -> None:
assert check.is_iterable([]) == []
assert check.is_iterable((1, 2)) == tuple([1, 2])
assert check.is_iterable("foo") == "foo" # str is iterable
assert check.is_iterable({"a": 1}) == {"a": 1} # dict is iterable
assert check.is_iterable([1, "str"]) == [1, "str"]
with pytest.raises(CheckError):
check.is_iterable([1, "str"], of_type=int)
with pytest.raises(CheckError):
check.is_iterable([1, "str"], of_type=str)
with pytest.raises(CheckError):
check.is_iterable(None)
with pytest.raises(CheckError):
check.is_iterable(1)
def test_is_iterable_typing() -> None:
def returns_iterable_of_int_but_typed_any() -> Any:
return [1, 2]
def returns_iterable_of_t() -> Iterable[int]:
any_typed = returns_iterable_of_int_but_typed_any()
retval = check.is_iterable(any_typed, of_type=str)
# That the type: ignore is necessary is proof that
# is_iterable flows type information correctly
return retval # type: ignore
# meaningless assert. The test is to show the typechecker working
assert returns_iterable_of_t
# ###################################################################################################
# ##### CHECK BUILDER
# ###################################################################################################
def build_check_call(ttype, name, eval_ctx: EvalContext):
body = build_check_call_str(ttype, name, eval_ctx)
lazy_import_str = "\n ".join(
f"from {module} import {t}" for t, module in eval_ctx.lazy_imports.items()
)
eval_ctx.local_ns[INJECTED_CHECK_VAR] = check
fn = f"""
def _check({name}):
{lazy_import_str}
return {body}
"""
return eval_ctx.compile_fn(fn, "_check")
|
SomeRecord
|
python
|
getsentry__sentry
|
src/sentry/tagstore/snuba/backend.py
|
{
"start": 4345,
"end": 5167
}
|
class ____[U](Protocol):
def __call__(
self, *, key: str, value: object, times_seen: int, first_seen: datetime, last_seen: datetime
) -> U: ...
def _make_result[T, U](
key: str,
totals: dict[str, int],
result: dict[str, dict[str, Any]],
key_ctor: _KeyCallable[T, U],
value_ctor: _ValueCallable[U],
) -> T:
top_values = tuple(
value_ctor(
key=key,
value=value,
times_seen=data["count"],
first_seen=parse_datetime(data["first_seen"]),
last_seen=parse_datetime(data["last_seen"]),
)
for value, data in result.items()
)
return key_ctor(
key=key,
values_seen=totals.get("values_seen", 0),
count=totals.get("count", 0),
top_values=top_values,
)
|
_ValueCallable
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/antlr_asset_selection/generated/AssetSelectionLexer.py
|
{
"start": 24983,
"end": 27734
}
|
class ____(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]
EQUAL = 1
AND = 2
OR = 3
NOT = 4
STAR = 5
PLUS = 6
DIGITS = 7
COLON = 8
LPAREN = 9
RPAREN = 10
COMMA = 11
KEY = 12
OWNER = 13
GROUP = 14
TAG = 15
KIND = 16
CODE_LOCATION = 17
STATUS = 18
COLUMN = 19
TABLE_NAME = 20
COLUMN_TAG = 21
CHANGED_IN_BRANCH = 22
SINKS = 23
ROOTS = 24
QUOTED_STRING = 25
UNQUOTED_STRING = 26
UNQUOTED_WILDCARD_STRING = 27
NULL_STRING = 28
WS = 29
channelNames = ["DEFAULT_TOKEN_CHANNEL", "HIDDEN"]
modeNames = ["DEFAULT_MODE"]
literalNames = [
"<INVALID>",
"'='",
"'*'",
"'+'",
"':'",
"'('",
"')'",
"','",
"'key'",
"'owner'",
"'group'",
"'tag'",
"'kind'",
"'code_location'",
"'status'",
"'column'",
"'table_name'",
"'column_tag'",
"'changed_in_branch'",
"'sinks'",
"'roots'",
"'<null>'",
]
symbolicNames = [
"<INVALID>",
"EQUAL",
"AND",
"OR",
"NOT",
"STAR",
"PLUS",
"DIGITS",
"COLON",
"LPAREN",
"RPAREN",
"COMMA",
"KEY",
"OWNER",
"GROUP",
"TAG",
"KIND",
"CODE_LOCATION",
"STATUS",
"COLUMN",
"TABLE_NAME",
"COLUMN_TAG",
"CHANGED_IN_BRANCH",
"SINKS",
"ROOTS",
"QUOTED_STRING",
"UNQUOTED_STRING",
"UNQUOTED_WILDCARD_STRING",
"NULL_STRING",
"WS",
]
ruleNames = [
"EQUAL",
"AND",
"OR",
"NOT",
"STAR",
"PLUS",
"DIGITS",
"COLON",
"LPAREN",
"RPAREN",
"COMMA",
"KEY",
"OWNER",
"GROUP",
"TAG",
"KIND",
"CODE_LOCATION",
"STATUS",
"COLUMN",
"TABLE_NAME",
"COLUMN_TAG",
"CHANGED_IN_BRANCH",
"SINKS",
"ROOTS",
"QUOTED_STRING",
"UNQUOTED_STRING",
"UNQUOTED_WILDCARD_STRING",
"NULL_STRING",
"WS",
]
grammarFileName = "AssetSelection.g4"
def __init__(self, input=None, output: TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.13.2")
self._interp = LexerATNSimulator(
self, self.atn, self.decisionsToDFA, PredictionContextCache()
)
self._actions = None
self._predicates = None
|
AssetSelectionLexer
|
python
|
kamyu104__LeetCode-Solutions
|
Python/ways-to-express-an-integer-as-sum-of-powers.py
|
{
"start": 47,
"end": 490
}
|
class ____(object):
def numberOfWays(self, n, x):
"""
:type n: int
:type x: int
:rtype: int
"""
MOD = 10**9+7
dp = [0]*(n+1)
dp[0] = 1
for i in xrange(1, n+1):
i_pow_x = i**x
if i_pow_x > n:
break
for j in reversed(xrange(i_pow_x, n+1)):
dp[j] = (dp[j]+dp[j-i_pow_x])%MOD
return dp[-1]
|
Solution
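The same bottom-up DP restated in Python 3 for reference (the snippet above uses Python 2's xrange); it treats each x-th power as a 0/1 item, hence the reversed inner loop. The function name and the small checks are illustrative.

MOD = 10**9 + 7


def number_of_ways(n: int, x: int) -> int:
    dp = [0] * (n + 1)
    dp[0] = 1
    base = 1
    while base**x <= n:
        power = base**x
        for j in range(n, power - 1, -1):   # reversed so each power is used at most once
            dp[j] = (dp[j] + dp[j - power]) % MOD
        base += 1
    return dp[n]


assert number_of_ways(10, 2) == 1   # 10 = 1 + 9
assert number_of_ways(4, 1) == 2    # 4 = 4 and 4 = 1 + 3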
|
python
|
PyCQA__pyflakes
|
pyflakes/checker.py
|
{
"start": 11336,
"end": 12020
}
|
class ____(Importation):
"""A binding created by a 'from x import *' statement."""
def __init__(self, name, source):
super().__init__('*', source)
# Each star importation needs a unique name, and
# may not be the module name otherwise it will be deemed imported
self.name = name + '.*'
self.fullName = name
@property
def source_statement(self):
return 'from ' + self.fullName + ' import *'
def __str__(self):
# When the module ends with a ., avoid the ambiguous '..*'
if self.fullName.endswith('.'):
return self.source_statement
else:
return self.name
|
StarImportation
|
python
|
falconry__falcon
|
falcon/routing/compiled.py
|
{
"start": 39488,
"end": 39629
}
|
class ____:
# This is a base element only to aid pep484
def src(self, indentation: int) -> str:
raise NotImplementedError
|
_CxChild
|
python
|
jazzband__django-formtools
|
formtools/preview.py
|
{
"start": 273,
"end": 6251
}
|
class ____:
preview_template = 'formtools/preview.html'
form_template = 'formtools/form.html'
# METHODS SUBCLASSES SHOULDN'T OVERRIDE ###################################
def __init__(self, form):
# form should be a Form class, not an instance.
self.form, self.state = form, {}
def __call__(self, request, *args, **kwargs):
stage = {
'1': 'preview',
'2': 'post',
}.get(request.POST.get(self.unused_name('stage')), 'preview')
self.parse_params(request, *args, **kwargs)
try:
method = getattr(self, stage + '_' + request.method.lower())
except AttributeError:
raise Http404
return method(request)
def unused_name(self, name):
"""
Given a first-choice name, adds an underscore to the name until it
reaches a name that isn't claimed by any field in the form.
This is calculated rather than being hard-coded so that no field names
are off-limits for use in the form.
"""
while 1:
try:
self.form.base_fields[name]
except KeyError:
break # This field name isn't being used by the form.
name += '_'
return name
def preview_get(self, request):
"Displays the form"
f = self.form(auto_id=self.get_auto_id(),
initial=self.get_initial(request))
return render(request, self.form_template, self.get_context(request, f))
def preview_post(self, request):
"""
Validates the POST data. If valid, displays the preview page.
Else, redisplays form.
"""
# Even if files are not supported in preview, we still initialize files
# to give process_preview a chance to access file contents.
f = self.form(data=request.POST, files=request.FILES, auto_id=self.get_auto_id())
context = self.get_context(request, f)
if f.is_valid():
self.process_preview(request, f, context)
context['hash_field'] = self.unused_name('hash')
context['hash_value'] = self.security_hash(request, f)
return render(request, self.preview_template, context)
else:
return render(request, self.form_template, context)
def _check_security_hash(self, token, request, form):
expected = self.security_hash(request, form)
return constant_time_compare(token, expected)
def post_post(self, request):
"""
Validates the POST data. If valid, calls done(). Else, redisplays form.
"""
form = self.form(request.POST, auto_id=self.get_auto_id())
if form.is_valid():
if not self._check_security_hash(
request.POST.get(self.unused_name('hash'), ''),
request, form):
return self.failed_hash(request) # Security hash failed.
return self.done(request, form.cleaned_data)
else:
return render(request, self.form_template, self.get_context(request, form))
# METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ########################
def get_auto_id(self):
"""
Hook to override the ``auto_id`` kwarg for the form. Needed when
rendering two form previews in the same template.
"""
return AUTO_ID
def get_initial(self, request):
"""
Takes a request argument and returns a dictionary to pass to the form's
``initial`` kwarg when the form is being created from an HTTP get.
"""
return {}
def get_context(self, request, form):
"Context for template rendering."
return {
'form': form,
'stage_field': self.unused_name('stage'),
'state': self.state,
}
def parse_params(self, request, *args, **kwargs):
"""
Given captured args and kwargs from the URLconf, saves something in
self.state and/or raises :class:`~django.http.Http404` if necessary.
For example, this URLconf captures a user_id variable::
path('contact/<int:user_id>/', MyFormPreview(MyForm)),
In this case, the kwargs variable in parse_params would be
``{'user_id': 32}`` for a request to ``'/contact/32/'``. You can use
that ``user_id`` to make sure it's a valid user and/or save it for
later, for use in :meth:`~formtools.preview.FormPreview.done()`.
"""
pass
def process_preview(self, request, form, context):
"""
Given a validated form, performs any extra processing before displaying
the preview page, and saves any extra data in context.
By default, this method is empty. It is called after the form is
validated, but before the context is modified with hash information
and rendered.
"""
pass
def security_hash(self, request, form):
"""
Calculates the security hash for the given
:class:`~django.http.HttpRequest` and :class:`~django.forms.Form`
instances.
Subclasses may want to take into account request-specific information,
such as the IP address.
"""
return form_hmac(form)
def failed_hash(self, request):
"""
Returns an :class:`~django.http.HttpResponse` in the case of
an invalid security hash.
"""
return self.preview_post(request)
# METHODS SUBCLASSES MUST OVERRIDE ########################################
def done(self, request, cleaned_data):
"""
Does something with the ``cleaned_data`` data and then needs to
return an :class:`~django.http.HttpResponseRedirect`, e.g. to a
success page.
"""
raise NotImplementedError('You must define a done() method on your '
'%s subclass.' % self.__class__.__name__)
|
FormPreview
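A minimal subclass sketch of the preview flow described above (assumes Django and django-formtools are installed; the form, URL, and redirect target are illustrative). Only done() is mandatory to override.

from django import forms
from django.http import HttpResponseRedirect
from formtools.preview import FormPreview


class ContactForm(forms.Form):
    message = forms.CharField()


class ContactPreview(FormPreview):
    def done(self, request, cleaned_data):
        # Persist or send cleaned_data here, then redirect to a success page.
        return HttpResponseRedirect("/thanks/")

# In urls.py (illustrative): path("contact/", ContactPreview(ContactForm)),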
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/utils/test_credentials_provider.py
|
{
"start": 3659,
"end": 4226
}
|
class ____:
def test_build_gcp_conn_path(self):
value = "test"
conn = build_gcp_conn(key_file_path=value)
assert conn == "google-cloud-platform://?key_path=test"
def test_build_gcp_conn_scopes(self):
value = ["test", "test2"]
conn = build_gcp_conn(scopes=value)
assert conn == "google-cloud-platform://?scope=test%2Ctest2"
def test_build_gcp_conn_project(self):
value = "test"
conn = build_gcp_conn(project_id=value)
assert conn == "google-cloud-platform://?projects=test"
|
TestHelper
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 25542,
"end": 25640
}
|
class ____(models.Model):
name = models.CharField(max_length=20, unique=True)
|
ReferencedByParent
|
python
|
MorvanZhou__Reinforcement-learning-with-tensorflow
|
contents/3_Sarsa_maze/RL_brain.py
|
{
"start": 1529,
"end": 2131
}
|
class ____(RL):
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
super(QLearningTable, self).__init__(actions, learning_rate, reward_decay, e_greedy)
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
q_predict = self.q_table.loc[s, a]
if s_ != 'terminal':
q_target = r + self.gamma * self.q_table.loc[s_, :].max() # next state is not terminal
else:
q_target = r # next state is terminal
self.q_table.loc[s, a] += self.lr * (q_target - q_predict) # update
# on-policy
|
QLearningTable
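The learn() step above implements the standard off-policy update Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a)). A standalone numeric check with made-up values:

lr, gamma = 0.01, 0.9
q_sa, reward, max_q_next = 0.5, 1.0, 2.0

q_target = reward + gamma * max_q_next        # 1.0 + 0.9 * 2.0 = 2.8
q_sa += lr * (q_target - q_sa)                # 0.5 + 0.01 * 2.3 = 0.523
assert abs(q_sa - 0.523) < 1e-12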
|
python
|
openai__openai-python
|
examples/parsing_tools.py
|
{
"start": 140,
"end": 242
}
|
class ____(str, Enum):
orders = "orders"
customers = "customers"
products = "products"
|
Table
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/executors/utils/base_config_keys.py
|
{
"start": 822,
"end": 1169
}
|
class ____:
"""Base Implementation of the Config Keys class. Implements iteration for child classes to inherit."""
def __iter__(self):
"""Return an iterator of values of non dunder attributes of Config Keys."""
return iter({value for (key, value) in self.__class__.__dict__.items() if not key.startswith("__")})
|
BaseConfigKeys
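A self-contained illustration of the iteration contract in that docstring (the base class is re-declared locally so the snippet runs on its own; DemoKeys is hypothetical):

class BaseConfigKeys:
    def __iter__(self):
        # Yield the values of all non-dunder class attributes.
        return iter({v for (k, v) in self.__class__.__dict__.items() if not k.startswith("__")})


class DemoKeys(BaseConfigKeys):
    REGION = "region_name"
    CLUSTER = "cluster"


assert set(DemoKeys()) == {"region_name", "cluster"}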
|
python
|
sympy__sympy
|
sympy/stats/crv_types.py
|
{
"start": 47816,
"end": 50322
}
|
class ____(SingleContinuousDistribution):
_argnames = ('k', 'theta')
set = Interval(0, oo)
@staticmethod
def check(k, theta):
_value_check(k > 0, "k must be positive")
_value_check(theta > 0, "Theta must be positive")
def pdf(self, x):
k, theta = self.k, self.theta
return x**(k - 1) * exp(-x/theta) / (gamma(k)*theta**k)
def _cdf(self, x):
k, theta = self.k, self.theta
return Piecewise(
(lowergamma(k, S(x)/theta)/gamma(k), x > 0),
(S.Zero, True))
def _characteristic_function(self, t):
return (1 - self.theta*I*t)**(-self.k)
def _moment_generating_function(self, t):
return (1 - self.theta*t)**(-self.k)
def Gamma(name, k, theta):
r"""
Create a continuous random variable with a Gamma distribution.
Explanation
===========
The density of the Gamma distribution is given by
.. math::
f(x) := \frac{1}{\Gamma(k) \theta^k} x^{k - 1} e^{-\frac{x}{\theta}}
with :math:`x \in [0, \infty)`.
Parameters
==========
k : Real number, `k > 0`, a shape
theta : Real number, `\theta > 0`, a scale
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Gamma, density, cdf, E, variance
>>> from sympy import Symbol, pprint, simplify
>>> k = Symbol("k", positive=True)
>>> theta = Symbol("theta", positive=True)
>>> z = Symbol("z")
>>> X = Gamma("x", k, theta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
-z
-----
-k k - 1 theta
theta *z *e
---------------------
Gamma(k)
>>> C = cdf(X, meijerg=True)(z)
>>> pprint(C, use_unicode=False)
/ / z \
|k*lowergamma|k, -----|
| \ theta/
<---------------------- for z >= 0
| Gamma(k + 1)
|
\ 0 otherwise
>>> E(X)
k*theta
>>> V = simplify(variance(X))
>>> pprint(V, use_unicode=False)
2
k*theta
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_distribution
.. [2] https://mathworld.wolfram.com/GammaDistribution.html
"""
return rv(name, GammaDistribution, (k, theta))
#-------------------------------------------------------------------------------
# Inverse Gamma distribution ---------------------------------------------------
|
GammaDistribution
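A quick numeric sanity check of the pdf defined above for concrete parameters (assumes sympy is available; k = 2 and theta = 3 are arbitrary choices):

from sympy import Symbol, exp, simplify
from sympy.stats import Gamma, density

z = Symbol("z", positive=True)
X = Gamma("x", 2, 3)
# gamma(2) = 1 and theta**k = 9, so the pdf reduces to z*exp(-z/3)/9.
assert simplify(density(X)(z) - z * exp(-z / 3) / 9) == 0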
|
python
|
pytorch__pytorch
|
test/inductor/test_triton_extension_backend.py
|
{
"start": 2226,
"end": 4015
}
|
class ____(BaseExtensionBackendTests):
"""
Test creating a backend for inductor with Triton scheduling.
"""
def test_open_device_registration(self):
torch._register_device_module("privateuseone", self.module)
register_backend_for_device(
"privateuseone", ExtensionScheduling, ExtensionWrapperCodegen
)
register_device_op_overrides("privateuseone", CPUDeviceOpOverrides())
device_interface.register_interface_for_device("privateuseone", DeviceInterface)
self.assertEqual(
get_scheduling_for_device("privateuseone"), ExtensionScheduling
)
self.assertEqual(
get_wrapper_codegen_for_device("privateuseone"), ExtensionWrapperCodegen
)
self.assertEqual(
device_interface.get_interface_for_device("privateuseone"), DeviceInterface
)
device = torch.device("privateuseone")
x = torch.empty(2, 16).fill_(1).to(device)
def foo(x):
return torch.sin(x) + x.min()
metrics.reset()
opt_fn = torch.compile(foo)
# Since we don't have a triton backend, we need to mock the triton_hash_with_backend
# function
with unittest.mock.patch(
"torch.utils._triton.triton_hash_with_backend",
new=mock_triton_hash_with_backend,
):
code = get_triton_code(opt_fn, x)
FileCheck().check("import triton").check("@triton.jit").check(
"tl_math.sin"
).check("device_str='privateuseone'").run(code)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
from torch.testing._internal.inductor_utils import HAS_CPU
if HAS_CPU and not IS_MACOS:
run_tests()
|
TritonExtensionBackendTests
|
python
|
keras-team__keras
|
guides/making_new_layers_and_models_via_subclassing.py
|
{
"start": 3309,
"end": 4076
}
|
class ____(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super().__init__()
self.w = self.add_weight(
shape=(input_dim, units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(units,), initializer="zeros", trainable=True
)
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
"""
In many cases, you may not know in advance the size of your inputs, and you
would like to lazily create weights when that value becomes known, some time
after instantiating the layer.
In the Keras API, we recommend creating layer weights in the
`build(self, inputs_shape)` method of your layer. Like this:
"""
|
Linear
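A sketch of the lazy, build()-based variant that the guide text above is leading into (standard Keras 3 pattern; the class name is illustrative): weights are created the first time the layer sees an input shape.

import keras
from keras import ops


class LazyLinear(keras.layers.Layer):
    def __init__(self, units=32):
        super().__init__()
        self.units = units

    def build(self, input_shape):
        # input_shape is only known at first call time.
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(shape=(self.units,), initializer="zeros", trainable=True)

    def call(self, inputs):
        return ops.matmul(inputs, self.w) + self.b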
|
python
|
nedbat__coveragepy
|
coverage/exceptions.py
|
{
"start": 1092,
"end": 1172
}
|
class ____(NoSource):
"""We couldn't find any code at all."""
pass
|
NoCode
|
python
|
simplejson__simplejson
|
simplejson/tests/test_for_json.py
|
{
"start": 372,
"end": 447
}
|
class ____(list):
def for_json(self):
return ['list']
|
ListForJson
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1303152,
"end": 1303456
}
|
class ____(
sgqlc.types.Type, Node, AuditEntry, EnterpriseAuditEntryData, OrganizationAuditEntryData, RepositoryAuditEntryData
):
"""Audit log entry for a private_repository_forking.disable event."""
__schema__ = github_schema
__field_names__ = ()
|
PrivateRepositoryForkingDisableAuditEntry
|
python
|
walkccc__LeetCode
|
solutions/1876. Substrings of Size Three with Distinct Characters/1876.py
|
{
"start": 0,
"end": 203
}
|
class ____:
def countGoodSubstrings(self, s: str) -> int:
ans = 0
for a, b, c in zip(s, s[1:], s[2:]):
if a == b or a == c or b == c:
continue
ans += 1
return ans
|
Solution
|
python
|
pandas-dev__pandas
|
pandas/tests/io/formats/test_to_string.py
|
{
"start": 4426,
"end": 6460
}
|
class ____:
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(
np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
)
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(
np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
)
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_with_col_space(self):
df = DataFrame(np.random.default_rng(2).random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH#8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_repr_tuples(self):
buf = StringIO()
df = DataFrame({"tups": list(zip(range(10), range(10)))})
repr(df)
df.to_string(col_space=10, buf=buf)
|
TestDataFrameToStringColSpace
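A small standalone illustration of the col_space behaviour these tests pin down (assumes pandas and numpy; the frame contents are arbitrary): a larger col_space widens every rendered column.

import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(6).reshape(2, 3), columns=["a", "b", "c"])
narrow = df.to_string(col_space=5)
wide = df.to_string(col_space=15)
assert len(wide.splitlines()[1]) > len(narrow.splitlines()[1])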
|
python
|
django__django
|
django/contrib/gis/db/backends/oracle/introspection.py
|
{
"start": 145,
"end": 1910
}
|
class ____(DatabaseIntrospection):
# Associating any OBJECTVAR instances with GeometryField. This won't work
# right on Oracle objects that aren't MDSYS.SDO_GEOMETRY, but it is the
# only object type supported within Django anyways.
@cached_property
def data_types_reverse(self):
return {
**super().data_types_reverse,
oracledb.DB_TYPE_OBJECT: "GeometryField",
}
def get_geometry_type(self, table_name, description):
with self.connection.cursor() as cursor:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension
# information.
try:
cursor.execute(
'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), description.name.upper()),
)
row = cursor.fetchone()
except Exception as exc:
raise Exception(
"Could not find entry in USER_SDO_GEOM_METADATA "
'corresponding to "%s"."%s"' % (table_name, description.name)
) from exc
# TODO: Research way to find a more specific geometry field type
# for the column's contents.
field_type = "GeometryField"
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params["srid"] = srid
# Size of object array (SDO_DIM_ARRAY) is number of dimensions.
dim = dim.size()
if dim != 2:
field_params["dim"] = dim
return field_type, field_params
|
OracleIntrospection
|
python
|
tensorflow__tensorflow
|
tensorflow/python/checkpoint/restore.py
|
{
"start": 1815,
"end": 35555
}
|
class ____(object):
"""Indicates a position within a `_CheckpointRestoreCoordinator`."""
__slots__ = ["_checkpoint", "_proto_id", "skip_restore", "callback"]
def __init__(self, checkpoint, proto_id):
"""Specify an object within a checkpoint.
Args:
checkpoint: A _CheckpointRestoreCoordinator object.
proto_id: The index of this object in TrackableObjectGraph.nodes.
"""
self._checkpoint = checkpoint
self._proto_id = proto_id
# This may be set to True if the registered saver cannot be used with this
# object.
self.skip_restore = False
self.callback = checkpoint_adapter.ReshardCallback()
def restore(self, trackable, reader=None):
"""Restore this value into `trackable`."""
with ops.init_scope():
if self.bind_object(trackable):
# This object's correspondence with a checkpointed object is new, so
# process deferred restorations for it and its dependencies.
restore_ops = self._restore_descendants(reader)
if restore_ops:
self._checkpoint.new_restore_ops(restore_ops)
def bind_object(self, trackable):
"""Set a checkpoint<->object correspondence.
Args:
trackable: The object to record a correspondence for.
Returns:
True if this is a new assignment, False if this object has already been
mapped to a checkpointed `Object` proto.
Raises:
AssertionError: If another object is already bound to the `Object` proto.
"""
checkpoint = self.checkpoint
checkpoint.all_python_objects.add(trackable)
current_assignment = checkpoint.object_by_proto_id.get(self._proto_id, None)
checkpoint.matched_proto_ids.add(self._proto_id)
if current_assignment is None:
checkpoint.object_by_proto_id[self._proto_id] = trackable
return True # New assignment
else:
# The object was already mapped for this checkpoint load, which means
# we don't need to do anything besides check that the mapping is
# consistent (if the dependency DAG is not a tree then there are
# multiple paths to the same object).
if current_assignment is not trackable:
logging.warning(
"Inconsistent references when loading the checkpoint into this "
"object graph. For example, in the saved checkpoint object, "
"`model.layer.weight` and `model.layer_copy.weight` reference the "
"same variable, while in the current object these are two different"
" variables. The referenced variables are:"
f"({current_assignment} and {trackable}).")
return False # Not a new assignment
def update_resharding_callback(
self, callback: checkpoint_adapter.ReshardCallback
):
"""Add a resharding callback to the checkpoint.
This will be applied to the checkpoint value before being supplied to the
restore ops.
Args:
callback: Reshard callback for resharding this checkpoint position. May be
None.
"""
if not issubclass(checkpoint_adapter.ReshardCallback, type(self.callback)):
raise TypeError(
"Cannot override resharding callback, already set to non trivial."
)
self.callback = callback
def has_non_trivial_reshard_callback(self) -> bool:
"""Determine whether this value has a non-trivial resharding callback."""
return not issubclass(
checkpoint_adapter.ReshardCallback, type(self.callback)
)
def is_simple_variable(self) -> bool:
"""Determine whether this value is restorable with a Tensor initializer."""
attributes = self.object_proto.attributes
return (
len(attributes) == 1
and attributes[0].name == constants.VARIABLE_VALUE_KEY
and not self.object_proto.children
)
def value_tensors(
self, shape_and_slices: Optional[str] = None
) -> Mapping[str, tensor.Tensor]:
"""Create value `Tensor`s for this object's attributes.
Does not require that the Python object has been created. Used for
restore-on-create when executing eagerly.
Args:
shape_and_slices: A dict mapping from object attribute names to a shape
and slice string that will be passed to a RestoreV2 op. If the dict is
None or if an object attribute is not in the dict, the full tensor will
be restored.
Returns:
A dictionary mapping from object attribute names to `Tensor`s.
"""
value_tensors = {}
for serialized_tensor in self.object_proto.attributes:
checkpoint_key = serialized_tensor.checkpoint_key
io_device = self._checkpoint.options.experimental_io_device or "cpu:0"
with ops.init_scope():
with ops.device(io_device):
# Run the restore itself on the io_device(CPU or specified).
if (
shape_and_slices is not None
and serialized_tensor.name in shape_and_slices
):
shape_and_slice = shape_and_slices[serialized_tensor.name]
else:
shape_and_slice = ""
checkpoint_keys, full_shape_and_slices = (
self.callback.update_restore_inputs(
checkpoint_key, shape_and_slice
)
)
dtypes = []
for key in checkpoint_keys:
dtype = self._checkpoint.dtype_map[key]
dtypes.append(dtype.base_dtype)
restored_values = io_ops.restore_v2(
prefix=self._checkpoint.save_path_tensor,
tensor_names=checkpoint_keys,
shape_and_slices=full_shape_and_slices,
dtypes=dtypes,
name="%s_checkpoint_read" % (serialized_tensor.name,),
)
value = self.callback.reshard(
restored_values, shape_and_slice
)
# Copy the value to the current device if necessary.
value_tensors[serialized_tensor.name] = array_ops.identity(value)
return value_tensors
def gather_ops_or_named_saveables(self):
"""Looks up or creates SaveableObjects which don't have cached ops.
Returns:
A tuple of (
existing_restore_ops: list,
named_saveables: dict,
python_positions: list,
registered_savers: dict)
"""
recorded_registered_saver = self.get_registered_saver_name()
if not (self.object_proto.attributes or recorded_registered_saver):
return [], {}, [], {}
existing_restore_ops = []
named_saveables = {}
python_positions = []
registered_savers = collections.defaultdict(dict)
saveable_factories = saveable_object_util.saveable_objects_from_trackable(
self.trackable)
saver_name = registration.get_registered_saver_name(self.trackable)
if recorded_registered_saver:
if not self.skip_restore:
name = self.object_proto.registered_saver.object_name
registered_savers[recorded_registered_saver][name] = self.trackable
# Else: Skip restoration of this Trackable. This skip only happens if the
# registered saver has enabled `option_restore`. Otherwise, an error would
# have been raised at `self.get_registered_saver_name()`.
elif saver_name:
# In this case, the checkpoint has a recorded serialized tensor but no
# registered saver, while the Trackable loading the checkpoint has
# migrated to the registered checkpoint functionality (TPUEmbedding is an
# example of this).
# Set the Trackable's object name to the first checkpoint key that is
# stored in checkpoint. If there is a use case that requires the other
# keys, then we can take another look at this.
registered_savers[saver_name] = {
self.object_proto.attributes[0].checkpoint_key: self.trackable
}
elif isinstance(self.trackable, python_state.PythonState):
python_positions.append(self)
elif saveable_factories.keys() == {
trackable_utils.SERIALIZE_TO_TENSORS_NAME
}:
existing_restore_ops, named_saveables = (
self._create_serialize_to_tensor_saveable(saveable_factories))
elif saveable_factories:
existing_restore_ops, named_saveables = (
self._create_saveables_by_attribute_name(saveable_factories))
else:
# If no registered savers were found, then it means that one or more
# serialized tensors were never used.
for serialized_tensor in self.object_proto.attributes:
self._checkpoint.unused_attributes.setdefault(
self._proto_id, []).append(serialized_tensor.name)
return (existing_restore_ops, named_saveables, python_positions,
registered_savers)
def _create_serialize_to_tensor_saveable(self, saveable_factories):
"""Creates a saveable using the _serialize_to_tensor method."""
# Extract the saveable name from the checkpoint key. This will be used as
# the cache key or the name to pass to the saveable factory.
suffix = saveable_compat.get_saveable_name(self.trackable) or ""
saveable_name = _extract_saveable_name(
self.object_proto.attributes[0].checkpoint_key) + suffix
# Try to find the cached saveable (only in graph mode).
if not context.executing_eagerly():
existing_op = self._checkpoint.restore_ops_by_name.get(
saveable_name, None)
if existing_op is not None:
return [existing_op], {}
saveables_cache = self._checkpoint.saveables_cache.setdefault(
self.trackable, {})
if saveable_name in saveables_cache:
return [], {saveable_name: saveables_cache[saveable_name]}
saveable = saveable_factories[trackable_utils.SERIALIZE_TO_TENSORS_NAME](
name=saveable_name)
if not context.executing_eagerly():
saveables_cache[saveable_name] = saveable
return [], {saveable_name: saveable}
def _create_saveables_by_attribute_name(self, saveable_factories):
"""Creates or caches SaveableObjects by matching the attribute names.
The attribute name keys in the `saveable_factories` is used to find the
corresponding attribute in the object proto. Attributes contain checkpoint
keys which are passed to the factory function to generate the
SaveableObject.
Args:
saveable_factories: a dict mapping attribute name to a callable factory
function that produces a SaveableObject.
Returns:
A tuple of (
existing_restore_ops: list,
named_saveables: dict)
"""
# Name saveables based on the name this object had when it was checkpointed.
named_saveables = {}
existing_restore_ops = []
# Forward compatibility code: when loading a future checkpoint, there may
# be multiple SerializedTensors mapped to a single saveable.
created_compat_names = set()
for serialized_tensor in self.object_proto.attributes:
if context.executing_eagerly():
existing_op = None
else:
existing_op = self._checkpoint.restore_ops_by_name.get(
serialized_tensor.checkpoint_key, None)
if existing_op is not None:
existing_restore_ops.append(existing_op)
continue
if any(serialized_tensor.name.startswith(name)
for name in created_compat_names):
continue # Saveable has already been created for this tensor.
# Only if we don't have cached ops for this SaveableObject, we'll see if
# the SaveableObject itself has been cached. If not, we'll make it, and
# either way we'll extract new ops from it (or if it has Python state to
# restore, we'll run that).
saveables_cache = self._checkpoint.saveables_cache
if saveables_cache is None:
# No SaveableObject caching when executing eagerly.
saveable = None
else:
# If we've already created and cached a SaveableObject for this
# attribute, we can re-use it to avoid re-creating some ops when graph
# building.
saveable_list = saveables_cache.get(self.trackable,
{}).get(serialized_tensor.name,
(None,))
if len(saveable_list) == 1:
# Almost every attribute will have exactly one SaveableObject.
saveable, = saveable_list
else:
# Don't use cached SaveableObjects for partitioned variables, which is
# the only case where we'd have a list of SaveableObjects. Op caching
# will catch them.
saveable = None
if saveable is not None:
# The name of this attribute has changed, so we need to re-generate
# the SaveableObject.
if serialized_tensor.checkpoint_key not in saveable.name:
saveable = None
del saveables_cache[self.trackable]
if saveable is None:
# If there was no cached SaveableObject, create one.
# Use the name to check if the Python object has the same attribute.
saveable = _get_saveable_from_factory(saveable_factories,
serialized_tensor,
created_compat_names)
if saveable is None:
# Purposefully does not throw an exception if attributes have been
# added or deleted. Stores unused attributes so an exception can be
# raised if the user decides to check that everything in the
# checkpoint was loaded.
self._checkpoint.unused_attributes.setdefault(
self._proto_id, []).append(serialized_tensor.name)
continue
if saveables_cache is not None:
saveables_cache.setdefault(self.trackable,
{})[serialized_tensor.name] = [saveable]
named_saveables[serialized_tensor.checkpoint_key] = saveable
return existing_restore_ops, named_saveables
def restore_ops(self, reader=None):
"""Create or fetch restore ops for this object's attributes.
Requires that the `Trackable` Python object has been bound to an object
ID in the checkpoint.
Args:
reader: A `CheckpointReader`. If None, a new instance will be created.
Returns:
A list of operations when graph building, or an empty list when executing
eagerly.
"""
if self._has_registered_saver():
raise ValueError("Unable to run individual checkpoint restore for objects"
" with registered savers.")
(restore_ops, tensor_saveables, python_positions,
_) = self.gather_ops_or_named_saveables()
restore_ops.extend(
self._checkpoint.restore_saveables(
tensor_saveables, python_positions, reader=reader))
return restore_ops
@property
def checkpoint(self):
return self._checkpoint
@property
def trackable(self):
return self._checkpoint.object_by_proto_id[self._proto_id]
@property
def object_proto(self):
return self._checkpoint.object_graph_proto.nodes[self._proto_id]
@property
def proto_id(self):
return self._proto_id
@property
def restore_uid(self):
return self._checkpoint.restore_uid
def __repr__(self):
return repr(self.object_proto)
def value_shape(self):
"""The shape of the VARIABLE_VALUE tensor.
Returns:
A TensorShape object if found, otherwise None.
"""
for serialized_tensor in self.object_proto.attributes:
if serialized_tensor.name == constants.VARIABLE_VALUE_KEY:
return self._checkpoint.shape_map[serialized_tensor.checkpoint_key]
return None
def _has_registered_saver(self):
return bool(self.object_proto.registered_saver.name)
def get_registered_saver_name(self):
"""Returns the registered saver name defined in the Checkpoint."""
if self._has_registered_saver():
saver_name = self.object_proto.registered_saver.name
try:
registration.validate_restore_function(self.trackable, saver_name)
except ValueError as e:
if registration.get_strict_predicate_restore(saver_name):
raise e
self.skip_restore = True
return saver_name
return None
def create_slot_variable_position(
self,
optimizer_object: Any,
variable: base.Trackable,
slot_variable_id: str,
slot_name: str,
reshard_callback: Optional[checkpoint_adapter.ReshardCallback] = None,
):
"""Generates CheckpointPosition for a slot variable.
Args:
optimizer_object: Optimizer that owns the slot variable.
variable: Variable associated with the slot variable.
slot_variable_id: ID of the slot variable.
slot_name: Name of the slot variable.
reshard_callback: A callback object for resharding value from checkpoint
at restore.
Returns:
If there is a slot variable in the `optimizer_object` that has not been
bound to the checkpoint, this function returns a tuple of (
new `CheckpointPosition` for the slot variable,
the slot variable itself).
"""
slot_variable_position = CheckpointPosition(
checkpoint=self.checkpoint, proto_id=slot_variable_id
)
# pylint: disable=protected-access
if reshard_callback is not None:
# slot_variable_shape kwarg is available only for optimizer_v2 objects.
slot_variable_position.update_resharding_callback(reshard_callback)
slot_variable = optimizer_object._create_or_restore_slot_variable(
slot_variable_position=slot_variable_position,
variable=variable,
slot_name=slot_name,
slot_variable_shape=variable.shape,
)
else:
slot_variable = optimizer_object._create_or_restore_slot_variable(
slot_variable_position=slot_variable_position,
variable=variable,
slot_name=slot_name,
)
# pylint: enable=protected-access
if slot_variable is not None and slot_variable_position.bind_object(
slot_variable
):
return slot_variable_position, slot_variable
else:
return None, None
def create_child_position(self, node_id):
return CheckpointPosition(checkpoint=self.checkpoint, proto_id=node_id)
def _restore_descendants(self, reader=None):
"""Restore the bound Trackable and dependencies (may be deferred)."""
# Attempt a breadth-first traversal, since presumably the user has more
# control over shorter paths. If we don't have all of the dependencies at
# this point, the end result is not breadth-first (since other deferred
# traversals will happen later).
# You may be wondering why elements in the `visit_queue` are tuples that
# contains both CheckpointPositions and their Trackable. The reason is that
# Optimizers will not keep a strong reference to slot vars for
# ShardedVariables. The slot variable must be kept in memory until the
# restore saveables have been created.
visit_queue = collections.deque([(self, self.trackable)])
restore_ops = []
tensor_saveables = {}
python_positions = []
registered_savers = collections.defaultdict(dict)
while visit_queue:
current_position, _ = visit_queue.popleft()
# Restore using the ops defined in a Saveable or registered function.
(
new_restore_ops,
new_tensor_saveables,
new_python_positions,
new_registered_savers,
) = current_position._single_restore() # pylint: disable=protected-access
restore_ops.extend(new_restore_ops)
tensor_saveables.update(new_tensor_saveables)
python_positions.extend(new_python_positions)
for saver_name, trackable_map in new_registered_savers.items():
registered_savers[saver_name].update(trackable_map)
# Pass the restoration to the dependencies.
_queue_children_for_restoration(current_position, visit_queue)
_queue_slot_variables(current_position, visit_queue)
restore_ops.extend(
current_position.checkpoint.restore_saveables(
tensor_saveables, python_positions, registered_savers, reader=reader
)
)
return restore_ops
def _single_restore(self):
"""Restores the trackable."""
trackable = self.trackable
trackable._maybe_initialize_trackable() # pylint: disable=protected-access
checkpoint = self.checkpoint
# If the UID of this restore is lower than our current update UID, we don't
# need to actually restore the object.
if checkpoint.restore_uid > trackable._update_uid: # pylint: disable=protected-access
restore_ops, tensor_saveables, python_positions, registered_savers = (
self.gather_ops_or_named_saveables()
)
trackable._update_uid = checkpoint.restore_uid # pylint: disable=protected-access
else:
restore_ops = ()
tensor_saveables = {}
python_positions = ()
registered_savers = {}
return restore_ops, tensor_saveables, python_positions, registered_savers
def restore_nodes(save_path, nodes_to_restore):
"""Restores nodes from a dict.
Requires that the `Trackable` Python object has been bound to an object
ID in the checkpoint.
Args:
save_path: a string represents path to the checkpoint.
nodes_to_restore: a dict mapping `node_id` to the `trackable` to be restored.
"""
if save_path is None:
raise ValueError("save_path cannot be empty.")
if not isinstance(nodes_to_restore, dict):
raise ValueError(
"Expecting a dictionary of node_id to Trackable for nodes_to_restore.")
ckpt_view = checkpoint_view.CheckpointView(save_path)
ckpt_view_descendants = ckpt_view.descendants()
for node_id, trackable in nodes_to_restore.items():
# node_id does not have a corresponding Checkpoint value.
if (node_id not in ckpt_view_descendants or
ckpt_view._object_graph_proto.nodes[ # pylint: disable=protected-access
node_id] is None):
raise ValueError(
f"The expected node_id: {node_id} to Trackable {trackable} to "
"restore does not exist in the checkpoint.")
# Trackable mapped to node_id to restore is empty.
if trackable is None or not isinstance(trackable, base.Trackable):
raise ValueError(
f"Expecting a valid Trackable to node_id: {node_id} but got "
f"trackable: {trackable}."
)
serialized_tensors = object_identity.ObjectIdentityDictionary()
for node_id, current_trackable in nodes_to_restore.items():
ckpt_contains_serialized_tensors = ckpt_view._object_graph_proto.nodes[ # pylint: disable=protected-access
node_id].attributes
node = ckpt_view._object_graph_proto.nodes[node_id] # pylint: disable=protected-access
trackable_has_serialize_to_tensor = (
saveable_object_util.trackable_has_serialize_to_tensor(
current_trackable
)
)
if not trackable_has_serialize_to_tensor:
if not node.attributes:
if saveable_object_util.saveable_objects_from_trackable(
current_trackable):
raise ValueError(
f"Trackable {current_trackable} expects checkpointed values but "
"checkpoint does not contain serialized tensors for node_id: "
f"{node_id}.")
else:
continue
object_names = object_identity.ObjectIdentityDictionary()
object_names[current_trackable] = trackable_utils.extract_object_name(
node.attributes[0].checkpoint_key)
checkpoint_factory_map, _ = (
save_util_v1.get_checkpoint_factories_and_keys(object_names, None)
)
saveable_objects = save_util_v1.generate_saveable_objects(
checkpoint_factory_map)[0]
if len(node.attributes) != len(saveable_objects):
raise ValueError("Size for saveable_objects for Trackable: "
f"{len(saveable_objects)} did not match the size for "
"serialized_tensors for checkpoint: "
f"{len(node.attributes)}.")
current_trackable = saveable_object_util.SaveableCompatibilityConverter(
current_trackable, saveable_objects)
serialized_tensors[
current_trackable] = current_trackable._serialize_to_tensors() # pylint: disable=protected-access
trackable_expects_ckpted_value = bool(serialized_tensors[current_trackable])
if trackable_expects_ckpted_value and not ckpt_contains_serialized_tensors:
raise ValueError(
f"Trackable {current_trackable} expects checkpointed values but "
"checkpoint does not contain serialized tensors for node_id: "
f"{node_id}.")
if not trackable_expects_ckpted_value and ckpt_contains_serialized_tensors:
raise ValueError(
f"Trackable {current_trackable} does not expect checkpointed "
"values but checkpoint contains serialized tensors: "
f"{ckpt_contains_serialized_tensors} for node_id: {node_id}.")
if len(node.attributes) != len(serialized_tensors[current_trackable]):
raise ValueError("Size for serialized_tensors for Trackable: "
f"{len(serialized_tensors[current_trackable])} did not "
"match size for serialized_tensors for checkpoint: "
f"{len(node.attributes)}.")
if not trackable_has_serialize_to_tensor:
functional_saver.MultiDeviceSaver(serialized_tensors).restore(save_path)
else:
# Converts attribute.name to attribute.checkpoint_key since that's what the
# restore method expects, i.e., converts "a" to "/.ATTRIBUTES/a".
serialized_tensors_renamed = object_identity.ObjectIdentityDictionary()
serialized_tensors_renamed[current_trackable] = {}
for attribute in node.attributes:
name = attribute.name
checkpoint_key = attribute.checkpoint_key
serialized_tensors_renamed[current_trackable][
checkpoint_key] = serialized_tensors[current_trackable][name]
functional_saver.MultiDeviceSaver(serialized_tensors_renamed).restore(
save_path)
def _maybe_get_adapter(checkpoint_position, trackable):
adapter = trackable._checkpoint_adapter( # pylint: disable=protected-access
checkpoint_position.checkpoint.save_path_string
)
if adapter and adapter.is_applicable(trackable):
return adapter
return None
def _queue_children_for_restoration(checkpoint_position, visit_queue):
"""Queues the restoration of trackable's children or defers them."""
# pylint: disable=protected-access
trackable = checkpoint_position.trackable
trackable_children = trackable._trackable_children()
adapter = _maybe_get_adapter(checkpoint_position, trackable)
for child in checkpoint_position.object_proto.children:
# trackable._lookup_dependency can be expensive so first check if this node
# already has an object correspondence. If so we skip this node.
correspondence = checkpoint_position.checkpoint.object_by_proto_id.get(
child.node_id, None
)
if correspondence is not None:
continue
child_position = checkpoint_position.create_child_position(child.node_id)
local_object = trackable._lookup_dependency(child.local_name,
trackable_children)
child_proto = child_position.object_proto
if local_object is None:
# We don't yet have a dependency registered with this name. Save it
# in case we do.
if child_proto.HasField("has_checkpoint_values"):
has_value = child_proto.has_checkpoint_values.value
else:
# If the field is not set, do a simple check to see if the dependency
# has children and/or checkpointed values.
has_value = bool(
child_proto.children
or child_proto.attributes
or child_proto.slot_variables
or child_proto.HasField("registered_saver")
)
if has_value:
local_trackable_name = child.local_name
if adapter:
local_trackable_name, reshard_callback = adapter.maybe_reshard(
child.local_name
)
if reshard_callback:
child_position.update_resharding_callback(reshard_callback)
trackable._deferred_dependencies.setdefault(
local_trackable_name, []
).append(child_position)
else:
if child_position.bind_object(trackable=local_object):
# This object's correspondence is new, so dependencies need to be
# visited. Delay doing it so that we get a breadth-first dependency
# resolution order (shallowest paths first). The caller is responsible
# for emptying visit_queue.
visit_queue.append((child_position, local_object))
_DeferredSlotVariableRestoration = collections.namedtuple(
"_DeferredSlotVariableRestoration", [
"original_variable",
"slot_variable_id",
"slot_name",
])
def _queue_slot_variables(checkpoint_position, visit_queue):
"""Queues slot variables for restoration."""
trackable = checkpoint_position.trackable
checkpoint = checkpoint_position.checkpoint
for deferred_slot_restoration in (checkpoint.deferred_slot_restorations.pop(
checkpoint_position.proto_id, ())):
slot_variable_position, slot_variable = (
checkpoint_position.create_slot_variable_position(
trackable,
deferred_slot_restoration.original_variable,
deferred_slot_restoration.slot_variable_id,
deferred_slot_restoration.slot_name,
# If the corresponding variable has a non trivial resharding
# attached, the slot variable should be resharded in the same
# way.
checkpoint_position.callback
if checkpoint_position.has_non_trivial_reshard_callback()
else None,
)
)
if slot_variable_position is not None:
visit_queue.append((slot_variable_position, slot_variable))
for slot_restoration in checkpoint.slot_restorations.pop(
checkpoint_position.proto_id, ()):
optimizer_object = checkpoint.object_by_proto_id.get(
slot_restoration.optimizer_id, None)
if optimizer_object is None:
# The optimizer has not yet been created or tracked. Record in the
# checkpoint that the slot variables need to be restored when it is.
checkpoint.deferred_slot_restorations.setdefault(
slot_restoration.optimizer_id, []).append(
_DeferredSlotVariableRestoration(
original_variable=trackable,
slot_variable_id=slot_restoration.slot_variable_id,
slot_name=slot_restoration.slot_name))
# `optimizer_object` can be a `Checkpoint` when the user only needs the
# attributes the optimizer holds, such as `iterations`. In those cases,
# it would not have the optimizer's `_create_or_restore_slot_variable`
# method.
elif hasattr(optimizer_object, "_create_or_restore_slot_variable"):
slot_variable_position, slot_variable = (
checkpoint_position.create_slot_variable_position(
optimizer_object,
trackable,
slot_restoration.slot_variable_id,
slot_restoration.slot_name,
# If the corresponding variable has a non trivial resharding
# attached, the slot variable should be resharded in the same
# way.
checkpoint_position.callback
if checkpoint_position.has_non_trivial_reshard_callback()
else None,
)
)
if slot_variable_position is not None:
visit_queue.append((slot_variable_position, slot_variable))
def _extract_saveable_name(checkpoint_key):
# Substring the checkpoint key to the end of the "{...}.ATTRIBUTES/"
search_key = trackable_utils.OBJECT_ATTRIBUTES_NAME + "/"
return checkpoint_key[:checkpoint_key.index(search_key) + len(search_key)]
def _get_saveable_from_factory(saveable_factories, serialized_tensor,
created_compat_names):
"""Returns the saveable generated from the factory method."""
matched_factory = None
# The `expected_factory_name` is used to find the right saveable factory,
# while the `factory_input_name` is the value that is passed to the factory
# method to instantiate the SaveableObject.
expected_factory_name = serialized_tensor.name
factory_input_name = serialized_tensor.checkpoint_key
# Case 1: the name already exactly matches a key in saveable_factories.
if expected_factory_name in saveable_factories:
matched_factory = saveable_factories[expected_factory_name]
# Case 2: (Forward compat) The serialized name is composed of
# "factory_name" + "SUFFIX". Get the matching factory name.
if matched_factory is None:
for factory_name, factory in saveable_factories.items():
if expected_factory_name.startswith(factory_name):
if matched_factory is not None:
# This condition is met in the extreme edge case where the object
# returns two saveable factories with similar names. This is very
# unlikely because there are zero objects inside TensorFlow that use
# more than one saveable factory.
raise ValueError("Forward compatibility load error: Unable to load "
"checkpoint saved in future version of TensorFlow. "
"Please update your version of TensorFlow to the "
"version in which the checkpoint was saved.")
matched_factory = factory
factory_input_name = _extract_saveable_name(
serialized_tensor.checkpoint_key) + factory_name
created_compat_names.add(factory_name)
if callable(matched_factory):
return matched_factory(name=factory_input_name)
return matched_factory
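For orientation, a minimal public-API sketch (not part of this module) of the save/restore round trip that the deferred-restoration machinery above serves; the checkpoint prefix "/tmp/ckpt_demo" is a hypothetical writable path.
```python
import tensorflow as tf

v = tf.Variable(3.0)
ckpt = tf.train.Checkpoint(v=v)
save_path = ckpt.save("/tmp/ckpt_demo")  # hypothetical path

v.assign(0.0)
# restore() walks the checkpoint's object graph; matched objects are handled
# through the CheckpointPosition machinery shown above.
ckpt.restore(save_path)
assert v.numpy() == 3.0
```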
|
CheckpointPosition
|
python
|
pytorch__pytorch
|
torch/nn/modules/activation.py
|
{
"start": 52432,
"end": 54922
}
|
class ____(Module):
r"""Applies the element-wise PReLU function.
.. math::
\text{PReLU}(x) = \max(0,x) + a * \min(0,x)
or
.. math::
\text{PReLU}(x) =
\begin{cases}
x, & \text{ if } x \ge 0 \\
ax, & \text{ otherwise }
\end{cases}
Here :math:`a` is a learnable parameter. When called without arguments, `nn.PReLU()` uses a single
parameter :math:`a` across all input channels. If called with `nn.PReLU(nChannels)`,
a separate :math:`a` is used for each input channel.
.. note::
weight decay should not be used when learning :math:`a` for good performance.
.. note::
Channel dim is the 2nd dim of input. When input has dims < 2, then there is
no channel dim and the number of channels = 1.
Args:
num_parameters (int): number of :math:`a` to learn.
Although it takes an int as input, only two values are legitimate:
1, or the number of channels at input. Default: 1
init (float): the initial value of :math:`a`. Default: 0.25
Shape:
- Input: :math:`(*)` where `*` means any number of additional
dimensions.
- Output: :math:`(*)`, same shape as the input.
Attributes:
weight (Tensor): the learnable weights of shape (:attr:`num_parameters`).
.. image:: ../scripts/activation_images/PReLU.png
Examples::
>>> m = nn.PReLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["num_parameters"]
num_parameters: int
def __init__(
self, num_parameters: int = 1, init: float = 0.25, device=None, dtype=None
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
self.num_parameters = num_parameters
super().__init__()
self.init = init
self.weight = Parameter(torch.empty(num_parameters, **factory_kwargs))
self.reset_parameters()
def reset_parameters(self) -> None:
"""
Resets parameters based on their initialization used in ``__init__``.
"""
torch.nn.init.constant_(self.weight, self.init)
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.prelu(input, self.weight)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return f"num_parameters={self.num_parameters}"
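A short usage sketch (not from the original file) of the per-channel form mentioned in the docstring, where `num_parameters` equals the number of input channels:
```python
import torch
import torch.nn as nn

# One learnable slope per channel: num_parameters == number of input channels.
m = nn.PReLU(num_parameters=3, init=0.1)
x = torch.randn(2, 3, 8, 8)   # (N, C, H, W) with C == 3
y = m(x)
print(y.shape)                # torch.Size([2, 3, 8, 8])
print(m.weight.shape)         # torch.Size([3])
```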
|
PReLU
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/cpp_template_kernel.py
|
{
"start": 1412,
"end": 23741
}
|
class ____(CppKernel):
def __init__(self, kernel_name, num_threads):
super().__init__(None, num_threads)
self.kernel_name = kernel_name
self.render_hooks = {}
self.local_buffers = {}
def render(self, template, **kwargs):
return PartialRender(
template.render(kernel=self, **kwargs), self.render_hooks
).finalize_all()
def def_kernel(
self,
inputs: dict[str, ir.Buffer],
outputs: dict[str, ir.Buffer],
aliases: Optional[dict[str, str]] = None,
function_name: str = "",
extra_sizevars: Optional[list[sympy.Expr]] = None,
placeholder: str = "<DEF_KERNEL>",
) -> str:
if len(function_name) == 0:
function_name = str(self.kernel_name)
for name, inp in inputs.items():
if inp is not None:
self.args.input_buffers[inp.get_name()] = name
for name, out in outputs.items():
self.args.output_buffers[out.get_name()] = name
if aliases is not None:
for alias, orig in aliases.items():
if orig in self.args.input_buffers:
self.args.input_buffers[alias] = self.args.input_buffers[orig]
if orig in self.args.output_buffers:
self.args.output_buffers[alias] = self.args.output_buffers[orig]
unique_sizevars = OrderedSet(
s
for input in inputs.values()
if input is not None
for sym in itertools.chain(input.get_size(), input.get_stride())
if isinstance(sym, sympy.Expr)
for s in sym.free_symbols
)
unique_sizevars.update(
s
for sym in extra_sizevars or []
if isinstance(sym, sympy.Expr)
for s in sym.free_symbols
)
unique_sizevars.update(
s
for output in outputs.values()
for sym in itertools.chain(output.get_size(), output.get_stride())
if isinstance(sym, sympy.Expr)
for s in sym.free_symbols
)
sizevars = sorted(unique_sizevars, key=str)
for sizevar in sizevars:
self.args.sizevars[sizevar] = f"k{sizevar}"
def hook():
# remove all aliases before generating the function definition
if aliases is not None:
for alias in aliases:
if alias in self.args.input_buffers:
raise AssertionError(
f"input_buffers cannot be removed: {alias}"
)
if alias in self.args.output_buffers:
self.args.output_buffers[alias] = REMOVED
cpp_argdefs, _, _ = self.args.cpp_argdefs()
return f"void {function_name}({', '.join(cpp_argdefs)})"
assert placeholder not in self.render_hooks
self.render_hooks[placeholder] = hook
return placeholder
def call_kernel(self, name: str, node: ir.CppTemplateBuffer):
wrapper = V.graph.wrapper_code
_, call_args, arg_types = self.args.cpp_argdefs()
wrapper.generate_kernel_call(name, call_args, triton=False, arg_types=arg_types)
def dtype(self, node: ir.Buffer) -> str:
return DTYPE_TO_CPP[node.get_dtype()]
def acc_dtype(self, node: ir.Buffer) -> str:
if node.get_dtype() in [torch.float32, torch.bfloat16, torch.half]:
return "float"
else:
raise NotImplementedError(f"Unsupported dtype: {node.get_dtype()}")
def size(self, node: ir.Buffer, dim: int) -> str:
return cexpr_index(self.rename_indexing(node.get_size()[dim]))
def stride(self, node: ir.Buffer, dim: int) -> str:
return cexpr_index(self.rename_indexing(node.get_stride()[dim]))
def index(self, node: ir.Buffer, indices: list[Any]) -> str:
indexer = node.get_layout().as_fixed().make_indexer()
index = indexer(parse_expr_with_index_symbols(indices))
index = self.rename_indexing(index)
outer_name = node.get_name()
inner_name = (
outer_name
if outer_name in self.local_buffers
else self.args.input(node.get_name())
)
return f"{inner_name}[{cexpr_index(index)}]"
def slice_nd(self, node, ranges: list[tuple[Any, Any]]) -> ir.ReinterpretView:
"""
Slice the given node with a list of ranges (start and end) corresponding to its dims.
The dim is not sliced if the corresponding range is empty.
"""
assert len(ranges) == len(node.get_size()), f"{ranges=}, {node=}"
sliced = wrap_with_tensorbox(node)
for dim, _range in enumerate(ranges):
if len(_range) == 0:
continue
assert len(_range) == 2
start, end = parse_expr_with_index_symbols(_range)
sliced = L.slice_(sliced, dim, start, end, clamp=False)
assert isinstance(sliced, ir.TensorBox)
assert isinstance(sliced.data, ir.ReinterpretView), sliced.data
return sliced.data
def select(self, node, dim: int, idx: int) -> ir.ReinterpretView:
# We avoid using L.select here because we need clamp=False so the dim after slicing
# is 1 instead of a sympy expression of symbol - dim_size.
node = wrap_with_tensorbox(node)
idx = ir.View.handle_negative_index(idx, node.get_size()[dim])
sliced = L.squeeze(L.slice_(node, dim, idx, idx + 1, clamp=False), dim)
assert isinstance(sliced.data, ir.ReinterpretView), sliced.data
return sliced.data
def view(self, node, sizes: list[Any]) -> ir.IRNode:
node = wrap_with_tensorbox(node)
sizes = parse_expr_with_index_symbols(sizes)
return L.view(node, sizes).data # type: ignore[arg-type]
def permute(self, node, dims):
node = wrap_with_tensorbox(node)
permuted = L.permute(node, dims).data
assert isinstance(permuted, ir.ReinterpretView)
return permuted
def maybe_codegen_profile(self) -> str:
if config.cpp.enable_kernel_profile:
graph_id = V.graph.graph_id
prefix = "graph_" + str(graph_id) + "_" if graph_id is not None else ""
handle_str = (
"torch::aot_inductor::RAIIAtenRecordFunctionHandle "
f'record_{prefix}{self.kernel_name}_("{prefix}{self.kernel_name}", nullptr);'
)
return handle_str
else:
return ""
def unroll_pragma(self, unroll):
if cpp_builder.is_gcc():
return f"#pragma GCC unroll {unroll}"
else:
return f"#pragma unroll {unroll}"
def define_buffer(self, name, sizes: list[Any], dtype=torch.float) -> str:
"""Define kernel local buffer"""
sizes = parse_expr_with_index_symbols(sizes)
buf = ir.Buffer(
name=name, layout=ir.FixedLayout(torch.device("cpu"), dtype, sizes)
)
self.local_buffers[name] = buf
ctype = f"{DTYPE_TO_CPP[dtype]}"
numel = f"{cexpr_index(buf.get_numel())}"
return f"auto _{name} = std::make_unique<{ctype}[]>({numel}); auto {name} = _{name}.get();"
def define_stack_allocated_buffer(
self, name, sizes: list[Any], dtype=torch.float
) -> str:
"""Define stack-allocated buffer"""
sizes = parse_expr_with_index_symbols(sizes)
buf = ir.Buffer(
name=name, layout=ir.FixedLayout(torch.device("cpu"), dtype, sizes)
)
self.local_buffers[name] = buf
ctype = f"{DTYPE_TO_CPP[dtype]}"
numel = f"{cexpr_index(buf.get_numel())}"
return f"alignas(64) {ctype} _{name}[{numel}]; {ctype}* {name} = _{name};"
def reinit_buffer_if_null(self, name):
"""Reinit the previously defined local buffer if it is null"""
assert name in self.local_buffers
buf = self.local_buffers[name]
ctype = f"{DTYPE_TO_CPP[buf.layout.dtype]}"
numel = f"{cexpr_index(buf.get_numel())}"
return f"if (_{name} == nullptr) {{ _{name} = std::make_unique<{ctype}[]>({numel}); {name} = _{name}.get(); }}"
def release_buffer(self, name):
"""Codegen the code to release the ownership of a local buffer to others"""
assert name in self.local_buffers
return f"_{name}.release()"
def store_pointwise_nodes(
self,
dst: ir.Buffer,
nodes: list[ir.IRNode],
offsets: Optional[list[sympy.Expr]] = None,
reindexers: Optional[list[Optional[Callable[[list[Any]], list[Any]]]]] = None,
) -> str:
var_sizes = (tuple(dst.get_size()), ())
var_ranges = {
sympy_index_symbol_with_prefix(SymT.INDEX, i): sz
for i, sz in enumerate(var_sizes[0])
}
if not offsets:
offsets = [sympy.S.Zero] * len(var_sizes[0])
if not reindexers:
reindexers = [None] * len(nodes)
assert len(offsets) == len(var_sizes[0])
output_index = dst.get_layout().make_indexer()([*var_ranges.keys()])
kernel_group = KernelGroup()
kernel_group.args = self.args
cpp_kernel_proxy = CppKernelProxy(kernel_group)
bodies = []
var_sizes_list = []
for i, node in enumerate(nodes):
output_name = node.get_name() if i < len(nodes) - 1 else dst.get_name()
node = node.data if isinstance(node, ir.ComputedBuffer) else node
assert isinstance(node, ir.Pointwise), node
def fn(*args):
assert len(args) == 2
assert len(args[0]) == len(var_sizes[0])
assert len(args[1]) == 0
new_args = [arg + offset for arg, offset in zip(args[0], offsets)] # type: ignore[arg-type]
if reindexers[i] is not None:
new_args = reindexers[i](new_args) # type: ignore[misc]
V.ops.store(
output_name,
output_index,
node.make_loader()(new_args).value,
)
body = LoopBody(
fn,
(list(var_ranges.keys()), ()),
var_ranges,
list(var_ranges.keys()),
tuple(),
)
bodies.append(body)
var_sizes_list.append(var_sizes)
cpp_kernel_proxy.codegen_loop_bodies(bodies, var_sizes_list)
def max_parallel_depth():
return ParallelDepth(parallel_depth=0, start_depth=0)
# This loop is not parallelized since it is not the outermost loop.
with patch.object(
cpp_kernel_proxy.loop_nest, "max_parallel_depth", max_parallel_depth
):
kernel_group.finalize_kernel(cpp_kernel_proxy, [])
return kernel_group.loops_code.getvalue()
def store_grouped_gemm_pointwise_nodes(
self,
dst: tuple[ir.Buffer],
nodes: list[ir.IRNode],
offsets: list[sympy.Expr],
reindexers: list[Optional[Callable[[list[Any]], list[Any]]]],
output_names: list[str],
) -> str:
ref_dst = dst[0]
var_sizes = (tuple(ref_dst.get_size()), ())
var_ranges = {
sympy_index_symbol_with_prefix(SymT.INDEX, i): sz
for i, sz in enumerate(var_sizes[0])
}
assert offsets, "offsets should be set outside"
assert all(len(offset) == len(var_sizes[0]) for offset in offsets)
output_index = ref_dst.get_layout().make_indexer()([*var_ranges.keys()])
kernel_group = KernelGroup()
kernel_group.args = self.args
cpp_kernel_proxy = CppKernelProxy(kernel_group)
bodies = []
var_sizes_list = []
for i, node in enumerate(nodes):
output_name = output_names[i]
node = node.data if isinstance(node, ir.ComputedBuffer) else node
assert isinstance(node, ir.Pointwise), node
def fn(*args):
assert len(args) == 2
assert len(args[0]) == len(var_sizes[0])
assert len(args[1]) == 0
new_args = [arg + offset for arg, offset in zip(args[0], offsets[i])] # type: ignore[arg-type]
if reindexers[i] is not None:
new_args = reindexers[i](new_args) # type: ignore[misc]
V.ops.store(
output_name,
output_index,
node.make_loader()(new_args).value,
)
body = LoopBody(
fn,
(list(var_ranges.keys()), ()),
var_ranges,
list(var_ranges.keys()),
tuple(),
)
bodies.append(body)
var_sizes_list.append(var_sizes)
cpp_kernel_proxy.codegen_loop_bodies(bodies, var_sizes_list)
def max_parallel_depth():
return ParallelDepth(parallel_depth=0, start_depth=0)
# This loop is not parallelized since it is not the outermost loop.
with patch.object(
cpp_kernel_proxy.loop_nest, "max_parallel_depth", max_parallel_depth
):
kernel_group.finalize_kernel(cpp_kernel_proxy, [])
return kernel_group.loops_code.getvalue()
def store_output(
self,
dst: ir.Buffer,
src: ir.Buffer,
orig_src: Optional[ir.Buffer] = None,
epilogue_nodes: Optional[list[ir.IRNode]] = None,
offsets: Optional[list[Any]] = None,
reindexers: Optional[list[Optional[Callable[[list[Any]], list[Any]]]]] = None,
):
"""
Store the `src` buffer to the `dst` buffer. The size of `src` and `dst` should match.
If `epilogue_nodes` is provided, the `src` buffer is first computed with the epilogues
before being stored to `dst`. The `epilogue_nodes` are all pointwise.
Notes:
1. `src` and `dst` buffer could be the same buffer in which case we are doing in-place compute
and stores. In case `epilogue_nodes` are not provided, we do nothing.
2. The `epilogue_nodes`, if they exist, compute on `src` before storing to `dst`, but since
they come from the original Inductor IR, they might need to be adjusted before working with
`src` and `dst` as outlined below:
a) `src` or `dst` buffer could be a sub-slice of the ranges the `epilogue_nodes` work on.
In this case, the `offsets` could be provided to adjust the indices passed to
`epilogue_nodes` during codegen and the data ranges are also configured according to
the sizes of `src` and `dst`.
b) `dst` might be indexed in a different way than the `epilogue_nodes`, hence a `reindexer` is
needed on the indices to `epilogue_nodes` to match the indexing of `dst`.
c) If `src` is local, we need to add a local buffer for it and localize the `orig_src` buffer
in `epilogue_nodes` with `src`.
"""
assert isinstance(dst, (ir.Buffer, ir.ReinterpretView))
assert dst.get_size() == src.get_size(), f"{dst=}, {src=}"
if offsets:
offsets = parse_expr_with_index_symbols(offsets)
if epilogue_nodes:
with LocalBufferContext(self.args) as scope:
assert orig_src is not None
if orig_src.get_name() != src.get_name():
scope.add_local_buffer(
src,
[
orig_src,
],
)
epilogue_nodes = scope.localize_nodes(epilogue_nodes)
return self.store_pointwise_nodes(
# pyrefly: ignore [bad-argument-type]
dst,
epilogue_nodes, # type: ignore[arg-type]
offsets,
reindexers,
)
else:
if dst.get_name() != src.get_name():
# src is local
copy = L.copy(dst, src).data.data
with LocalBufferContext(self.args) as scope:
scope.add_local_buffer(src)
# pyrefly: ignore [bad-argument-type]
return self.store_pointwise_nodes(dst, [copy])
else:
assert dst.layout == src.layout, f"{dst=}, {src=}"
return ""
def store_outputs(
self,
dst: tuple[ir.Buffer],
src: tuple[ir.IRNode],
orig_src: Optional[tuple[ir.IRNode]] = None,
epilogue_nodes: Optional[list[ir.IRNode]] = None,
offsets: Optional[list[Any]] = None,
reindexers: Optional[list[Optional[Callable[[list[Any]], list[Any]]]]] = None,
multi_output_buffers: Optional[tuple[ir.MultiOutput]] = None,
):
assert isinstance(dst, Iterable)
assert all(_dst.get_size() == _src.get_size() for _src, _dst in zip(src, dst))
if offsets:
offsets = parse_expr_with_index_symbols(offsets)
gemm_num = len(src)
final_offsets = []
output_names = []
if epilogue_nodes:
if not reindexers:
reindexers = [None] * len(epilogue_nodes)
with LocalBufferContext(self.args) as scope:
assert orig_src is not None
localize_epilogue_nodes = []
all_read_names = []
for epilogue in epilogue_nodes:
all_read_names.extend(list(epilogue.get_read_names()))
localize_epilogue_nodes.extend(scope.localize_nodes(epilogue_nodes))
final_offsets.extend([offsets] * len(localize_epilogue_nodes))
output_names.extend(
[node.get_name() for node in localize_epilogue_nodes]
)
for gemm_idx in range(gemm_num):
if orig_src[gemm_idx].get_name() != src[gemm_idx].get_name():
if orig_src[gemm_idx].get_name() in all_read_names or (
multi_output_buffers
and multi_output_buffers[gemm_idx].get_name()
in all_read_names
):
# If any of the Epilogue nodes use this GEMM output, let's localize the GEMM output
global_buffers = [orig_src[gemm_idx]]
if (
multi_output_buffers
and multi_output_buffers[gemm_idx].get_name()
in all_read_names
and orig_src[gemm_idx].get_name() not in all_read_names
):
# Epilogue might directly read the MultiOutput; localize the MultiOutput to the local buffer
# if this MultiOutput has not been stored by an in-template epilogue;
# otherwise, use the cse store cache if it will be stored before being used
global_buffers.append(multi_output_buffers[gemm_idx])
scope.add_local_buffer(
src[gemm_idx],
global_buffers,
)
else:
scope.add_local_buffer(src[gemm_idx])
localize_epilogue_nodes.extend(
[L.copy(dst[gemm_idx], src[gemm_idx]).data.data]
)
reindexers.append(None)
output_names.append(dst[gemm_idx].get_name())
final_offsets.append(
[sympy.S.Zero] * len(dst[gemm_idx].get_size())
)
res = self.store_grouped_gemm_pointwise_nodes(
dst,
localize_epilogue_nodes,
final_offsets,
reindexers,
output_names=output_names,
)
for gemm_idx in range(gemm_num):
if (
multi_output_buffers
and multi_output_buffers[gemm_idx].get_name() in all_read_names
):
# If the MultiOutput is used in the Epilogue, let's remove it from args
multi_output_name = multi_output_buffers[gemm_idx].get_name()
if (
multi_output_name in self.args.output_buffers
and self.args.output_buffers[multi_output_name]
is not REMOVED
):
self.remove_buffer(multi_output_name)
return res
else:
if dst[0].get_name() != src[0].get_name():
copy_list = []
with LocalBufferContext(self.args) as scope:
for _src, _dst in zip(src, dst):
copy_list.extend([L.copy(_dst, _src).data.data])
scope.add_local_buffer(_src)
output_names.append(_dst.get_name())
final_offsets.append([sympy.S.Zero] * len(_dst.get_size()))
reindexers = [None] * len(copy_list)
return self.store_grouped_gemm_pointwise_nodes(
dst,
nodes=copy_list,
offsets=final_offsets,
reindexers=reindexers,
output_names=output_names,
)
else:
assert all(
_src.get_name() == _dst.get_name() for _src, _dst in zip(src, dst)
)
assert all(
_src.get_layout() == _dst.get_layout()
for _src, _dst in zip(src, dst)
)
return ""
def check_bounds(self, expr, size, lower, upper):
# CppTemplateKernel does not need codegen related operations
return
|
CppTemplateKernel
|
python
|
astropy__astropy
|
astropy/units/format/generic.py
|
{
"start": 1071,
"end": 12689
}
|
class ____(_ParsingFormatMixin):
"""Provide the parser used by Generic, FITS and VOUnit."""
_tokens: ClassVar[tuple[str, ...]] = (
"COMMA",
"POWER",
"PRODUCT",
"DIVISION",
"OPEN_PAREN",
"CLOSE_PAREN",
"FUNCNAME",
"UNIT",
"SIGN",
"UINT",
"UFLOAT",
)
@classproperty(lazy=True)
def _lexer(cls) -> Lexer:
tokens = cls._tokens
t_COMMA = r"\,"
t_PRODUCT = "[*.]"
t_DIVISION = "/"
t_POWER = r"\^|(\*\*)"
t_OPEN_PAREN = r"\("
t_CLOSE_PAREN = r"\)"
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+)?"
if not re.search(r"[eE\.]", t.value):
t.type = "UINT"
t.value = int(t.value)
elif t.value.endswith("."):
t.type = "UINT"
t.value = int(t.value[:-1])
else:
t.value = float(t.value)
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+-](?=\d)"
t.value = int(t.value + "1")
return t
# This needs to be a function so we can force it to happen
# before t_UNIT
def t_FUNCNAME(t):
r"((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\ *\()"
return t
# A possible unit is something that consists of characters not used
# for anything else: no spaces, no digits, signs, periods, stars,
# carets, parentheses or commas.
def t_UNIT(t):
r"[^\s\d+\-\./\*\^\(\)\,]+"
t.value = cls._get_unit(t)
return t
t_ignore = " "
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
return parsing.lex(
lextab="generic_lextab", package="astropy/units", reflags=int(re.UNICODE)
)
@classproperty(lazy=True)
def _parser(cls) -> ThreadSafeParser:
"""
The grammar here is based on the description in the `FITS
standard
<http://fits.gsfc.nasa.gov/standard30/fits_standard30aa.pdf>`_,
Section 4.3, which is not terribly precise. The exact grammar
here is based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
This same grammar is used by the `"fits"` and `"vounit"`
formats, the only difference being the set of available unit
strings.
"""
tokens = cls._tokens
def p_main(p):
"""
main : unit
| structured_unit
| structured_subunit
"""
if isinstance(p[1], tuple):
# Unpack possible StructuredUnit inside a tuple, ie.,
# ignore any set of very outer parentheses.
p[0] = p[1][0]
else:
p[0] = p[1]
def p_structured_subunit(p):
"""
structured_subunit : OPEN_PAREN structured_unit CLOSE_PAREN
"""
# We hide a structured unit enclosed by parentheses inside
# a tuple, so that we can easily distinguish units like
# "(au, au/day), yr" from "au, au/day, yr".
p[0] = (p[2],)
def p_structured_unit(p):
"""
structured_unit : subunit COMMA
| subunit COMMA subunit
"""
from astropy.units.structured import StructuredUnit
inputs = (p[1],) if len(p) == 3 else (p[1], p[3])
units = ()
for subunit in inputs:
if isinstance(subunit, tuple):
# Structured unit that should be its own entry in the
# new StructuredUnit (was enclosed in parentheses).
units += subunit
elif isinstance(subunit, StructuredUnit):
# Structured unit whose entries should be
# individually added to the new StructuredUnit.
units += subunit.values()
else:
# Regular unit to be added to the StructuredUnit.
units += (subunit,)
p[0] = StructuredUnit(units)
def p_subunit(p):
"""
subunit : unit
| structured_unit
| structured_subunit
"""
p[0] = p[1]
def p_unit(p):
"""
unit : product_of_units
| factor product_of_units
| factor PRODUCT product_of_units
| division_product_of_units
| factor division_product_of_units
| factor PRODUCT division_product_of_units
| inverse_unit
| factor inverse_unit
| factor PRODUCT inverse_unit
| factor
"""
if len(p) == 2:
p[0] = Unit(p[1])
elif len(p) == 3:
p[0] = CompositeUnit(p[1] * p[2].scale, p[2].bases, p[2].powers)
elif len(p) == 4:
p[0] = CompositeUnit(p[1] * p[3].scale, p[3].bases, p[3].powers)
def p_division_product_of_units(p):
"""
division_product_of_units : division_product_of_units DIVISION product_of_units
| product_of_units
"""
if len(p) == 4:
p[0] = Unit(p[1] / p[3])
else:
p[0] = p[1]
def p_inverse_unit(p):
"""
inverse_unit : DIVISION unit_expression
"""
p[0] = p[2] ** -1
def p_factor(p):
"""
factor : factor_fits
| factor_float
| factor_int
"""
p[0] = p[1]
def p_factor_float(p):
"""
factor_float : signed_float
| signed_float UINT signed_int
| signed_float UINT POWER numeric_power
"""
if cls.name == "fits":
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 4:
p[0] = p[1] * p[2] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** float(p[4])
elif len(p) == 2:
p[0] = p[1]
def p_factor_int(p):
"""
factor_int : UINT
| UINT signed_int
| UINT POWER numeric_power
| UINT UINT signed_int
| UINT UINT POWER numeric_power
"""
if cls.name == "fits":
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** float(p[2])
elif len(p) == 4:
if isinstance(p[2], int):
p[0] = p[1] * p[2] ** float(p[3])
else:
p[0] = p[1] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** p[4]
def p_factor_fits(p):
"""
factor_fits : UINT POWER OPEN_PAREN signed_int CLOSE_PAREN
| UINT POWER OPEN_PAREN UINT CLOSE_PAREN
| UINT POWER signed_int
| UINT POWER UINT
| UINT SIGN UINT
| UINT OPEN_PAREN signed_int CLOSE_PAREN
"""
if p[1] != 10:
if cls.name == "fits":
raise ValueError("Base must be 10")
else:
return
if len(p) == 4:
if p[2] in ("**", "^"):
p[0] = 10 ** p[3]
else:
p[0] = 10 ** (p[2] * p[3])
elif len(p) == 5:
p[0] = 10 ** p[3]
elif len(p) == 6:
p[0] = 10 ** p[4]
def p_product_of_units(p):
"""
product_of_units : unit_expression PRODUCT product_of_units
| unit_expression product_of_units
| unit_expression
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1] * p[3]
def p_unit_expression(p):
"""
unit_expression : function
| unit_with_power
| OPEN_PAREN product_of_units CLOSE_PAREN
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_unit_with_power(p):
"""
unit_with_power : UNIT POWER numeric_power
| UNIT numeric_power
| UNIT
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** p[2]
else:
p[0] = p[1] ** p[3]
def p_numeric_power(p):
"""
numeric_power : sign UINT
| OPEN_PAREN paren_expr CLOSE_PAREN
"""
if len(p) == 3:
p[0] = p[1] * p[2]
elif len(p) == 4:
p[0] = p[2]
def p_paren_expr(p):
"""
paren_expr : sign UINT
| signed_float
| frac
"""
if len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_frac(p):
"""
frac : sign UINT DIVISION sign UINT
"""
p[0] = Fraction(p[1] * p[2], p[4] * p[5])
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1
def p_signed_int(p):
"""
signed_int : SIGN UINT
"""
p[0] = p[1] * p[2]
def p_signed_float(p):
"""
signed_float : sign UINT
| sign UFLOAT
"""
p[0] = p[1] * p[2]
def p_function(p):
"""
function : FUNCNAME OPEN_PAREN main CLOSE_PAREN
"""
if p[1] == "sqrt":
p[0] = p[3] ** 0.5
return
elif p[1] in ("mag", "dB", "dex"):
try:
function_unit = cls._validate_unit(p[1])
except KeyError:
raise ValueError(cls._invalid_unit_error_message(p[1])) from None
# In Generic, this is callable, but that does not have to
# be the case in subclasses (e.g., in VOUnit it is not).
if callable(function_unit):
p[0] = function_unit(p[3])
return
raise ValueError(f"'{p[1]}' is not a recognized function")
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule="generic_parsetab", package="astropy/units")
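A hedged usage sketch, not part of this module: strings in this generic format are parsed through the grammar above when passed to `astropy.units.Unit`.
```python
from astropy import units as u

# Parsed by the generic grammar defined above: factors, powers, products,
# divisions, parentheses and functions such as sqrt().
u1 = u.Unit("10**3 m / s")          # scaled composite unit
u2 = u.Unit("erg / (cm2 s)")        # parenthesised product in the denominator
u3 = u.Unit("sqrt(m)")              # FUNCNAME rule
print(u1, u2, u3)
```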
|
_GenericParserMixin
|
python
|
django__django
|
tests/check_framework/test_security.py
|
{
"start": 357,
"end": 2547
}
|
class ____(SimpleTestCase):
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=[],
)
def test_session_cookie_secure_with_installed_app(self):
"""
Warn if SESSION_COOKIE_SECURE is off and "django.contrib.sessions" is
in INSTALLED_APPS.
"""
self.assertEqual(sessions.check_session_cookie_secure(None), [sessions.W010])
@override_settings(
SESSION_COOKIE_SECURE="1",
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=[],
)
def test_session_cookie_secure_with_installed_app_truthy(self):
"""SESSION_COOKIE_SECURE must be boolean."""
self.assertEqual(sessions.check_session_cookie_secure(None), [sessions.W010])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=[],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"],
)
def test_session_cookie_secure_with_middleware(self):
"""
Warn if SESSION_COOKIE_SECURE is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE.
"""
self.assertEqual(sessions.check_session_cookie_secure(None), [sessions.W011])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"],
)
def test_session_cookie_secure_both(self):
"""
If SESSION_COOKIE_SECURE is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(sessions.check_session_cookie_secure(None), [sessions.W012])
@override_settings(
SESSION_COOKIE_SECURE=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"],
)
def test_session_cookie_secure_true(self):
"""
If SESSION_COOKIE_SECURE is on, there's no warning about it.
"""
self.assertEqual(sessions.check_session_cookie_secure(None), [])
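A hedged standalone sketch of the same check outside a test class; it assumes Django settings are already configured (for example via `DJANGO_SETTINGS_MODULE`) and mirrors the first test above.
```python
from django.core.checks.security import sessions
from django.test import override_settings

# With the sessions app installed but SESSION_COOKIE_SECURE off, the check
# reports W010, as in test_session_cookie_secure_with_installed_app.
with override_settings(
    SESSION_COOKIE_SECURE=False,
    INSTALLED_APPS=["django.contrib.sessions"],
    MIDDLEWARE=[],
):
    assert sessions.check_session_cookie_secure(None) == [sessions.W010]
```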
|
CheckSessionCookieSecureTest
|
python
|
getsentry__sentry
|
src/sentry/tasks/store.py
|
{
"start": 1676,
"end": 2806
}
|
class ____(Exception):
pass
def should_process(data: Mapping[str, Any]) -> bool:
"""Quick check if processing is needed at all."""
from sentry.plugins.base import plugins
if data.get("type") == "transaction":
return False
for plugin in plugins.all(version=2):
processors = safe_execute(plugin.get_event_preprocessors, data=data)
if processors:
return True
if should_process_for_stacktraces(data):
return True
return False
def submit_process(
from_reprocessing: bool,
cache_key: str,
event_id: str | None,
start_time: float | None,
data_has_changed: bool = False,
from_symbolicate: bool = False,
has_attachments: bool = False,
) -> None:
if from_reprocessing:
task = process_event_from_reprocessing
else:
task = process_event
task.delay(
cache_key=cache_key,
start_time=start_time,
event_id=event_id,
data_has_changed=data_has_changed,
from_symbolicate=from_symbolicate,
has_attachments=has_attachments,
)
@dataclass(frozen=True)
|
RetryProcessing
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/declarative_automation/serialized_objects.py
|
{
"start": 7088,
"end": 7589
}
|
class ____(Generic[T_EntityKey]):
"""A union of an AutomatConditionEvaluation and the set of run IDs that have been launched in
response to it.
"""
evaluation: AutomationConditionEvaluation[T_EntityKey]
run_ids: frozenset[str]
@property
def key(self) -> T_EntityKey:
return self.evaluation.key
@property
def num_requested(self) -> int:
return self.evaluation.true_subset.size
@whitelist_for_serdes
@dataclass
|
AutomationConditionEvaluationWithRunIds
|
python
|
huggingface__transformers
|
src/transformers/models/owlvit/modeling_owlvit.py
|
{
"start": 41074,
"end": 49664
}
|
class ____(OwlViTPreTrainedModel):
config: OwlViTConfig
def __init__(self, config: OwlViTConfig):
super().__init__(config)
if not isinstance(config.text_config, OwlViTTextConfig):
raise TypeError(
"config.text_config is expected to be of type OwlViTTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, OwlViTVisionConfig):
raise TypeError(
"config.vision_config is expected to be of type OwlViTVisionConfig but is of type"
f" {type(config.vision_config)}."
)
text_config = config.text_config
vision_config = config.vision_config
self.projection_dim = config.projection_dim
self.text_embed_dim = text_config.hidden_size
self.vision_embed_dim = vision_config.hidden_size
self.text_model = OwlViTTextTransformer(text_config)
self.vision_model = OwlViTVisionTransformer(vision_config)
self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
self.logit_scale = nn.Parameter(torch.tensor(config.logit_scale_init_value))
# Initialize weights and apply final processing
self.post_init()
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(
self,
input_ids: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.FloatTensor:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`OwlViTTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, OwlViTModel
>>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
>>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
>>> inputs = processor(
... text=[["a photo of a cat", "a photo of a dog"], ["photo of a astranaut"]], return_tensors="pt"
... )
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```"""
# Get embeddings for all text queries in all batch samples
text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask)
text_features = self.text_projection(text_outputs.pooler_output)
return text_features
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(
self,
pixel_values: torch.Tensor,
interpolate_pos_encoding: bool = False,
) -> torch.FloatTensor:
r"""
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
applying the projection layer to the pooled output of [`OwlViTVisionModel`].
Examples:
```python
>>> import torch
>>> from transformers.image_utils import load_image
>>> from transformers import AutoProcessor, OwlViTModel
>>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
>>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```"""
vision_outputs: BaseModelOutputWithPooling = self.vision_model(
pixel_values=pixel_values,
interpolate_pos_encoding=interpolate_pos_encoding,
)
image_features = self.visual_projection(vision_outputs.pooler_output)
return image_features
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
return_loss: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_base_image_embeds: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, OwlViTOutput]:
r"""
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
return_base_image_embeds (`bool`, *optional*):
Whether or not to return the base image embeddings.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, OwlViTModel
>>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
>>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
# Use OWL-ViT model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
# Get embeddings for all text queries in all batch samples
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
text_embeds = text_outputs[1]
text_embeds = self.text_projection(text_embeds)
image_embeds = vision_outputs[1]
image_embeds = self.visual_projection(image_embeds)
# normalized features
image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
text_embeds_norm = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)
# cosine similarity as logits and set it on the correct device
logit_scale = self.logit_scale.exp().to(image_embeds.device)
logits_per_text = torch.matmul(text_embeds_norm, image_embeds.t()) * logit_scale
logits_per_image = logits_per_text.t()
loss = None
if return_loss:
loss = owlvit_loss(logits_per_text)
text_embeds = text_embeds_norm
if not return_dict:
output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
return ((loss,) + output) if loss is not None else output
return OwlViTOutput(
loss=loss,
logits_per_image=logits_per_image,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
image_embeds=image_embeds,
text_model_output=text_outputs,
vision_model_output=vision_outputs,
)
|
OwlViTModel
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-increment-to-make-array-unique.py
|
{
"start": 33,
"end": 562
}
|
class ____(object):
def minIncrementForUnique(self, A):
"""
:type A: List[int]
:rtype: int
"""
A.sort()
A.append(float("inf"))
result, duplicate = 0, 0
for i in xrange(1, len(A)):
if A[i-1] == A[i]:
duplicate += 1
result -= A[i]
else:
move = min(duplicate, A[i]-A[i-1]-1)
duplicate -= move
result += move*A[i-1] + move*(move+1)//2
return result
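A quick hedged check of the solution on the standard LeetCode 945 example; it runs under Python 2, which the `xrange` call above requires.
```python
# [3,2,1,2,1,7] sorts to [1,1,2,2,3,7]; resolving the duplicates costs 6 moves,
# e.g. incrementing to [1,2,3,4,5,7] (any optimal assignment costs the same).
print(Solution().minIncrementForUnique([3, 2, 1, 2, 1, 7]))  # -> 6
```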
|
Solution
|
python
|
django__django
|
tests/migrations/test_migrations_squashed_loop/2_squashed.py
|
{
"start": 35,
"end": 229
}
|
class ____(migrations.Migration):
replaces = [("migrations", "2_auto")]
dependencies = [("migrations", "1_auto")]
operations = [migrations.RunPython(migrations.RunPython.noop)]
|
Migration
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_shuffle.py
|
{
"start": 30286,
"end": 35452
}
|
class ____(BaseSetIndexSortValues):
_parameters = [
"frame",
"by",
"ascending",
"na_position",
"npartitions",
"partition_size",
"sort_function",
"sort_function_kwargs",
"upsample",
"ignore_index",
"shuffle_method",
"options", # Options for the chosen shuffle method
]
_defaults = {
"partition_size": 128e6,
"ascending": True,
"npartitions": None,
"na_position": "last",
"sort_function": None,
"sort_function_kwargs": None,
"upsample": 1.0,
"ignore_index": False,
"shuffle_method": None,
}
_filter_passthrough = True
def _divisions(self):
if self.frame.npartitions == 1:
# Protect against triggering calculations when we only have one division
return (None, None)
divisions, mins, maxes, presorted = _get_divisions(
self.frame,
self.frame[self.by[0]],
self._npartitions_input,
self._divisions_ascending,
upsample=self.upsample,
)
if presorted:
return self.frame.divisions
return (None,) * len(divisions)
@property
def _divisions_ascending(self) -> bool:
divisions_ascending = self.ascending
if not isinstance(divisions_ascending, bool):
divisions_ascending = divisions_ascending[0]
assert isinstance(divisions_ascending, bool)
return divisions_ascending
@property
def sort_function(self):
if self.operand("sort_function") is not None:
return self.operand("sort_function")
return M.sort_values
@property
def sort_function_kwargs(self):
sort_kwargs = {
"by": self.by,
"ascending": self.ascending,
"na_position": self.na_position,
"ignore_index": self.ignore_index,
}
if self.operand("sort_function_kwargs") is not None:
sort_kwargs.update(self.operand("sort_function_kwargs"))
return sort_kwargs
@functools.cached_property
def _meta(self):
return self.frame._meta
@functools.cached_property
def _meta_by_dtype(self):
dtype = self._meta.dtypes[self.by]
if is_series_like(dtype):
dtype = dtype.iloc[0]
return dtype
def _lower(self):
if self.frame.npartitions == 1:
return SortValuesBlockwise(
self.frame, self.sort_function, self.sort_function_kwargs
)
_divisions_by = self.frame[self.by[0]]
divisions, _, _, presorted = _get_divisions(
self.frame,
_divisions_by,
self._npartitions_input,
self._divisions_ascending,
upsample=self.upsample,
)
if presorted and self.npartitions == self.frame.npartitions:
return SortValuesBlockwise(
self.frame, self.sort_function, self.sort_function_kwargs
)
partitions = _SetPartitionsPreSetIndex(
_divisions_by,
_divisions_by._meta._constructor(divisions).sort_values(),
ascending=self._divisions_ascending,
)
assigned = Assign(self.frame, "_partitions", partitions)
shuffled = Shuffle(
assigned,
"_partitions",
npartitions_out=len(divisions) - 1,
ignore_index=self.ignore_index,
method=self.shuffle_method,
options=self.options,
)
shuffled = Projection(shuffled, self.frame.columns)
return SortValuesBlockwise(
shuffled, self.sort_function, self.sort_function_kwargs
)
def _simplify_up(self, parent, dependents):
from dask.dataframe.dask_expr._expr import Filter, Head, Tail
if isinstance(parent, Head):
return NFirst(
self.frame, n=parent.n, _columns=self.by, ascending=self.ascending
)
if isinstance(parent, Tail):
return NLast(
self.frame, n=parent.n, _columns=self.by, ascending=self.ascending
)
if isinstance(parent, Filter) and self._filter_passthrough_available(
parent, dependents
):
return self._filter_simplification(parent)
if isinstance(parent, Projection):
columns = determine_column_projection(
self, parent, dependents, additional_columns=self.by
)
columns = _convert_to_list(columns)
columns = [col for col in self.frame.columns if col in columns]
if self.frame.columns == columns:
return
return type(parent)(
type(self)(self.frame[columns], *self.operands[1:]),
parent.operand("columns"),
)
if (
isinstance(parent, Repartition)
and parent.operand("new_partitions") is not None
):
return type(self)(
type(parent)(self.frame, *parent.operands[1:]), *self.operands[1:]
)
|
SortValues
|
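The SortValues expression above is normally reached through the public DataFrame API rather than constructed directly; a minimal sketch of that entry point, assuming a local dask.dataframe installation (column names and data are illustrative).
import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({"x": [3, 1, 2, 5, 4], "y": range(5)})
ddf = dd.from_pandas(pdf, npartitions=2)

# Triggers the divisions / shuffle / blockwise-sort plan sketched in _lower above.
result = ddf.sort_values("x").compute()
assert list(result["x"]) == [1, 2, 3, 4, 5]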
python
|
pytorch__pytorch
|
torch/distributed/elastic/events/api.py
|
{
"start": 626,
"end": 1685
}
|
class ____:
"""
The class represents the generic event that occurs during the torchelastic job execution.
The event can be any kind of meaningful action.
Args:
name: event name.
source: the event producer, e.g. agent or worker
timestamp: timestamp in milliseconds when event occurred.
metadata: additional data that is associated with the event.
"""
name: str
source: EventSource
timestamp: int = 0
metadata: dict[str, EventMetadataValue] = field(default_factory=dict)
def __str__(self):
return self.serialize()
@staticmethod
def deserialize(data: Union[str, "Event"]) -> "Event":
if isinstance(data, Event):
return data
if isinstance(data, str):
data_dict = json.loads(data)
data_dict["source"] = EventSource[data_dict["source"]] # type: ignore[possibly-undefined]
# pyrefly: ignore [unbound-name]
return Event(**data_dict)
def serialize(self) -> str:
return json.dumps(asdict(self))
|
Event
|
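A minimal round-trip sketch for the Event record above. It assumes the enclosing module's @dataclass decorator and a string-valued EventSource enum with a member named AGENT, both of which are defined outside the shown span; the event name and metadata are illustrative.
from torch.distributed.elastic.events.api import Event, EventSource

# dataclass -> JSON string -> dataclass (str branch of deserialize)
original = Event(name="worker.start", source=EventSource.AGENT, metadata={"rank": 0})
payload = original.serialize()          # json.dumps(asdict(...))
restored = Event.deserialize(payload)   # json.loads + enum name lookup
assert restored.name == original.name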
python
|
dask__distributed
|
distributed/diagnostics/plugin.py
|
{
"start": 20528,
"end": 21815
}
|
class ____:
INSTALLER = "conda"
packages: list[str]
conda_options: list[str]
def __init__(self, packages: list[str], conda_options: list[str] | None = None):
self.packages = packages
self.conda_options = conda_options or []
def __call__(self) -> None:
logger.info(
"%s installing the following packages: %s",
self.INSTALLER,
self.packages,
)
try:
from conda.cli.python_api import Commands, run_command
except ModuleNotFoundError as e: # pragma: nocover
msg = (
"conda install failed because conda could not be found. "
"Please make sure that conda is installed."
)
logger.error(msg)
raise RuntimeError(msg) from e
try:
_, stderr, returncode = run_command(
Commands.INSTALL, self.conda_options + self.packages
)
except Exception as e:
msg = "conda install failed"
logger.error(msg)
raise RuntimeError(msg) from e
if returncode != 0:
msg = f"conda install failed with '{stderr.decode().strip()}'"
logger.error(msg)
raise RuntimeError(msg)
|
_CondaInstaller
|
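A brief sketch of how the installer above is driven. The class is private to the plugin module and is normally wrapped by a worker plugin, so this only shows construction and the call shape; actually invoking it requires a conda environment, and the package spec below is illustrative.
from distributed.diagnostics.plugin import _CondaInstaller

installer = _CondaInstaller(packages=["numpy>=1.26"], conda_options=["--yes"])
# Calling installer() shells out through conda.cli.python_api.run_command(Commands.INSTALL, ...)
# and raises RuntimeError if conda is missing or the install returns a non-zero code.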
python
|
spack__spack
|
lib/spack/spack/vendor/ruamel/yaml/constructor.py
|
{
"start": 2070,
"end": 2139
}
|
class ____(MarkedYAMLFutureWarning):
pass
|
DuplicateKeyFutureWarning
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/elements/plotly_chart_test.py
|
{
"start": 982,
"end": 10908
}
|
class ____(DeltaGeneratorTestCase):
def test_basic(self):
"""Test that plotly object works."""
df = px.data.gapminder().query("country=='Canada'")
fig = px.line(df, x="year", y="lifeExp", title="Life expectancy in Canada")
st.plotly_chart(fig)
el = self.get_delta_from_queue().new_element
assert el.plotly_chart.spec != ""
assert el.plotly_chart.config != ""
# Check that deprecated properties are empty
assert el.plotly_chart.figure.spec == ""
assert el.plotly_chart.figure.config == ""
assert not el.plotly_chart.HasField("url")
@parameterized.expand(
[
("streamlit", "streamlit"),
(None, ""),
]
)
def test_theme(self, theme_value, proto_value):
df = px.data.gapminder().query("country=='Canada'")
fig = px.line(df, x="year", y="lifeExp", title="Life expectancy in Canada")
st.plotly_chart(fig, theme=theme_value)
el = self.get_delta_from_queue().new_element
assert el.plotly_chart.theme == proto_value
def test_bad_theme(self):
df = px.data.gapminder().query("country=='Canada'")
fig = px.line(df, x="year", y="lifeExp", title="Life expectancy in Canada")
with pytest.raises(StreamlitAPIException) as exc:
st.plotly_chart(fig, theme="bad_theme")
assert str(exc.value) == (
'You set theme="bad_theme" while Streamlit charts only support '
"theme=”streamlit” or theme=None to fallback to the default library theme."
)
def test_st_plotly_chart_simple(self):
"""Test st.plotly_chart."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
st.plotly_chart(data)
el = self.get_delta_from_queue().new_element
assert not el.plotly_chart.HasField("url")
assert el.plotly_chart.spec != ""
assert el.plotly_chart.config != ""
def test_works_with_element_replay(self):
"""Test that element replay works for plotly if used as non-widget element."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
@st.cache_data
def cache_element():
st.plotly_chart(data)
with patch(
"streamlit.runtime.caching.cache_utils.replay_cached_messages",
wraps=cached_message_replay.replay_cached_messages,
) as replay_cached_messages_mock:
cache_element()
el = self.get_delta_from_queue().new_element.plotly_chart
assert el.spec != ""
# The first time the cached function is called, the replay function is not called
replay_cached_messages_mock.assert_not_called()
cache_element()
el = self.get_delta_from_queue().new_element.plotly_chart
assert el.spec != ""
# The second time the cached function is called, the replay function is called
replay_cached_messages_mock.assert_called_once()
cache_element()
el = self.get_delta_from_queue().new_element.plotly_chart
assert el.spec != ""
# The third time the cached function is called, the replay function is called
replay_cached_messages_mock.assert_called()
@parameterized.expand(
[
("rerun", [0, 1, 2]),
("ignore", []),
(lambda: None, [0, 1, 2]),
]
)
def test_st_plotly_chart_valid_on_select(self, on_select, proto_value):
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
st.plotly_chart(data, on_select=on_select)
el = self.get_delta_from_queue().new_element
assert el.plotly_chart.selection_mode == proto_value
assert el.plotly_chart.form_id == ""
def test_plotly_chart_on_select_initial_returns(self):
"""Test st.plotly_chart returns an empty selection as initial result."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
selection = st.plotly_chart(data, on_select="rerun", key="plotly_chart")
assert selection.selection.points == []
assert selection.selection.box == []
assert selection.selection.lasso == []
assert selection.selection.point_indices == []
# Check that the selection state is added to the session state:
assert st.session_state.plotly_chart.selection.points == []
assert st.session_state.plotly_chart.selection.box == []
assert st.session_state.plotly_chart.selection.lasso == []
assert st.session_state.plotly_chart.selection.point_indices == []
def test_st_plotly_chart_invalid_on_select(self):
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
with pytest.raises(StreamlitAPIException):
st.plotly_chart(data, on_select="invalid")
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_inside_form_on_select_rerun(self):
"""Test that form id is marshalled correctly inside of a form."""
import plotly.graph_objs as go
with st.form("form"):
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
st.plotly_chart(data, on_select="rerun")
# 2 elements will be created: form block, plotly_chart
assert len(self.get_all_deltas_from_queue()) == 2
form_proto = self.get_delta_from_queue(0).add_block
plotly_proto = self.get_delta_from_queue(1).new_element.plotly_chart
assert plotly_proto.form_id == form_proto.form.form_id
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_inside_form_on_select_ignore(self):
"""Test that form id is marshalled correctly inside of a form."""
import plotly.graph_objs as go
with st.form("form"):
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
st.plotly_chart(data, on_select="ignore")
# 2 elements will be created: form block, plotly_chart
assert len(self.get_all_deltas_from_queue()) == 2
form_proto = self.get_delta_from_queue(0).add_block
plotly_proto = self.get_delta_from_queue(1).new_element.plotly_chart
assert plotly_proto.form_id == form_proto.form.form_id
def test_shows_cached_widget_replay_warning(self):
"""Test that a warning is shown when this is used with selections activated
inside a cached function."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
st.cache_data(lambda: st.plotly_chart(data, on_select="rerun"))()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
def test_selection_mode_parsing(self):
"""Test that the selection_mode parameter is parsed correctly."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
st.plotly_chart(data, on_select="rerun", selection_mode="points")
el = self.get_delta_from_queue().new_element
assert el.plotly_chart.selection_mode == [0]
st.plotly_chart(data, on_select="rerun", selection_mode=("points", "lasso"))
el = self.get_delta_from_queue().new_element
assert el.plotly_chart.selection_mode == [0, 2]
st.plotly_chart(data, on_select="rerun", selection_mode={"box", "lasso"})
el = self.get_delta_from_queue().new_element
assert el.plotly_chart.selection_mode == [1, 2]
# If selections are deactivated, the selection mode list should be empty
# even if the selection_mode parameter is set.
st.plotly_chart(data, on_select="ignore", selection_mode={"box", "lasso"})
el = self.get_delta_from_queue().new_element
assert el.plotly_chart.selection_mode == []
st.plotly_chart(
data, on_select=lambda: None, selection_mode=["points", "box", "lasso"]
)
el = self.get_delta_from_queue().new_element
assert el.plotly_chart.selection_mode == [0, 1, 2]
# Should throw an exception of the selection mode is parsed wrongly
with pytest.raises(StreamlitAPIException):
st.plotly_chart(data, on_select="rerun", selection_mode=["invalid", "box"])
def test_plotly_config(self):
"""Test st.plotly_chart config dict parameter."""
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
config = {"displayModeBar": False, "responsive": True}
st.plotly_chart(data, config=config)
el = self.get_delta_from_queue().new_element
assert el.plotly_chart.config != ""
assert '"displayModeBar": false' in el.plotly_chart.config
assert '"responsive": true' in el.plotly_chart.config
def test_show_deprecation_warning_for_kwargs(self):
import plotly.graph_objs as go
trace0 = go.Scatter(x=[1, 2, 3, 4], y=[10, 15, 13, 17])
data = [trace0]
st.plotly_chart(data, sharing="streamlit")
# Get the second to last element, which should be deprecation warning
el = self.get_delta_from_queue(-2).new_element
assert (
"have been deprecated and will be removed in a future release"
in el.alert.body
)
|
PlotlyTest
|
python
|
django__django
|
tests/syndication_tests/feeds.py
|
{
"start": 4488,
"end": 4842
}
|
class ____(TestRss2Feed):
"""
A feed to test defining item titles and descriptions with templates.
"""
title_template = "syndication/title.html"
description_template = "syndication/description.html"
# Defining a template overrides any item_title definition
def item_title(self):
return "Not in a template"
|
TemplateFeed
|
python
|
pydantic__pydantic
|
pydantic/v1/errors.py
|
{
"start": 12932,
"end": 13068
}
|
class ____(PydanticValueError):
code = 'date.not_in_the_future'
msg_template = 'date is not in the future'
|
DateNotInTheFutureError
|
python
|
walkccc__LeetCode
|
solutions/2961. Double Modular Exponentiation/2961.py
|
{
"start": 0,
"end": 230
}
|
class ____:
def getGoodIndices(
self,
variables: list[list[int]],
target: int,
) -> list[int]:
return [i for i, (a, b, c, m) in enumerate(variables)
if pow(pow(a, b, 10), c, m) == target]
|
Solution
|
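A worked instance of the check pow(pow(a, b, 10), c, m) == target used above; the input values below are only an example, and the arithmetic is spelled out in the comment.
variables = [[2, 3, 3, 10], [3, 3, 3, 1], [6, 1, 1, 4]]
target = 2
good = [i for i, (a, b, c, m) in enumerate(variables)
        if pow(pow(a, b, 10), c, m) == target]
# index 0: 2**3 % 10 = 8, then 8**3 % 10 = 2; index 2: 6**1 % 10 = 6, then 6**1 % 4 = 2
assert good == [0, 2]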
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/layers/merge.py
|
{
"start": 10399,
"end": 11242
}
|
class ____(_Merge):
"""Layer that multiplies (element-wise) a list of inputs.
It takes as input a list of tensors, all of the same shape, and returns
a single tensor (also of the same shape).
>>> tf.keras.layers.Multiply()([np.arange(5).reshape(5, 1),
... np.arange(5, 10).reshape(5, 1)])
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[ 0],
[ 6],
[14],
[24],
[36]])>
>>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
>>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
>>> multiplied = tf.keras.layers.Multiply()([x1, x2])
>>> multiplied.shape
TensorShape([5, 8])
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = output * inputs[i]
return output
|
Multiply
|
python
|
ansible__ansible
|
test/units/module_utils/facts/test_facts.py
|
{
"start": 2629,
"end": 2809
}
|
class ____(BaseTestFactsPlatform):
platform_id = 'GNU'
fact_class = hardware.hurd.HurdHardware
collector_class = hardware.hurd.HurdHardwareCollector
|
TestHurdFactsPlatform
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/base/llms/types.py
|
{
"start": 14534,
"end": 15075
}
|
class ____(BaseModel):
"""A representation of cited content from past messages."""
block_type: Literal["citation"] = "citation"
cited_content: Annotated[
Union[TextBlock, ImageBlock], Field(discriminator="block_type")
]
source: str
title: str
additional_location_info: Dict[str, int]
@field_validator("cited_content", mode="before")
@classmethod
def validate_cited_content(cls, v: Any) -> Any:
if isinstance(v, str):
return TextBlock(text=v)
return v
|
CitationBlock
|
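A small sketch of the string-coercion behaviour of the validator above: a plain string passed as cited_content is wrapped in a TextBlock before the discriminated-union check runs. The import path mirrors the file shown above, and the field values are illustrative.
from llama_index.core.base.llms.types import CitationBlock, TextBlock

block = CitationBlock(
    cited_content="LLMs can cite their sources.",   # plain str is coerced by the validator
    source="https://example.com/paper",
    title="An illustrative source",
    additional_location_info={"page": 3},
)
assert isinstance(block.cited_content, TextBlock)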
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/overloadOverlap1.py
|
{
"start": 6604,
"end": 6638
}
|
class ____(E1, Generic[TE1]): ...
|
E2
|
python
|
google__pytype
|
pytype/rewrite/overlays/special_builtins_test.py
|
{
"start": 362,
"end": 831
}
|
class ____(SpecialBuiltinsTest):
def test_types_match(self):
assert_type_func = self.load_builtin_function('assert_type')
var = self.ctx.consts[0].to_variable()
typ = abstract.SimpleClass(self.ctx, 'int', {}).to_variable()
ret = assert_type_func.call(abstract.Args(posargs=(var, typ)))
self.assertEqual(ret.get_return_value(), self.ctx.consts[None])
self.assertEqual(len(self.ctx.errorlog), 0) # pylint: disable=g-generic-assert
|
AssertTypeTest
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py
|
{
"start": 2764,
"end": 4061
}
|
class ____(PreTrainedModel):
config: Qwen3OmniMoeConfig
base_model_prefix = "model"
input_modalities = ("image", "video", "audio", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["Qwen3OmniMoeDecoderLayer", "Qwen3OmniMoeVisionBlock"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = False
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
std = self.config.initializer_range
if isinstance(module, Qwen3OmniMoeThinkerTextSparseMoeBlock):
init.normal_(module.experts.gate_up_proj, mean=0.0, std=std)
init.normal_(module.experts.down_proj, mean=0.0, std=std)
init.normal_(module.router.weight, mean=0.0, std=std)
def _get_feat_extract_output_lengths(input_lengths):
"""
Computes the output length of the convolutional layers and the output length of the audio encoder
"""
input_lengths_leave = input_lengths % 100
feat_lengths = (input_lengths_leave - 1) // 2 + 1
output_lengths = ((feat_lengths - 1) // 2 + 1 - 1) // 2 + 1 + (input_lengths // 100) * 13
return output_lengths
|
Qwen3OmniMoePreTrainedModel
|
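A worked evaluation of the _get_feat_extract_output_lengths formula above for a single length, to make the per-100-frame chunking explicit; the input length 250 is arbitrary.
input_lengths = 250
leave = input_lengths % 100                      # 50 frames past the last full 100-frame chunk
feat_lengths = (leave - 1) // 2 + 1              # 25
output_lengths = ((feat_lengths - 1) // 2 + 1 - 1) // 2 + 1 + (input_lengths // 100) * 13
assert output_lengths == 33                      # 7 from the remainder + 2 full chunks * 13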
python
|
numpy__numpy
|
numpy/ma/tests/test_old_ma.py
|
{
"start": 26978,
"end": 29518
}
|
class ____:
def _create_data(self):
return (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
def test_testUfuncRegression(self):
f_invalid_ignore = [
'sqrt', 'arctanh', 'arcsin', 'arccos',
'arccosh', 'arctanh', 'log', 'log10', 'divide',
'true_divide', 'floor_divide', 'remainder', 'fmod']
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
'floor', 'ceil',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor']:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(np.ma, f)
args = self._create_data()[:uf.nin]
with np.errstate():
if f in f_invalid_ignore:
np.seterr(invalid='ignore')
if f in ['arctanh', 'log', 'log10']:
np.seterr(divide='ignore')
ur = uf(*args)
mr = mf(*args)
assert_(eq(ur.filled(0), mr.filled(0), f))
assert_(eqmask(ur.mask, mr.mask))
def test_reduce(self):
a = self._create_data()[0]
assert_(not alltrue(a, axis=0))
assert_(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
def test_minmax(self):
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_((amask.max(0) == a.max(0)).all())
assert_((amask.min(0) == [5, 6, 7, 8]).all())
assert_(amask.max(1)[0].mask)
assert_(amask.min(1)[0].mask)
def test_nonzero(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = array([1, 0, 2, 0], mask=[0, 0, 1, 1])
assert_(eq(nonzero(x), [0]))
|
TestUfuncs
|
python
|
scrapy__scrapy
|
tests/test_pipelines.py
|
{
"start": 1479,
"end": 1818
}
|
class ____:
async def process_item(self, item):
d = Deferred()
loop = asyncio.get_event_loop()
loop.call_later(0, d.callback, None)
await deferred_to_future(d)
await asyncio.sleep(0.2)
item["pipeline_passed"] = await get_from_asyncio_queue(True)
return item
|
AsyncDefAsyncioPipeline
|
python
|
fastai__fastai
|
fastai/vision/augment.py
|
{
"start": 45159,
"end": 46051
}
|
class ____():
def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
return torch.exp(res)
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=1., batch=self.batch)
def __call__(self, x): return x.mul_(self.change[:,None,None,None])
# %% ../../nbs/09_vision.augment.ipynb 212
@patch
@delegates(_ContrastLogit.__init__)
def contrast(x: TensorImage, **kwargs):
func = _ContrastLogit(**kwargs)
func.before_call(x)
return x.lighting(func)
# %% ../../nbs/09_vision.augment.ipynb 213
|
_ContrastLogit
|
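The per-item change factor in _def_draw above is exp(U(log(1 - m), -log(1 - m))), which makes the contrast scaling multiplicatively symmetric around 1. A tiny standalone numeric sketch (not using the fastai class itself):
import math
import random

max_lighting = 0.2
lo, hi = math.log(1 - max_lighting), -math.log(1 - max_lighting)   # +/- log(0.8)
factor = math.exp(random.uniform(lo, hi))
# 0.8 <= factor <= 1/0.8 == 1.25 (up to float rounding); factor == 1 means "no change".
assert (1 - max_lighting) - 1e-9 <= factor <= 1 / (1 - max_lighting) + 1e-9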
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_getlimits.py
|
{
"start": 1767,
"end": 2981
}
|
class ____(TestCase):
@skipIf(numpy.__version__ < "1.23", reason=".smallest_normal is new")
def test_basic(self):
dts = list(
zip(
["f2", "f4", "f8", "c8", "c16"],
[np.float16, np.float32, np.float64, np.complex64, np.complex128],
)
)
for dt1, dt2 in dts:
for attr in (
"bits",
"eps",
"max",
"min",
"resolution",
"tiny",
"smallest_normal",
):
assert_equal(getattr(finfo(dt1), attr), getattr(finfo(dt2), attr), attr)
with assert_raises((TypeError, ValueError)):
finfo("i4")
@skip # (reason="Some of these attributes are not implemented vs NP versions")
def test_basic_missing(self):
dt = np.float32
for attr in [
"epsneg",
"iexp",
"machep",
"maxexp",
"minexp",
"negep",
"nexp",
"nmant",
"precision",
"smallest_subnormal",
]:
getattr(finfo(dt), attr)
@instantiate_parametrized_tests
|
TestFinfo
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/system_config/composite_descent.py
|
{
"start": 968,
"end": 1529
}
|
class ____(
NamedTuple("_SolidConfigEntry", [("handle", NodeHandle), ("solid_config", OpConfig)])
):
def __new__(cls, handle: NodeHandle, op_config: OpConfig):
return super().__new__(
cls,
check.inst_param(handle, "handle", NodeHandle),
check.inst_param(op_config, "solid_config", OpConfig),
)
# This is a dummy handle used to simplify the code, corresponding to the root container (graph). It
# doesn't actually represent a node during execution.
_ROOT_HANDLE = NodeHandle("root", None)
|
OpConfigEntry
|
python
|
pytorch__pytorch
|
test/test_overrides.py
|
{
"start": 12440,
"end": 39097
}
|
class ____(TestCase):
def test_dtype_override(self):
class MyDtype:
def __torch_function__(self, *args, **kwargs):
return 4
self.assertEqual(torch.empty(4).view(MyDtype()), 4)
def test_mean_semantics(self):
"""Test that a function with one argument can be overridden"""
t1 = DiagonalTensor(5, 2)
t2 = SubTensor([[1, 2], [1, 2]])
t3 = SubDiagonalTensor(5, 2)
self.assertEqual(torch.mean(t1), 0.4)
self.assertEqual(bar(t1), -1)
self.assertEqual(torch.mean(t2), 0)
self.assertEqual(bar(t2), 1)
self.assertEqual(torch.mean(t3), 4.0)
self.assertEqual(bar(t3), 0)
def test_has_torch_function_non_sequence(self):
with self.assertRaisesRegex(TypeError, "expected a sequence"):
has_torch_function(object())
def test_mm_semantics(self):
"""Test that a function with multiple arguments can be overridden"""
t1 = DiagonalTensor(5, 2)
t2 = torch.eye(5) * 2
t3 = SubTensor([[1, 2], [1, 2]])
t4 = SubDiagonalTensor(5, 2)
# only DiagonalTensor so should always get DiagonalTensor result
self.assertEqual(torch.mm(t1, t1), 0)
# tensor and DiagonalTensor, always return DiagonalTensor result
self.assertEqual(torch.mm(t1, t2), 0)
self.assertEqual(torch.mm(t2, t1), 0)
# only SubTensor so should always get SubTensor result
self.assertEqual(torch.mm(t3, t3), -1)
# tensor and SubTensor so should always get SubTensor result
self.assertEqual(torch.mm(t3, t2), -1)
self.assertEqual(torch.mm(t2, t3), -1)
# DiagonalTensor and SubTensor are unrelated classes so the result
# depends on which argument appears first
self.assertEqual(torch.mm(t3, t1), -1)
self.assertEqual(torch.mm(t1, t3), 0)
# SubDiagonalTensor should take precedence over DiagonalTensor
# but should behave otherwise the same as DiagonalTensor
self.assertEqual(torch.mm(t4, t4), 1)
self.assertEqual(torch.mm(t4, t1), 1)
self.assertEqual(torch.mm(t1, t4), 1)
self.assertEqual(torch.mm(t4, t2), 1)
self.assertEqual(torch.mm(t2, t4), 1)
self.assertEqual(torch.mm(t3, t4), -1)
self.assertEqual(torch.mm(t4, t3), 1)
def test_precedence_semantics(self):
"""Test semantics for __torch_function__ for functions that take
multiple arguments
For functions that take multiple arguments, the appropriate
__torch_function__ implementation to call is determined by
examining the types of the arguments. The precedence order is
left-to-right in the argument list, except subclasses are always
checked before superclasses. The first result of calling the
implementations in precedence order that is not NotImplemented
is returned to the user. If all implementations return
NotImplemented, a TypeError is raised.
All cases are tested with functions implemented in C++ and
either foo or baz, which are python functions defined above that
are instrumented to obey the same dispatch rules as the
functions in torch.functional.
"""
# DiagonalTensor has a valid override and SubDiagonal has an
# override that returns NotImplemented so we should call the
# DiagonalTensor implementation, returning -1
t1 = DiagonalTensor(5, 2)
t2 = SubDiagonalTensor(5, 2)
self.assertEqual(torch.div(t1, t2), -1)
self.assertEqual(torch.div(t2, t1), -1)
self.assertEqual(foo(t1, t2), -1)
self.assertEqual(foo(t2, t1), -1)
# SubTensor has an implementation that returns NotImplemented as
# well so it should behave exactly like SubDiagonalTensor in the
# test above
t3 = SubTensor([[1, 2], [1, 2]])
self.assertEqual(torch.div(t1, t3), -1)
self.assertEqual(torch.div(t3, t1), -1)
self.assertEqual(foo(t1, t3), -1)
self.assertEqual(foo(t3, t1), -1)
# div between SubTensor and SubDiagonalTensor should raise
# TypeError since both have an implementation that
# explicitly returns NotImplemented
with self.assertRaises(TypeError):
torch.div(t2, t3)
with self.assertRaises(TypeError):
torch.div(t3, t2)
with self.assertRaises(TypeError):
foo(t2, t3)
with self.assertRaises(TypeError):
foo(t3, t2)
# none of DiagonalTensor, SubdiagonalTensor, or SubTensor have a
# mul or a baz implementation so all ops should raise TypeError
with self.assertRaises(TypeError):
torch.mul(t1, t1)
with self.assertRaises(TypeError):
torch.mul(t1, t2)
with self.assertRaises(TypeError):
torch.mul(t1, t3)
with self.assertRaises(TypeError):
torch.mul(t2, t1)
with self.assertRaises(TypeError):
torch.mul(t2, t2)
with self.assertRaises(TypeError):
torch.mul(t2, t3)
with self.assertRaises(TypeError):
torch.mul(t3, t1)
with self.assertRaises(TypeError):
torch.mul(t3, t2)
with self.assertRaises(TypeError):
torch.mul(t3, t3)
with self.assertRaises(TypeError):
baz(t1, t1)
with self.assertRaises(TypeError):
baz(t1, t2)
with self.assertRaises(TypeError):
baz(t1, t3)
with self.assertRaises(TypeError):
baz(t2, t1)
with self.assertRaises(TypeError):
baz(t2, t2)
with self.assertRaises(TypeError):
baz(t2, t3)
with self.assertRaises(TypeError):
baz(t3, t1)
with self.assertRaises(TypeError):
baz(t3, t2)
with self.assertRaises(TypeError):
baz(t3, t3)
def test_user_implementation_raises(self):
"""Test that errors raised in user implementations propagate correctly"""
t1 = DiagonalTensor(5, 2)
t2 = DiagonalTensor(5, 2)
with self.assertRaises(ValueError):
torch.add(t1, t2)
with self.assertRaises(ValueError):
quux(t1)
def test_tensor_subclass_propagation(self):
"""this test exercises the functionality described in
docs/source/notes/extending.rst#subclassing-torchtensor"""
t1 = torch.tensor([5])
t2 = torch.tensor([6])
s1 = SubTensor2([5])
s2 = SubTensor2([6])
ss1 = SubSubTensor2([5])
ss2 = SubSubTensor2([6])
sn1 = SubTensor3([5])
sn2 = SubTensor3([6])
# Check that leaf subclass is kept regardless of order
self.assertTrue(isinstance(s1 + t2, SubTensor2))
self.assertTrue(isinstance(t1 + s2, SubTensor2))
self.assertTrue(isinstance(s1 + s2, SubTensor2))
# Check indexing subclass is kept
self.assertTrue(isinstance(s1[0], SubTensor2))
# Check case for subclass of subclass.
self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + s2, SubSubTensor2))
self.assertTrue(isinstance(s1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1 + t2, SubSubTensor2))
self.assertTrue(isinstance(t1 + ss2, SubSubTensor2))
self.assertTrue(isinstance(ss1[0], SubSubTensor2))
# Make sure unrelated class trees are not merged.
with self.assertRaises(TypeError):
s1 + sn2
with self.assertRaises(TypeError):
sn1 + s2
def test_base(self):
# https://github.com/szagoruyko/pytorchviz/issues/65
class DummyTensor(torch.Tensor):
pass
a = torch.ones(1)
c = DummyTensor(a)
self.assertTrue(c._is_view())
self.assertTrue(c._base is a)
def test_grad(self):
# Previously, Tensor-like objects that did not subclass from Tensor
# did not get wrapped into unary tuples before being passed into
# handle_torch_function, in contradiction with how Tensor-likes
# were handled
#
# NB: this asserts that the arguments get normalized into a tuple
# before entering the torch function handler; it could go the
# other way but beware https://github.com/pytorch/pytorch/issues/76037
class Dummy:
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
inputs, outputs = args
self.assertEqual(inputs, (x,))
self.assertEqual(outputs, (x,))
return -1
x = Dummy()
self.assertEqual(torch.autograd.grad(x, x), -1)
def test_pow_rpow(self):
class NothingImplemented(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
return NotImplemented
class RPowOnly(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if func is torch.Tensor.__rpow__:
return -1
return NotImplemented
self.assertEqual(NothingImplemented() ** RPowOnly(), -1)
def test_torch_function_in_lists(self):
"""Test that __torch_function__ is called for objects inside lists"""
class IntLike:
"""Object that can be used in int lists"""
def __init__(self, value):
self.value = value
self.torch_function_called = False
def __torch_function__(self, func, types, args=(), kwargs=None):
self.torch_function_called = True
# Return a result that makes the operation succeed
if func.__name__ == 'pad':
# For pad, return the input with shape adjusted
return args[0]
elif func.__name__ == 'layer_norm':
# For layer_norm, return normalized tensor
return torch.ones_like(args[0])
elif func.__name__ == 'tensordot':
# For tensordot, return appropriate shape
return torch.tensor(42.0)
# Fallback
return torch.tensor(42.0)
# Test with F.pad which takes int list
import torch.nn.functional as F
x = torch.randn(2, 3)
obj = IntLike(1)
# pad takes [left, right, top, bottom] as padding
_ = F.pad(x, [1, obj, 0, 0])
self.assertTrue(obj.torch_function_called,
"torch_function should be called for object in int list")
# Test multiple objects in list
obj1 = IntLike(1)
obj2 = IntLike(2)
_ = F.pad(x, [obj1, obj2, 0, 0])
self.assertTrue(obj1.torch_function_called or obj2.torch_function_called,
"torch_function should be called for at least one object")
def test_torch_function_in_float_lists(self):
"""Test that __torch_function__ is called for objects inside float lists"""
class FloatLike:
"""Object that can be used in float lists"""
def __init__(self, value):
self.value = float(value)
self.torch_function_called = False
def __torch_function__(self, func, types, args=(), kwargs=None):
self.torch_function_called = True
# Return appropriate result
if func.__name__ == 'layer_norm':
return torch.ones_like(args[0])
return torch.tensor(42.0)
import torch.nn.functional as F
x = torch.randn(2, 3, 4)
obj = FloatLike(4.0)
# layer_norm takes normalized_shape as int/float list
_ = F.layer_norm(x, [3, obj])
self.assertTrue(obj.torch_function_called,
"torch_function should be called for object in float list")
def test_torch_function_in_scalar_lists(self):
"""Test that __torch_function__ is called for scalar objects inside lists"""
class ScalarLike:
"""Object that can be used as a scalar in lists"""
def __init__(self, value):
self.value = value
self.torch_function_called = False
def __torch_function__(self, func, types, args=(), kwargs=None):
self.torch_function_called = True
# Return a scalar tensor
return torch.tensor(self.value)
def __float__(self):
return float(self.value)
def __int__(self):
return int(self.value)
# Test with a function that takes scalar lists
# Using torch.as_tensor which can take scalar lists
obj1 = ScalarLike(1.0)
obj2 = ScalarLike(2.0)
# Create a tensor with scalar list containing torch function objects
# Use a different operation that should trigger torch_function
_ = torch.stack([obj1, obj2])
self.assertTrue(obj1.torch_function_called or obj2.torch_function_called,
"torch_function should be called for scalar objects in list")
def test_torch_function_precedence_in_lists(self):
"""Test precedence when multiple torch function objects are in a list"""
call_order = []
class HighPriority:
def __torch_function__(self, func, types, args=(), kwargs=None):
call_order.append('high')
# Delegate to lower priority
return NotImplemented
class LowPriority:
def __torch_function__(self, func, types, args=(), kwargs=None):
call_order.append('low')
# Return valid result
if func.__name__ == 'pad':
return args[0]
return torch.tensor(42.0)
import torch.nn.functional as F
x = torch.randn(2, 3)
high = HighPriority()
low = LowPriority()
# Test with both objects in list
call_order.clear()
_ = F.pad(x, [1, high, low, 0])
# High priority should be called first
self.assertEqual(call_order[0], 'high',
"Higher priority torch_function should be called first")
self.assertEqual(call_order[1], 'low',
"Lower priority torch_function should be called after NotImplemented")
def test_torch_function_mixed_lists(self):
"""Test lists with mix of regular values and torch function objects"""
class CountingInt:
call_count = 0
def __init__(self, value):
self.value = value
@classmethod
def reset(cls):
cls.call_count = 0
def __torch_function__(self, func, types, args=(), kwargs=None):
CountingInt.call_count += 1
# Return valid result
if func.__name__ == 'pad':
return args[0]
return torch.tensor(42.0)
def __index__(self):
return self.value
import torch.nn.functional as F
x = torch.randn(2, 3)
obj = CountingInt(2)
CountingInt.reset()
# Mix regular ints with torch function object
_ = F.pad(x, [1, obj, 0, 0])
self.assertEqual(CountingInt.call_count, 1,
"torch_function should be called exactly once for mixed list")
def test_torch_function_empty_lists(self):
"""Test that empty lists work correctly"""
# This should work without calling any torch_function
x = torch.randn(1) # Single element tensor
# Functions that accept empty lists should still work
# torch.stack with empty list of tensors would fail,
# but empty size lists should work
result = x.view([]) # Empty list means scalar
self.assertEqual(result.shape, torch.Size([]),
"Empty list should work for size arguments")
def test_torch_function_not_first_in_list(self):
"""Test that torch_function is called even when object is not first in list"""
class IntLikeNotFirst:
"""Object with torch_function that won't be first in list"""
def __init__(self, value):
self.value = value
self.torch_function_called = False
def __torch_function__(self, func, types, args=(), kwargs=None):
self.torch_function_called = True
# Return input tensor for pad
return args[0]
def __index__(self):
return self.value
import torch.nn.functional as F
x = torch.randn(2, 3)
# Test with torch_function object as second item
obj_second = IntLikeNotFirst(2)
_ = F.pad(x, [1, obj_second, 0, 0])
self.assertTrue(obj_second.torch_function_called,
"torch_function should be called when object is second in list")
# Test with torch_function object as third item
obj_third = IntLikeNotFirst(1)
_ = F.pad(x, [1, 1, obj_third, 0])
self.assertTrue(obj_third.torch_function_called,
"torch_function should be called when object is third in list")
# Test with torch_function object as last item
obj_last = IntLikeNotFirst(1)
_ = F.pad(x, [1, 1, 1, obj_last])
self.assertTrue(obj_last.torch_function_called,
"torch_function should be called when object is last in list")
def test_torch_function_nested_tuple_getitem(self):
"""Test that torch_function is called with getitem for TF objects inside nested tuples"""
called_functions = []
class TorchFunctionObj:
"""Object with torch_function that tracks which functions are called"""
def __init__(self, value):
self.value = value
def __torch_function__(self, func, types, args=(), kwargs=None):
called_functions.append(func.__name__)
# For getitem, return the tensor unchanged
if func.__name__ == '__getitem__':
return args[0]
# Return a simple result for other functions
return torch.tensor(42.0)
def __index__(self):
return self.value
# Create a tensor to index
x = torch.randn(5, 5, 5)
# Create torch function objects - these will be INSIDE the nested structure
tf_obj1 = TorchFunctionObj(0)
tf_obj2 = TorchFunctionObj(1)
# Clear the called functions list
called_functions.clear()
# Test with tuple of tuple where TF objects are only on the INSIDE
# The outer structure is regular tuples, but inner elements have __torch_function__
# This tests the recursive detection logic added in the recent commit
x[(0, (tf_obj1, tf_obj2))]
# Assert that torch_function was called
self.assertTrue(len(called_functions) > 0,
"torch_function should be called for TF objects inside nested tuples")
# Assert that getitem was called, not size
self.assertIn('__getitem__', called_functions,
"getitem should be called for tuple indexing with torch function objects inside")
self.assertNotIn('size', called_functions,
"size should not be called - we should use getitem, not convert to advanced indexing")
def generate_tensor_like_override_tests(cls):
from torch.testing._internal.generated.annotated_fn_args import annotated_args
def test_generator(func, override):
# If func corresponds to a torch.Tensor method or property.
if is_tensor_method_or_property(func):
# Generate an instance by using SubTensor,
def instance_gen():
return SubTensor([5])
else:
# Otherwise, TensorLike.
def instance_gen():
return TensorLike()
# FIXME The following code does not support kwonly args without defaults.
# The fix is easy, as one just needs to save these args when generating the variable
# annotated_args. The problem is that, if one does so, one finds a number
# of functions that have problematic signatures in native_functions.yaml.
# Fixing these would be BC breaking, so hence this terrible hack
# https://github.com/pytorch/pytorch/issues/67008
kwargs = {}
if hasattr(func, "__name__") and "linalg_solve_triangular" in func.__name__:
kwargs = {"upper": True}
func_args = []
is_method = is_tensor_method_or_property(func)
def _simple_type_parser(func, arg_name, arg_type):
# Guess valid input to aten function based on type of argument
if arg_type == "Tensor":
return instance_gen()
elif arg_type == "TensorList" or arg_type == "ITensorListRef":
return [instance_gen(), instance_gen()]
elif arg_type == "c10::List<::std::optional<Tensor>>":
return [instance_gen(), instance_gen()]
elif arg_type == "IntArrayRef" or arg_type == "SymIntArrayRef":
size = arg.get("size", 2)
if size == 1:
return 1
else:
return [1] * size
elif arg_type == "Scalar":
return 3.5
elif arg_type == "bool":
return False
elif arg_type == "Dimname":
return ""
elif arg_type == "DimnameList":
return [""]
elif arg_type.startswith("int"):
return 0
elif arg_type in {"Stream"}:
return torch.Stream()
elif arg_type.startswith("float") or arg_type == "double":
return 1.0
elif arg_type in {"Generator", "MemoryFormat", "TensorOptions"}:
return None
elif arg_type == "ScalarType":
return torch.float32
elif arg_type == "c10::string_view":
return ""
elif arg_type in ("std::string_view", "::std::string_view"):
return ""
elif arg_type == "SymInt":
# TODO: generate actual SymbolicInt
return 1
else:
raise RuntimeError(
f"Unsupported argument type {arg_type} for {arg_name} of function {func}"
)
# Special case; this doesn't have a schema but takes a list
if func is torch.sym_sum:
func_args.append([TensorLike(), TensorLike()])
elif func in annotated_args:
for arg in annotated_args[func]:
# Guess valid input to aten function based on type of argument
t = arg["simple_type"]
t = t.removesuffix("?")
if t == "Tensor" and is_method and arg["name"] == "self":
# See "Note: properties and __get__"
func = func.__get__(instance_gen())
continue
arg_to_add = _simple_type_parser(func, arg["name"], t)
if "is_kwarg_only" in arg and arg["is_kwarg_only"] == str(True):
kwargs[arg["name"]] = arg_to_add
else:
func_args.append(arg_to_add)
else:
args = inspect.getfullargspec(override)
try:
func_args = inspect.getfullargspec(func)
# Remove annotations from argspec
func_args = type(func_args)(**{**func_args, 'annotations': None})
if func_args != args:
raise RuntimeError(f"Override for {func} doesn't match its argspec.\n"
+ f"Original: {inspect.signature(func)}\n"
+ f"Override: {inspect.signature(override)}")
except TypeError:
pass
nargs = len(args.args)
if args.defaults is not None:
nargs -= len(args.defaults)
func_args = [instance_gen() for _ in range(nargs)]
if args.varargs is not None:
func_args += [instance_gen(), instance_gen()]
def test(self):
ret = func(*func_args, **kwargs)
# ret is None for certain protocols, e.g., `__weakref__` and `__setitem__`
# This is currently the best check but doesn't work for, for example,
# Tensor.__add__ because it redirects to Tensor.add.
# See note "_triggered wrapper"
if not is_method or ret is None:
self.assertTrue(WRAPPED_TRIGGERED_IMPLS[func]._triggered)
return
self.assertEqual(ret, -1)
return test
for func, override in get_testing_overrides().items():
test_method = test_generator(func, override)
if func.__name__ == "__get__":
# Note: properties and __get__
# __get__ is part of the descriptor protocol.
# https://docs.python.org/3/howto/descriptor.html
# This is used for properties of the form
# torch.Tensor.<property>, with the method __get__
# In this case we get the property name in two ways:
# This case for properties defined in C.
module = getattr(
func.__self__,
"__qualname__",
None
)
# This one for properties defined in Python.
if module is None:
module = "Tensor." + func.__self__.fget.__name__
# Unfortunately I couldn't find a way to unify these two cases
# and there is no way for general descriptors.
elif is_tensor_method_or_property(func):
module = "Tensor"
else:
module = func.__module__
if module:
name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__)
else:
name = f'test_{func.__name__}'
test_method.__name__ = name
setattr(cls, name, test_method)
generate_tensor_like_override_tests(TestTorchFunctionOverride)
|
TestTorchFunctionOverride
|
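A minimal standalone sketch of the dispatch mechanism the tests above exercise: any object whose type defines __torch_function__ intercepts torch API calls it appears in. The class name and sentinel return value are illustrative only.
import torch

class MeanOverride:
    """Non-Tensor object whose type participates in torch function dispatch."""
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        if func is torch.mean:
            return "intercepted"     # arbitrary sentinel; NotImplemented would defer instead
        return NotImplemented

assert torch.mean(MeanOverride()) == "intercepted"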
python
|
PyCQA__pylint
|
tests/functional/n/not_callable.py
|
{
"start": 893,
"end": 1519
}
|
class ____:
""" class """
def __init__(self):
self.attr = 4
@property
def test(self):
""" Get the attribute """
return self.attr
@test.setter
def test(self, value):
""" Set the attribute """
self.attr = value
@MyProperty
def custom(self):
""" Get the attribute """
return self.attr
@custom.setter
def custom(self, value):
""" Set the attribute """
self.attr = value
PROP = PropertyTest()
PROP.test(40) # [not-callable]
PROP.custom() # [not-callable]
# Safe from not-callable when using properties.
|
PropertyTest
|
python
|
huggingface__transformers
|
src/transformers/models/aimv2/modeling_aimv2.py
|
{
"start": 19551,
"end": 22362
}
|
class ____(Aimv2PreTrainedModel):
main_input_name = "input_ids"
_can_record_outputs = {
"hidden_states": Aimv2EncoderLayer,
"attentions": Aimv2Attention,
}
def __init__(self, config: Aimv2TextConfig):
super().__init__(config)
self.config = config
self.embeddings = Aimv2TextEmbeddings(config)
self.encoder = Aimv2Encoder(config)
self.rms_norm = Aimv2RMSNorm(config.hidden_size, config.rms_norm_eps)
self.eos_token_id = config.eos_token_id
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.embeddings.token_embedding
def set_input_embeddings(self, value):
self.embeddings.token_embedding = value
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
input_ids,
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPooling:
hidden_states = self.embeddings(input_ids)
batch_size, seq_len, _ = hidden_states.shape
cache_position = torch.arange(seq_len, dtype=torch.long, device=hidden_states.device)
position_ids = cache_position.unsqueeze(0).expand(batch_size, -1)
if attention_mask is not None:
attention_mask = create_causal_mask(
config=self.config,
input_embeds=hidden_states,
position_ids=position_ids,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=None,
)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
attention_mask=attention_mask,
**kwargs,
)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.rms_norm(last_hidden_state)
# Get pooled output
pooled_output = last_hidden_state[
torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
(input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id).int().argmax(dim=-1),
]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
)
def _get_vector_norm(tensor: torch.Tensor) -> torch.Tensor:
"""
This method is equivalent to tensor.norm(p=2, dim=-1, keepdim=True) and used to make
model `executorch` exportable. See issue https://github.com/pytorch/executorch/issues/3566
"""
square_tensor = torch.pow(tensor, 2)
sum_tensor = torch.sum(square_tensor, dim=-1, keepdim=True)
normed_tensor = torch.pow(sum_tensor, 0.5)
return normed_tensor
@auto_docstring
|
Aimv2TextModel
|
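A quick numerical check that the _get_vector_norm helper above matches the norm it replaces (the rewrite exists only to keep the graph exportable); the helper's body is re-computed inline here rather than imported.
import torch

x = torch.randn(4, 8)
normed = torch.pow(torch.sum(torch.pow(x, 2), dim=-1, keepdim=True), 0.5)
assert torch.allclose(normed, x.norm(p=2, dim=-1, keepdim=True))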
python
|
FactoryBoy__factory_boy
|
factory/errors.py
|
{
"start": 117,
"end": 224
}
|
class ____(FactoryError):
"""Exception for Factory subclasses lacking Meta.model."""
|
AssociatedClassError
|
python
|
facebook__pyre-check
|
scripts/tests/shape_type_coverage_test.py
|
{
"start": 13196,
"end": 14594
}
|
class ____(unittest.TestCase):
def assert_extract_text_as(
self,
corpus: List[str],
start: Position,
stop: Position,
expected: str,
) -> None:
self.assertEqual(_extract_multiline_text(corpus, start, stop), expected)
def test_extract_text(self) -> None:
corpus = ["bar(", " x,", " y,", ")"]
self.assert_extract_text_as(
corpus,
start=Position(1, 0),
stop=Position(4, 1),
expected="bar( x, y, )",
)
self.assert_extract_text_as(
corpus,
start=Position(1, 3),
stop=Position(4, 1),
expected="( x, y, )",
)
self.assert_extract_text_as(
corpus,
start=Position(1, 0),
stop=Position(2, 5),
expected="bar( x",
)
self.assert_extract_text_as(
corpus,
start=Position(1, 0),
stop=Position(1, 3),
expected="bar",
)
self.assert_extract_text_as(
corpus,
start=Position(1, 1),
stop=Position(1, 4),
expected="ar(",
)
self.assert_extract_text_as(
corpus,
start=Position(1, 0),
stop=Position(1, 4),
expected="bar(",
)
|
ExtractMultilineTextTests
|
python
|
pytorch__pytorch
|
test/higher_order_ops/test_invoke_subgraph.py
|
{
"start": 70641,
"end": 73630
}
|
class ____(torch.nn.Module):
def forward(self, L_x_: "f32[8, 8]", L_y_: "f32[16, 16]"):
l_x_ = L_x_
l_y_ = L_y_
subgraph_0 = self.subgraph_0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_); subgraph_0 = l_x_ = None
getitem: "f32[8, 8]" = invoke_subgraph[0]; invoke_subgraph = None
subgraph_1 = self.subgraph_1
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_1, 'subgraph_1', l_y_); subgraph_1 = l_y_ = None
getitem_1: "f32[16, 16]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
return (getitem, getitem_1)
class subgraph_0(torch.nn.Module):
def forward(self, l_x_: "f32[8, 8]"):
sin: "f32[8, 8]" = torch.sin(l_x_); l_x_ = None
return (sin,)
class subgraph_1(torch.nn.Module):
def forward(self, l_y_: "f32[16, 16]"):
sin: "f32[16, 16]" = torch.sin(l_y_); l_y_ = None
return (sin,)
""",
)
def test_return_size(self):
def run(dynamic):
torch.compiler.reset()
@nested_compile_region
def gn(x):
y = x + 1
z = x.shape
return y, z
def fn(x):
z0 = gn(x)
z1 = gn(x)
return z0[0] + z1[0], z0[1]
x = torch.randn(8, 8, requires_grad=True)
x_clone = x.detach().clone().requires_grad_(True)
ref = fn(x)
opt_fn = torch.compile(
fn, backend="inductor", fullgraph=True, dynamic=dynamic
)
res = opt_fn(x_clone)
self.assertEqual(ref, res)
ref[0].sum().backward()
res[0].sum().backward()
self.assertEqual(x.grad, x_clone.grad)
run(dynamic=True)
run(dynamic=False)
def test_different_symint(self):
"""
Tests check that the same subgraph called with different symints use different graphs
"""
@nested_compile_region
def gn(x):
return torch.sin(x)
def fn(x):
a = gn(x)
# Get first half of the tensor
b = torch.narrow(a, 0, 0, a.size()[0] // 2)
return gn(b)
opt_fn = torch.compile(fn, fullgraph=True)
x = torch.randn(8, 8, requires_grad=True)
torch._dynamo.mark_dynamic(x, 0)
ref = fn(x)
res = opt_fn(x)
torch._dynamo.reset()
backend = AotEagerAndRecordGraphs()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
x = torch.randn(8, 8, requires_grad=True)
torch._dynamo.mark_dynamic(x, 0)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
|
GraphModule
|
python
|
oauthlib__oauthlib
|
oauthlib/openid/connect/core/grant_types/dispatchers.py
|
{
"start": 1284,
"end": 2489
}
|
class ____(Dispatcher):
"""
This is an adapter class that will route simple Authorization
requests, those that have `id_token` in `response_type` and a scope
including `openid` to either the `default_grant` or the `oidc_grant`
based on the scopes requested.
"""
def __init__(self, default_grant=None, oidc_grant=None):
self.default_grant = default_grant
self.oidc_grant = oidc_grant
def _handler_for_request(self, request):
handler = self.default_grant
if request.scopes and "openid" in request.scopes and 'id_token' in request.response_type:
handler = self.oidc_grant
log.debug('Selecting handler for request %r.', handler)
return handler
def create_authorization_response(self, request, token_handler):
"""Read scope and route to the designated handler."""
return self._handler_for_request(request).create_authorization_response(request, token_handler)
def validate_authorization_request(self, request):
"""Read scope and route to the designated handler."""
return self._handler_for_request(request).validate_authorization_request(request)
|
ImplicitTokenGrantDispatcher
|
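A minimal sketch of the routing decision made by _handler_for_request above, using stand-in grant objects and dummy requests; only the scopes and response_type attributes mirror what the dispatcher reads, everything else is hypothetical.
from types import SimpleNamespace

from oauthlib.openid.connect.core.grant_types.dispatchers import (
    ImplicitTokenGrantDispatcher,
)

dispatcher = ImplicitTokenGrantDispatcher(default_grant="default", oidc_grant="oidc")

openid_request = SimpleNamespace(scopes=["openid", "profile"], response_type="id_token token")
plain_request = SimpleNamespace(scopes=["profile"], response_type="token")

assert dispatcher._handler_for_request(openid_request) == "oidc"
assert dispatcher._handler_for_request(plain_request) == "default"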
python
|
great-expectations__great_expectations
|
docs/sphinx_api_docs_source/build_sphinx_api_docs.py
|
{
"start": 1291,
"end": 1529
}
|
class ____:
"""Paths and metadata for a sidebar entry."""
name: str
definition: Definition
class_min_dotted_path: str | None
md_relpath: pathlib.Path
mdx_relpath: pathlib.Path
type: SidebarEntryType
|
SidebarEntry
|
python
|
pytorch__pytorch
|
test/distributed/checkpoint/test_tp_checkpoint.py
|
{
"start": 693,
"end": 1125
}
|
class ____(torch.nn.Module):
def __init__(self, device):
super().__init__()
torch.manual_seed(5)
self.net1 = torch.nn.Linear(5, 10, device=device)
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(10, 15, device=device)
self.net3 = torch.nn.Linear(15, 1, device=device)
def forward(self, x):
return self.net3(self.net2(self.relu(self.net1(x))))
|
UnevenShardedModel
|
python
|
huggingface__transformers
|
src/transformers/models/bert/modeling_bert.py
|
{
"start": 21849,
"end": 22166
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
|
BertOnlyMLMHead
|
python
|
huggingface__transformers
|
src/transformers/models/florence2/modeling_florence2.py
|
{
"start": 16114,
"end": 18555
}
|
class ____(nn.Module):
def __init__(
self,
config: Florence2VisionConfig,
stage_idx: int,
drop_path_rate: float,
):
super().__init__()
self.conv1 = nn.Conv2d(
config.embed_dim[stage_idx],
config.embed_dim[stage_idx],
kernel_size=3,
padding=1,
groups=config.embed_dim[stage_idx],
)
self.norm1 = nn.LayerNorm(config.embed_dim[stage_idx])
self.window_attn = Florence2VisionWindowAttention(config=config, stage_idx=stage_idx)
self.drop_path1 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.conv2 = nn.Conv2d(
config.embed_dim[stage_idx],
config.embed_dim[stage_idx],
kernel_size=3,
padding=1,
groups=config.embed_dim[stage_idx],
)
self.norm2 = nn.LayerNorm(config.embed_dim[stage_idx])
self.ffn = Florence2VisionMLP(config=config, stage_idx=stage_idx)
self.drop_path2 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
def forward(self, hidden_states: torch.Tensor):
batch_size, embed_dim, height, width = hidden_states.shape
# First spatial mixing block: Conv + Window Attention
hidden_states = self.conv1(hidden_states) + hidden_states
hidden_states = hidden_states.flatten(2).transpose(1, 2)
residual = hidden_states
# Spatial Window-based self-attention mechanism
hidden_states = self.norm1(hidden_states)
hidden_states = hidden_states.view(batch_size, height, width, embed_dim)
hidden_states = self.window_attn(hidden_states)
hidden_states = residual + self.drop_path1(hidden_states)
hidden_states = hidden_states.transpose(1, 2).view(batch_size, embed_dim, height, width)
# Second spatial mixing block: Conv + FFN
hidden_states = self.conv2(hidden_states) + hidden_states
hidden_states = hidden_states.flatten(2).transpose(1, 2)
residual = hidden_states
# FFN
hidden_states = self.norm2(hidden_states)
hidden_states = self.ffn(hidden_states)
hidden_states = residual + self.drop_path2(hidden_states)
hidden_states = hidden_states.transpose(1, 2).view(batch_size, embed_dim, height, width)
return hidden_states
|
Florence2VisionSpatialBlock
|
python
|
jina-ai__jina
|
jina/proto/docarray_v1/pb/jina_pb2_grpc.py
|
{
"start": 23667,
"end": 24547
}
|
class ____(object):
"""*
jina gRPC service to trigger a restore at the Executor Runtime.
"""
@staticmethod
def restore(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
'/jina.JinaExecutorRestore/restore',
jina__pb2.RestoreSnapshotCommand.SerializeToString,
jina__pb2.RestoreSnapshotStatusProto.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
|
JinaExecutorRestore
|
python
|
TheAlgorithms__Python
|
neural_network/two_hidden_layers_neural_network.py
|
{
"start": 278,
"end": 11626
}
|
class ____:
def __init__(self, input_array: np.ndarray, output_array: np.ndarray) -> None:
"""
This function initializes the TwoHiddenLayerNeuralNetwork class with random
weights for every layer and initializes predicted output with zeroes.
        input_array : input values for training the neural network (i.e. training data).
output_array : expected output values of the given inputs.
"""
# Input values provided for training the model.
self.input_array = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
rng = np.random.default_rng()
self.input_layer_and_first_hidden_layer_weights = rng.random(
(self.input_array.shape[1], 4)
)
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
self.first_hidden_layer_and_second_hidden_layer_weights = rng.random((4, 3))
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
self.second_hidden_layer_and_output_layer_weights = rng.random((3, 1))
# Real output values provided.
self.output_array = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
self.predicted_output = np.zeros(output_array.shape)
def feedforward(self) -> np.ndarray:
"""
The information moves in only one direction i.e. forward from the input nodes,
through the two hidden nodes and to the output nodes.
There are no cycles or loops in the network.
Return layer_between_second_hidden_layer_and_output
(i.e the last layer of the neural network).
>>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
>>> output_val = np.array(([0], [0], [0]), dtype=float)
>>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
>>> res = nn.feedforward()
>>> array_sum = np.sum(res)
>>> bool(np.isnan(array_sum))
False
"""
# Layer_between_input_and_first_hidden_layer is the layer connecting the
# input nodes with the first hidden layer nodes.
self.layer_between_input_and_first_hidden_layer = sigmoid(
np.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
)
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
np.dot(
self.layer_between_input_and_first_hidden_layer,
self.first_hidden_layer_and_second_hidden_layer_weights,
)
)
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
self.layer_between_second_hidden_layer_and_output = sigmoid(
np.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer,
self.second_hidden_layer_and_output_layer_weights,
)
)
return self.layer_between_second_hidden_layer_and_output
def back_propagation(self) -> None:
"""
Function for fine-tuning the weights of the neural net based on the
error rate obtained in the previous epoch (i.e., iteration).
The update uses the derivative of the sigmoid activation function.
>>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
>>> output_val = np.array(([0], [0], [0]), dtype=float)
>>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
>>> res = nn.feedforward()
>>> nn.back_propagation()
>>> updated_weights = nn.second_hidden_layer_and_output_layer_weights
>>> bool((res == updated_weights).all())
False
"""
updated_second_hidden_layer_and_output_layer_weights = np.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output),
)
updated_first_hidden_layer_and_second_hidden_layer_weights = np.dot(
self.layer_between_input_and_first_hidden_layer.T,
np.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output),
self.second_hidden_layer_and_output_layer_weights.T,
)
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer
),
)
updated_input_layer_and_first_hidden_layer_weights = np.dot(
self.input_array.T,
np.dot(
np.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output),
self.second_hidden_layer_and_output_layer_weights.T,
)
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer
),
self.first_hidden_layer_and_second_hidden_layer_weights.T,
)
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
)
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def train(self, output: np.ndarray, iterations: int, give_loss: bool) -> None:
"""
Performs the feedforward and back propagation process for the
given number of iterations.
Every iteration updates the weights of the neural network.
output : real output values, required for calculating loss.
iterations : number of times the weights are to be updated.
give_loss : boolean value; if True, prints the loss for each iteration,
if False, nothing is printed.
>>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float)
>>> output_val = np.array(([0], [1], [1]), dtype=float)
>>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
>>> first_iteration_weights = nn.feedforward()
>>> nn.back_propagation()
>>> updated_weights = nn.second_hidden_layer_and_output_layer_weights
>>> bool((first_iteration_weights == updated_weights).all())
False
"""
for iteration in range(1, iterations + 1):
self.output = self.feedforward()
self.back_propagation()
if give_loss:
loss = np.mean(np.square(output - self.feedforward()))
print(f"Iteration {iteration} Loss: {loss}")
def predict(self, input_arr: np.ndarray) -> int:
"""
Predicts the output for the given input values using
the trained neural network.
The output value given by the model ranges between 0 and 1.
The predict function returns 1 if the model value is greater
than the threshold value, else returns 0,
as the real output values are binary.
>>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float)
>>> output_val = np.array(([0], [1], [1]), dtype=float)
>>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
>>> nn.train(output_val, 1000, False)
>>> nn.predict([0, 1, 0]) in (0, 1)
True
"""
# Input values for which the predictions are to be made.
self.array = input_arr
self.layer_between_input_and_first_hidden_layer = sigmoid(
np.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
)
self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
np.dot(
self.layer_between_input_and_first_hidden_layer,
self.first_hidden_layer_and_second_hidden_layer_weights,
)
)
self.layer_between_second_hidden_layer_and_output = sigmoid(
np.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer,
self.second_hidden_layer_and_output_layer_weights,
)
)
return int((self.layer_between_second_hidden_layer_and_output > 0.6)[0])
def sigmoid(value: np.ndarray) -> np.ndarray:
"""
Applies sigmoid activation function.
return normalized values
>>> sigmoid(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64))
array([[0.73105858, 0.5 , 0.88079708],
[0.73105858, 0.5 , 0.5 ]])
"""
return 1 / (1 + np.exp(-value))
def sigmoid_derivative(value: np.ndarray) -> np.ndarray:
"""
Provides the derivative value of the sigmoid function.
returns derivative of the sigmoid value
>>> sigmoid_derivative(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64))
array([[ 0., 0., -2.],
[ 0., 0., 0.]])
"""
return (value) * (1 - (value))
def example() -> int:
"""
Example for "how to use the neural network class and use the
respected methods for the desired output".
Calls the TwoHiddenLayerNeuralNetwork class and
provides the fixed input output values to the model.
The model is trained for a fixed number of iterations, then the predict method is called.
In this example the output is divided into 2 classes, i.e. binary classification;
the two classes are represented by '0' and '1'.
>>> example() in (0, 1)
True
"""
# Input values.
test_input = np.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
),
dtype=np.float64,
)
# True output values for the given input values.
output = np.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=np.float64)
# Calling neural network class.
neural_network = TwoHiddenLayerNeuralNetwork(
input_array=test_input, output_array=output
)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=output, iterations=10, give_loss=False)
return neural_network.predict(np.array(([1, 1, 1]), dtype=np.float64))
if __name__ == "__main__":
example()
|
TwoHiddenLayerNeuralNetwork
|
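For reference, the nested np.dot calls in back_propagation above implement the usual chain-rule updates. A sketch in matrix form, where X is the input, A_1 and A_2 the two hidden-layer activations, \hat{y} the prediction, y the target, and W_1, W_2, W_3 the three weight matrices (sigmoid_derivative receives already-activated values, so \sigma'(a) stands for a \odot (1 - a)):
\Delta W_3 = A_2^{\top}\big[\,2(y-\hat{y})\odot\sigma'(\hat{y})\,\big]
\Delta W_2 = A_1^{\top}\big[\big(2(y-\hat{y})\odot\sigma'(\hat{y})\big)W_3^{\top}\odot\sigma'(A_2)\big]
\Delta W_1 = X^{\top}\big[\big(\big(2(y-\hat{y})\odot\sigma'(\hat{y})\big)W_3^{\top}\odot\sigma'(A_2)\big)W_2^{\top}\odot\sigma'(A_1)\big]
Each weight matrix is then incremented by its \Delta, which moves the weights so as to reduce the squared error between y and \hat{y}.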
python
|
keras-team__keras
|
keras/src/ops/core_test.py
|
{
"start": 46018,
"end": 49222
}
|
class ____(testing.TestCase):
"""Test the dtype to verify that the behavior matches JAX."""
ALL_DTYPES = [
x
for x in dtypes.ALLOWED_DTYPES
if x
not in (
"string",
"complex64",
"complex128",
# Remove 64-bit dtypes.
"float64",
"uint64",
"int64",
)
+ dtypes.FLOAT8_TYPES # Remove float8 dtypes for the following tests
] + [None]
INT_DTYPES = [x for x in dtypes.INT_TYPES if x not in ("uint64", "int64")]
FLOAT_DTYPES = [x for x in dtypes.FLOAT_TYPES if x not in ("float64",)]
if backend.backend() == "torch":
ALL_DTYPES = [x for x in ALL_DTYPES if x not in ("uint16", "uint32")]
INT_DTYPES = [x for x in INT_DTYPES if x not in ("uint16", "uint32")]
@parameterized.named_parameters(
named_product(
dtype=[dtype for dtype in ALL_DTYPES if dtype is not None]
)
)
def test_cast(self, dtype):
x = np.ones((1,))
self.assertDType(core.cast(x, dtype), dtype)
self.assertDType(core.Cast(dtype).symbolic_call(x), dtype)
@parameterized.parameters(
((), None, backend.floatx()),
([], None, backend.floatx()),
(bool(0), None, "bool"),
(int(0), None, "int32"),
(float(0), None, backend.floatx()),
(1, "bool", "bool"),
(1.0, "int32", "int32"),
(1.0, "float32", "float32"),
([False, True, False], None, "bool"),
([1, 2, 3], None, "int32"),
([1.0, 2.0, 3.0], None, backend.floatx()),
([1, 2.0, 3], None, backend.floatx()),
([[False], [True], [False]], None, "bool"),
([[1], [2], [3]], None, "int32"),
([[1], [2.0], [3]], None, backend.floatx()),
*[
(np.array(0, dtype=dtype), None, dtype)
for dtype in ALL_DTYPES
if dtype is not None
],
*[
([[1, 0, 1], [1, 1, 0]], dtype, dtype)
for dtype in ALL_DTYPES
if dtype is not None
],
)
def test_convert_to_tensor(self, x, dtype, expected_dtype):
self.assertDType(ops.convert_to_tensor(x, dtype=dtype), expected_dtype)
@parameterized.named_parameters(
named_product(
dtype=[dtype for dtype in ALL_DTYPES if dtype is not None]
)
)
def test_convert_to_tensor_with_tensor(self, dtype):
x = ops.convert_to_tensor(np.ones((2, 3), dtype="float32"))
self.assertDType(ops.convert_to_tensor(x, dtype=dtype), dtype)
@parameterized.named_parameters(
named_product(
dtype=[dtype for dtype in ALL_DTYPES if dtype is not None]
)
)
def test_convert_to_tensor_with_variable(self, dtype):
x = backend.Variable(np.ones((2, 3), dtype="float32"))
self.assertDType(ops.convert_to_tensor(x, dtype=dtype), dtype)
@parameterized.named_parameters(named_product(dtype=ALL_DTYPES))
def test_saturate_cast(self, dtype):
x = np.ones((1,))
self.assertDType(core.saturate_cast(x, dtype), dtype)
self.assertDType(core.SaturateCast(dtype).symbolic_call(x), dtype)
|
CoreOpsDtypeTest
|
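A minimal sketch of the dtype behavior these tests pin down, assuming the public Keras 3 keras.ops and keras.backend APIs; the concrete dtype strings are illustrative.
from keras import backend, ops
x = ops.ones((1,))                          # backend floatx, typically "float32"
y = ops.cast(x, "int16")                    # cast must yield exactly the requested dtype
print(backend.standardize_dtype(y.dtype))   # "int16"
t = ops.convert_to_tensor([1, 2, 3])        # Python ints infer "int32", per the parameterized cases
print(backend.standardize_dtype(t.dtype))   # "int32"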
python
|
django__django
|
tests/queries/test_query.py
|
{
"start": 847,
"end": 6750
}
|
class ____(SimpleTestCase):
def test_simple_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
def test_non_alias_cols_query(self):
query = Query(Author, alias_cols=False)
where = query.build_where(Q(num__gt=2, name__isnull=False) | Q(num__lt=F("id")))
name_isnull_lookup, num_gt_lookup = where.children[0].children
self.assertIsInstance(num_gt_lookup, GreaterThan)
self.assertIsInstance(num_gt_lookup.lhs, Col)
self.assertIsNone(num_gt_lookup.lhs.alias)
self.assertIsInstance(name_isnull_lookup, IsNull)
self.assertIsInstance(name_isnull_lookup.lhs, Col)
self.assertIsNone(name_isnull_lookup.lhs.alias)
num_lt_lookup = where.children[1]
self.assertIsInstance(num_lt_lookup, LessThan)
self.assertIsInstance(num_lt_lookup.rhs, Col)
self.assertIsNone(num_lt_lookup.rhs.alias)
self.assertIsInstance(num_lt_lookup.lhs, Col)
self.assertIsNone(num_lt_lookup.lhs.alias)
def test_complex_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2) | Q(num__lt=0))
self.assertEqual(where.connector, OR)
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
lookup = where.children[1]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.rhs, 0)
self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
def test_multiple_fields(self):
query = Query(Item, alias_cols=False)
where = query.build_where(Q(modified__gt=F("created")))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertIsInstance(lookup.rhs, Col)
self.assertIsNone(lookup.rhs.alias)
self.assertIsInstance(lookup.lhs, Col)
self.assertIsNone(lookup.lhs.alias)
self.assertEqual(lookup.rhs.target, Item._meta.get_field("created"))
self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
def test_transform(self):
query = Query(Author, alias_cols=False)
with register_lookup(CharField, Lower):
where = query.build_where(~Q(name__lower="foo"))
lookup = where.children[0]
self.assertIsInstance(lookup, Exact)
self.assertIsInstance(lookup.lhs, Lower)
self.assertIsInstance(lookup.lhs.lhs, Col)
self.assertIsNone(lookup.lhs.lhs.alias)
self.assertEqual(lookup.lhs.lhs.target, Author._meta.get_field("name"))
def test_negated_nullable(self):
query = Query(Item)
where = query.build_where(~Q(modified__lt=datetime(2017, 1, 1)))
self.assertTrue(where.negated)
lookup = where.children[0]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
lookup = where.children[1]
self.assertIsInstance(lookup, IsNull)
self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
def test_foreign_key(self):
query = Query(Item)
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
query.build_where(Q(creator__num__gt=2))
def test_foreign_key_f(self):
query = Query(Ranking)
with self.assertRaises(FieldError):
query.build_where(Q(rank__gt=F("author__num")))
def test_foreign_key_exclusive(self):
query = Query(ObjectC, alias_cols=False)
where = query.build_where(Q(objecta=None) | Q(objectb=None))
a_isnull = where.children[0]
self.assertIsInstance(a_isnull, RelatedIsNull)
self.assertIsInstance(a_isnull.lhs, Col)
self.assertIsNone(a_isnull.lhs.alias)
self.assertEqual(a_isnull.lhs.target, ObjectC._meta.get_field("objecta"))
b_isnull = where.children[1]
self.assertIsInstance(b_isnull, RelatedIsNull)
self.assertIsInstance(b_isnull.lhs, Col)
self.assertIsNone(b_isnull.lhs.alias)
self.assertEqual(b_isnull.lhs.target, ObjectC._meta.get_field("objectb"))
def test_clone_select_related(self):
query = Query(Item)
query.add_select_related(["creator"])
clone = query.clone()
clone.add_select_related(["note", "creator__extra"])
self.assertEqual(query.select_related, {"creator": {}})
def test_iterable_lookup_value(self):
query = Query(Item)
where = query.build_where(Q(name=["a", "b"]))
name_exact = where.children[0]
self.assertIsInstance(name_exact, Exact)
self.assertEqual(name_exact.rhs, "['a', 'b']")
def test_filter_conditional(self):
query = Query(Item)
where = query.build_where(Func(output_field=BooleanField()))
exact = where.children[0]
self.assertIsInstance(exact, Exact)
self.assertIsInstance(exact.lhs, Func)
self.assertIs(exact.rhs, True)
def test_filter_conditional_join(self):
query = Query(Item)
filter_expr = Func("note__note", output_field=BooleanField())
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
query.build_where(filter_expr)
def test_filter_non_conditional(self):
query = Query(Item)
msg = "Cannot filter against a non-conditional expression."
with self.assertRaisesMessage(TypeError, msg):
query.build_where(Func(output_field=CharField()))
|
TestQuery
|
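A condensed sketch of the pattern the tests above exercise, assuming a configured Django settings module and the test suite's Author model (the model import path is an assumption):
from django.db.models import Q
from django.db.models.sql.query import Query
from queries.models import Author   # assumed test-app import path
query = Query(Author)
where = query.build_where(Q(num__gt=2) | Q(num__lt=0))
print(where.connector)               # "OR"
print(where.children[0].rhs)         # 2, the right-hand side of the GreaterThan lookup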
python
|
fluentpython__example-code
|
08-obj-ref/haunted_bus.py
|
{
"start": 716,
"end": 1035
}
|
class ____:
"""A bus model haunted by ghost passengers"""
def __init__(self, passengers=[]): # <1>
self.passengers = passengers # <2>
def pick(self, name):
self.passengers.append(name) # <3>
def drop(self, name):
self.passengers.remove(name)
# END HAUNTED_BUS_CLASS
|
HauntedBus
|
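A short usage sketch (the bus variable names are hypothetical) showing the gotcha the class is named for: every HauntedBus created without an explicit passenger list aliases the single default list bound when the function was defined.
bus1 = HauntedBus(["Alice"])     # gets its own list
bus1.pick("Bill")
bus2 = HauntedBus()              # falls back to the shared default list
bus2.pick("Carrie")
bus3 = HauntedBus()              # aliases that same default list
print(bus3.passengers)                      # ["Carrie"] -- Carrie haunts every default bus
print(bus2.passengers is bus3.passengers)   # True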
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 239712,
"end": 240487
}
|
class ____(sgqlc.types.Input):
"""Autogenerated input type of
GrantEnterpriseOrganizationsMigratorRole
"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "login", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The ID of the enterprise to which all organizations managed by it
will be granted the migrator role.
"""
login = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="login")
"""The login of the user to grant the migrator role"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
GrantEnterpriseOrganizationsMigratorRoleInput
|
python
|
mlflow__mlflow
|
examples/pydanticai/tracing.py
|
{
"start": 458,
"end": 1029
}
|
class ____:
"""This is a fake database for example purposes.
In reality, you'd be connecting to an external database
(e.g. PostgreSQL) to get information about customers.
"""
@classmethod
async def customer_name(cls, *, id: int) -> str | None:
if id == 123:
return "John"
@classmethod
async def customer_balance(cls, *, id: int, include_pending: bool) -> float:
if id == 123 and include_pending:
return 123.45
else:
raise ValueError("Customer not found")
@dataclass
|
DatabaseConn
|
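A minimal standalone usage sketch of the fake database; the asyncio wrapper is an assumption, and the ids mirror the hard-coded values above.
import asyncio
async def main() -> None:
    name = await DatabaseConn.customer_name(id=123)                               # "John"
    balance = await DatabaseConn.customer_balance(id=123, include_pending=True)   # 123.45
    print(name, balance)
    missing = await DatabaseConn.customer_name(id=999)                            # None: no matching branch
    print(missing)
asyncio.run(main())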