| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python
|
numpy__numpy
|
numpy/_core/tests/test_simd.py
|
{
"start": 12383,
"end": 22559
}
|
class ____(_Test_Utility):
"""
To test all float vector types at once
"""
def test_arithmetic_fused(self):
vdata_a, vdata_b, vdata_c = [self.load(self._data())] * 3
vdata_cx2 = self.add(vdata_c, vdata_c)
# multiply and add, a*b + c
data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)])
fma = self.muladd(vdata_a, vdata_b, vdata_c)
assert fma == data_fma
# multiply and subtract, a*b - c
fms = self.mulsub(vdata_a, vdata_b, vdata_c)
data_fms = self.sub(data_fma, vdata_cx2)
assert fms == data_fms
# negate multiply and add, -(a*b) + c
nfma = self.nmuladd(vdata_a, vdata_b, vdata_c)
data_nfma = self.sub(vdata_cx2, data_fma)
assert nfma == data_nfma
# negate multiply and subtract, -(a*b) - c
nfms = self.nmulsub(vdata_a, vdata_b, vdata_c)
data_nfms = self.mul(data_fma, self.setall(-1))
assert nfms == data_nfms
# multiply, add for odd elements and subtract even elements.
# (a * b) -+ c
fmas = list(self.muladdsub(vdata_a, vdata_b, vdata_c))
assert fmas[0::2] == list(data_fms)[0::2]
assert fmas[1::2] == list(data_fma)[1::2]
def test_abs(self):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
data = self._data()
vdata = self.load(self._data())
abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan))
for case, desired in abs_cases:
data_abs = [desired] * self.nlanes
vabs = self.abs(self.setall(case))
assert vabs == pytest.approx(data_abs, nan_ok=True)
vabs = self.abs(self.mul(vdata, self.setall(-1)))
assert vabs == data
def test_sqrt(self):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
data = self._data()
vdata = self.load(self._data())
sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf))
for case, desired in sqrt_cases:
data_sqrt = [desired] * self.nlanes
sqrt = self.sqrt(self.setall(case))
assert sqrt == pytest.approx(data_sqrt, nan_ok=True)
# load to truncate precision
data_sqrt = self.load([math.sqrt(x) for x in data])
sqrt = self.sqrt(vdata)
assert sqrt == data_sqrt
def test_square(self):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
data = self._data()
vdata = self.load(self._data())
# square
square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf))
for case, desired in square_cases:
data_square = [desired] * self.nlanes
square = self.square(self.setall(case))
assert square == pytest.approx(data_square, nan_ok=True)
data_square = [x * x for x in data]
square = self.square(vdata)
assert square == data_square
@pytest.mark.parametrize("intrin, func", [("ceil", math.ceil),
("trunc", math.trunc), ("floor", math.floor), ("rint", round)])
def test_rounding(self, intrin, func):
"""
Test intrinsics:
npyv_rint_##SFX
npyv_ceil_##SFX
npyv_trunc_##SFX
npyv_floor_##SFX
"""
intrin_name = intrin
intrin = getattr(self, intrin)
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
# special cases
round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf))
for case, desired in round_cases:
data_round = [desired] * self.nlanes
_round = intrin(self.setall(case))
assert _round == pytest.approx(data_round, nan_ok=True)
for x in range(0, 2**20, 256**2):
for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15):
data = self.load([(x + a) * w for a in range(self.nlanes)])
data_round = [func(x) for x in data]
_round = intrin(data)
assert _round == data_round
# test large numbers
for i in (
1.1529215045988576e+18, 4.6116860183954304e+18,
5.902958103546122e+20, 2.3611832414184488e+21
):
x = self.setall(i)
y = intrin(x)
data_round = [func(n) for n in x]
assert y == data_round
# signed zero
if intrin_name == "floor":
data_szero = (-0.0,)
else:
data_szero = (-0.0, -0.25, -0.30, -0.45, -0.5)
for w in data_szero:
_round = self._to_unsigned(intrin(self.setall(w)))
data_round = self._to_unsigned(self.setall(-0.0))
assert _round == data_round
@pytest.mark.parametrize("intrin", [
"max", "maxp", "maxn", "min", "minp", "minn"
])
def test_max_min(self, intrin):
"""
Test intrinsics:
npyv_max_##sfx
npyv_maxp_##sfx
npyv_maxn_##sfx
npyv_min_##sfx
npyv_minp_##sfx
npyv_minn_##sfx
npyv_reduce_max_##sfx
npyv_reduce_maxp_##sfx
npyv_reduce_maxn_##sfx
npyv_reduce_min_##sfx
npyv_reduce_minp_##sfx
npyv_reduce_minn_##sfx
"""
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
chk_nan = {"xp": 1, "np": 1, "nn": 2, "xn": 2}.get(intrin[-2:], 0)
func = eval(intrin[:3])
reduce_intrin = getattr(self, "reduce_" + intrin)
intrin = getattr(self, intrin)
hf_nlanes = self.nlanes // 2
cases = (
([0.0, -0.0], [-0.0, 0.0]),
([10, -10], [10, -10]),
([pinf, 10], [10, ninf]),
([10, pinf], [ninf, 10]),
([10, -10], [10, -10]),
([-10, 10], [-10, 10])
)
for op1, op2 in cases:
vdata_a = self.load(op1 * hf_nlanes)
vdata_b = self.load(op2 * hf_nlanes)
data = func(vdata_a, vdata_b)
simd = intrin(vdata_a, vdata_b)
assert simd == data
data = func(vdata_a)
simd = reduce_intrin(vdata_a)
assert simd == data
if not chk_nan:
return
if chk_nan == 1:
test_nan = lambda a, b: (
b if math.isnan(a) else a if math.isnan(b) else b
)
else:
test_nan = lambda a, b: (
nan if math.isnan(a) or math.isnan(b) else b
)
cases = (
(nan, 10),
(10, nan),
(nan, pinf),
(pinf, nan),
(nan, nan)
)
for op1, op2 in cases:
vdata_ab = self.load([op1, op2] * hf_nlanes)
data = test_nan(op1, op2)
simd = reduce_intrin(vdata_ab)
assert simd == pytest.approx(data, nan_ok=True)
vdata_a = self.setall(op1)
vdata_b = self.setall(op2)
data = [data] * self.nlanes
simd = intrin(vdata_a, vdata_b)
assert simd == pytest.approx(data, nan_ok=True)
def test_reciprocal(self):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
data = self._data()
vdata = self.load(self._data())
recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf))
for case, desired in recip_cases:
data_recip = [desired] * self.nlanes
recip = self.recip(self.setall(case))
assert recip == pytest.approx(data_recip, nan_ok=True)
data_recip = self.load([1 / x for x in data]) # load to truncate precision
recip = self.recip(vdata)
assert recip == data_recip
def test_special_cases(self):
"""
Compare Not NaN. Test intrinsics:
npyv_notnan_##SFX
"""
nnan = self.notnan(self.setall(self._nan()))
assert nnan == [0] * self.nlanes
@pytest.mark.parametrize("intrin_name", [
"rint", "trunc", "ceil", "floor"
])
def test_unary_invalid_fpexception(self, intrin_name):
intrin = getattr(self, intrin_name)
for d in [float("nan"), float("inf"), -float("inf")]:
v = self.setall(d)
clear_floatstatus()
intrin(v)
assert check_floatstatus(invalid=True) is False
@pytest.mark.parametrize('py_comp,np_comp', [
(operator.lt, "cmplt"),
(operator.le, "cmple"),
(operator.gt, "cmpgt"),
(operator.ge, "cmpge"),
(operator.eq, "cmpeq"),
(operator.ne, "cmpneq")
])
def test_comparison_with_nan(self, py_comp, np_comp):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
mask_true = self._true_mask()
def to_bool(vector):
return [lane == mask_true for lane in vector]
intrin = getattr(self, np_comp)
cmp_cases = ((0, nan), (nan, 0), (nan, nan), (pinf, nan),
(ninf, nan), (-0.0, +0.0))
for case_operand1, case_operand2 in cmp_cases:
data_a = [case_operand1] * self.nlanes
data_b = [case_operand2] * self.nlanes
vdata_a = self.setall(case_operand1)
vdata_b = self.setall(case_operand2)
vcmp = to_bool(intrin(vdata_a, vdata_b))
data_cmp = [py_comp(a, b) for a, b in zip(data_a, data_b)]
assert vcmp == data_cmp
@pytest.mark.parametrize("intrin", ["any", "all"])
@pytest.mark.parametrize("data", (
[float("nan"), 0],
[0, float("nan")],
[float("nan"), 1],
[1, float("nan")],
[float("nan"), float("nan")],
[0.0, -0.0],
[-0.0, 0.0],
[1.0, -0.0]
))
def test_operators_crosstest(self, intrin, data):
"""
Test intrinsics:
npyv_any_##SFX
npyv_all_##SFX
"""
data_a = self.load(data * self.nlanes)
func = eval(intrin)
intrin = getattr(self, intrin)
desired = func(data_a)
simd = intrin(data_a)
assert not not simd == desired
|
_SIMD_FP
|
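A minimal NumPy sketch (assuming only `numpy`) of the algebraic identities that `test_arithmetic_fused` above relies on, with plain arrays standing in for the SIMD vectors:

```python
# With fma = a*b + c, the other fused ops reduce to simple expressions over fma:
# fms = fma - 2c, nfma = 2c - fma, nfms = -fma. Plain arrays stand in for vectors.
import numpy as np

rng = np.random.default_rng(0)
a, b, c = rng.random(8), rng.random(8), rng.random(8)

fma = a * b + c
np.testing.assert_allclose(a * b - c, fma - 2 * c)      # fms
np.testing.assert_allclose(-(a * b) + c, 2 * c - fma)   # nfma
np.testing.assert_allclose(-(a * b) - c, -fma)          # nfms
```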
python
|
pytorch__pytorch
|
torch/testing/_internal/distributed/distributed_test.py
|
{
"start": 9824,
"end": 10104
}
|
class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = nn.Linear(10, 10, bias=False)
self.b = nn.Linear(10, 1, bias=False)
def forward(self, x):
a = self.a(x)
b = self.b(x)
return (a, b)
|
TwoLinLayerNet
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximize-greatness-of-an-array.py
|
{
"start": 66,
"end": 337
}
|
class ____(object):
def maximizeGreatness(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return len(nums)-max(collections.Counter(nums).itervalues())
# Time: O(nlogn)
# Space: O(1)
# sort, greedy, two pointers
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/sam3_tracker_video/configuration_sam3_tracker_video.py
|
{
"start": 1288,
"end": 3411
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Sam3TrackerVideoPromptEncoder`]. The [`Sam3TrackerVideoPromptEncoder`]
module is used to encode the input 2D points and bounding boxes.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the hidden states.
image_size (`int`, *optional*, defaults to 1008):
The expected output resolution of the image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
mask_input_channels (`int`, *optional*, defaults to 16):
The number of channels to be fed to the `MaskDecoder` module.
num_point_embeddings (`int`, *optional*, defaults to 4):
The number of point embeddings to be used.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the encoder and pooler.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
scale (`float`, *optional*, defaults to 1):
The scale factor for the prompt encoder.
"""
base_config_key = "prompt_encoder_config"
def __init__(
self,
hidden_size=256,
image_size=1008,
patch_size=14,
mask_input_channels=16,
num_point_embeddings=4,
hidden_act="gelu",
layer_norm_eps=1e-6,
scale=1,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.image_size = image_size
self.patch_size = patch_size
self.mask_input_channels = mask_input_channels
self.num_point_embeddings = num_point_embeddings
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.scale = scale
|
Sam3TrackerVideoPromptEncoderConfig
|
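A brief usage sketch for the prompt-encoder config above; the module path follows the row's `path` field, and the asserted values are just the documented defaults:

```python
# Hypothetical usage sketch: instantiate the config and rely on its documented
# defaults. The import path mirrors the file path shown in the row; whether the
# class is also re-exported at the package top level is not assumed here.
from transformers.models.sam3_tracker_video.configuration_sam3_tracker_video import (
    Sam3TrackerVideoPromptEncoderConfig,
)

config = Sam3TrackerVideoPromptEncoderConfig(hidden_size=256, image_size=1008)
assert config.patch_size == 14        # default from the class definition above
assert config.hidden_act == "gelu"    # default from the class definition above
```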
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/pool/base.py
|
{
"start": 21632,
"end": 36554
}
|
class ____(ConnectionPoolEntry):
"""Maintains a position in a connection pool which references a pooled
connection.
This is an internal object used by the :class:`_pool.Pool` implementation
to provide context management to a DBAPI connection maintained by
that :class:`_pool.Pool`. The public facing interface for this class
is described by the :class:`.ConnectionPoolEntry` class. See that
class for public API details.
.. seealso::
:class:`.ConnectionPoolEntry`
:class:`.PoolProxiedConnection`
"""
__slots__ = (
"__pool",
"fairy_ref",
"finalize_callback",
"fresh",
"starttime",
"dbapi_connection",
"__weakref__",
"__dict__",
)
finalize_callback: Deque[Callable[[DBAPIConnection], None]]
fresh: bool
fairy_ref: Optional[weakref.ref[_ConnectionFairy]]
starttime: float
def __init__(self, pool: Pool, connect: bool = True):
self.fresh = False
self.fairy_ref = None
self.starttime = 0
self.dbapi_connection = None
self.__pool = pool
if connect:
self.__connect()
self.finalize_callback = deque()
dbapi_connection: Optional[DBAPIConnection]
@property
def driver_connection(self) -> Optional[Any]: # type: ignore[override] # mypy#4125 # noqa: E501
if self.dbapi_connection is None:
return None
else:
return self.__pool._dialect.get_driver_connection(
self.dbapi_connection
)
@property
@util.deprecated(
"2.0",
"The _ConnectionRecord.connection attribute is deprecated; "
"please use 'driver_connection'",
)
def connection(self) -> Optional[DBAPIConnection]:
return self.dbapi_connection
_soft_invalidate_time: float = 0
@util.ro_memoized_property
def info(self) -> _InfoType:
return {}
@util.ro_memoized_property
def record_info(self) -> Optional[_InfoType]:
return {}
@classmethod
def checkout(cls, pool: Pool) -> _ConnectionFairy:
if TYPE_CHECKING:
rec = cast(_ConnectionRecord, pool._do_get())
else:
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except BaseException as err:
with util.safe_reraise():
rec._checkin_failed(err, _fairy_was_created=False)
# not reached, for code linters only
raise
echo = pool._should_log_debug()
fairy = _ConnectionFairy(pool, dbapi_connection, rec, echo)
rec.fairy_ref = ref = weakref.ref(
fairy,
lambda ref: (
_finalize_fairy(
None, rec, pool, ref, echo, transaction_was_reset=False
)
if _finalize_fairy is not None
else None
),
)
_strong_ref_connection_records[ref] = rec
if echo:
pool.logger.debug(
"Connection %r checked out from pool", dbapi_connection
)
return fairy
def _checkin_failed(
self, err: BaseException, _fairy_was_created: bool = True
) -> None:
self.invalidate(e=err)
self.checkin(
_fairy_was_created=_fairy_was_created,
)
def checkin(self, _fairy_was_created: bool = True) -> None:
if self.fairy_ref is None and _fairy_was_created:
# _fairy_was_created is False for the initial get connection phase;
# meaning there was no _ConnectionFairy and we must unconditionally
# do a checkin.
#
# otherwise, if fairy_was_created==True, if fairy_ref is None here
# that means we were checked in already, so this looks like
# a double checkin.
util.warn("Double checkin attempted on %s" % self)
return
self.fairy_ref = None
connection = self.dbapi_connection
pool = self.__pool
while self.finalize_callback:
finalizer = self.finalize_callback.pop()
if connection is not None:
finalizer(connection)
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, self)
pool._return_conn(self)
@property
def in_use(self) -> bool:
return self.fairy_ref is not None
@property
def last_connect_time(self) -> float:
return self.starttime
def close(self) -> None:
if self.dbapi_connection is not None:
self.__close()
def invalidate(
self, e: Optional[BaseException] = None, soft: bool = False
) -> None:
# already invalidated
if self.dbapi_connection is None:
return
if soft:
self.__pool.dispatch.soft_invalidate(
self.dbapi_connection, self, e
)
else:
self.__pool.dispatch.invalidate(self.dbapi_connection, self, e)
if e is not None:
self.__pool.logger.info(
"%sInvalidate connection %r (reason: %s:%s)",
"Soft " if soft else "",
self.dbapi_connection,
e.__class__.__name__,
e,
)
else:
self.__pool.logger.info(
"%sInvalidate connection %r",
"Soft " if soft else "",
self.dbapi_connection,
)
if soft:
self._soft_invalidate_time = time.time()
else:
self.__close(terminate=True)
self.dbapi_connection = None
def get_connection(self) -> DBAPIConnection:
recycle = False
# NOTE: the various comparisons here are assuming that measurable time
# passes between these state changes. however, time.time() is not
# guaranteed to have sub-second precision. comparisons of
# "invalidation time" to "starttime" should perhaps use >= so that the
# state change can take place assuming no measurable time has passed,
# however this does not guarantee correct behavior here as if time
# continues to not pass, it will try to reconnect repeatedly until
# these timestamps diverge, so in that sense using > is safer. Per
# https://stackoverflow.com/a/1938096/34549, Windows time.time() may be
# within 16 milliseconds accuracy, so unit tests for connection
# invalidation need a sleep of at least this long between initial start
# time and invalidation for the logic below to work reliably.
if self.dbapi_connection is None:
self.info.clear()
self.__connect()
elif (
self.__pool._recycle > -1
and time.time() - self.starttime > self.__pool._recycle
):
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling",
self.dbapi_connection,
)
recycle = True
elif self.__pool._invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to pool invalidation; "
+ "recycling",
self.dbapi_connection,
)
recycle = True
elif self._soft_invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to local soft invalidation; "
+ "recycling",
self.dbapi_connection,
)
recycle = True
if recycle:
self.__close(terminate=True)
self.info.clear()
self.__connect()
assert self.dbapi_connection is not None
return self.dbapi_connection
def _is_hard_or_soft_invalidated(self) -> bool:
return (
self.dbapi_connection is None
or self.__pool._invalidate_time > self.starttime
or (self._soft_invalidate_time > self.starttime)
)
def __close(self, *, terminate: bool = False) -> None:
self.finalize_callback.clear()
if self.__pool.dispatch.close:
self.__pool.dispatch.close(self.dbapi_connection, self)
assert self.dbapi_connection is not None
self.__pool._close_connection(
self.dbapi_connection, terminate=terminate
)
self.dbapi_connection = None
def __connect(self) -> None:
pool = self.__pool
# ensure any existing connection is removed, so that if
# creator fails, this attribute stays None
self.dbapi_connection = None
try:
self.starttime = time.time()
self.dbapi_connection = connection = pool._invoke_creator(self)
pool.logger.debug("Created new connection %r", connection)
self.fresh = True
except BaseException as e:
with util.safe_reraise():
pool.logger.debug("Error on connect(): %s", e)
else:
# in SQLAlchemy 1.4 the first_connect event is not used by
# the engine, so this will usually not be set
if pool.dispatch.first_connect:
pool.dispatch.first_connect.for_modify(
pool.dispatch
).exec_once_unless_exception(self.dbapi_connection, self)
# init of the dialect now takes place within the connect
# event, so ensure a mutex is used on the first run
pool.dispatch.connect.for_modify(
pool.dispatch
)._exec_w_sync_on_first_run(self.dbapi_connection, self)
def _finalize_fairy(
dbapi_connection: Optional[DBAPIConnection],
connection_record: Optional[_ConnectionRecord],
pool: Pool,
ref: Optional[
weakref.ref[_ConnectionFairy]
], # this is None when called directly, not by the gc
echo: Optional[log._EchoFlagType],
transaction_was_reset: bool = False,
fairy: Optional[_ConnectionFairy] = None,
) -> None:
"""Cleanup for a :class:`._ConnectionFairy` whether or not it's already
been garbage collected.
When using an async dialect no IO can happen here (without using
a dedicated thread), since this is called outside the greenlet
context and with an already running loop. In this case function
will only log a message and raise a warning.
"""
is_gc_cleanup = ref is not None
if is_gc_cleanup:
assert ref is not None
_strong_ref_connection_records.pop(ref, None)
assert connection_record is not None
if connection_record.fairy_ref is not ref:
return
assert dbapi_connection is None
dbapi_connection = connection_record.dbapi_connection
elif fairy:
_strong_ref_connection_records.pop(weakref.ref(fairy), None)
# null pool is not _is_asyncio but can be used also with async dialects
dont_restore_gced = pool._dialect.is_async
if dont_restore_gced:
detach = connection_record is None or is_gc_cleanup
can_manipulate_connection = not is_gc_cleanup
can_close_or_terminate_connection = (
not pool._dialect.is_async or pool._dialect.has_terminate
)
requires_terminate_for_close = (
pool._dialect.is_async and pool._dialect.has_terminate
)
else:
detach = connection_record is None
can_manipulate_connection = can_close_or_terminate_connection = True
requires_terminate_for_close = False
if dbapi_connection is not None:
if connection_record and echo:
pool.logger.debug(
"Connection %r being returned to pool", dbapi_connection
)
try:
if not fairy:
assert connection_record is not None
fairy = _ConnectionFairy(
pool,
dbapi_connection,
connection_record,
echo,
)
assert fairy.dbapi_connection is dbapi_connection
fairy._reset(
pool,
transaction_was_reset=transaction_was_reset,
terminate_only=detach,
asyncio_safe=can_manipulate_connection,
)
if detach:
if connection_record:
fairy._pool = pool
fairy.detach()
if can_close_or_terminate_connection:
if pool.dispatch.close_detached:
pool.dispatch.close_detached(dbapi_connection)
pool._close_connection(
dbapi_connection,
terminate=requires_terminate_for_close,
)
except BaseException as e:
pool.logger.error(
"Exception during reset or similar", exc_info=True
)
if connection_record:
connection_record.invalidate(e=e)
if not isinstance(e, Exception):
raise
finally:
if detach and is_gc_cleanup and dont_restore_gced:
message = (
"The garbage collector is trying to clean up "
f"non-checked-in connection {dbapi_connection!r}, "
f"""which will be {
'dropped, as it cannot be safely terminated'
if not can_close_or_terminate_connection
else 'terminated'
}. """
"Please ensure that SQLAlchemy pooled connections are "
"returned to "
"the pool explicitly, either by calling ``close()`` "
"or by using appropriate context managers to manage "
"their lifecycle."
)
pool.logger.error(message)
util.warn(message)
if connection_record and connection_record.fairy_ref is not None:
connection_record.checkin()
# give gc some help. See
# test/engine/test_pool.py::PoolEventsTest::test_checkin_event_gc[True]
# which actually started failing when pytest warnings plugin was
# turned on, due to util.warn() above
if fairy is not None:
fairy.dbapi_connection = None # type: ignore
fairy._connection_record = None
del dbapi_connection
del connection_record
del fairy
# a dictionary of the _ConnectionFairy weakrefs to _ConnectionRecord, so that
# GC under pypy will call ConnectionFairy finalizers. linked directly to the
# weakref that will empty itself when collected so that it should not create
# any unmanaged memory references.
_strong_ref_connection_records: Dict[
weakref.ref[_ConnectionFairy], _ConnectionRecord
] = {}
|
_ConnectionRecord
|
python
|
pytorch__pytorch
|
test/dynamo/test_modules.py
|
{
"start": 20230,
"end": 20359
}
|
class ____(torch.nn.Module):
@classmethod
def custom_add(cls, x):
x = x + x
return x
|
ComplicatedSuperParent
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/events.py
|
{
"start": 13479,
"end": 16300
}
|
class ____(
NamedTuple(
"_AssetObservation",
[
("asset_key", PublicAttr[AssetKey]),
("description", PublicAttr[Optional[str]]),
("metadata", PublicAttr[Mapping[str, MetadataValue]]),
("partition", PublicAttr[Optional[str]]),
("tags", PublicAttr[Mapping[str, str]]),
],
),
EventWithMetadata,
):
"""Event that captures metadata about an asset at a point in time.
Args:
asset_key (Union[str, List[str], AssetKey]): A key to identify the asset.
partition (Optional[str]): The name of a partition of the asset that the metadata
corresponds to.
tags (Optional[Mapping[str, str]]): A mapping containing tags for the observation.
metadata (Optional[Dict[str, Union[str, float, int, MetadataValue]]]):
Arbitrary metadata about the asset. Keys are displayed string labels, and values are
one of the following: string, float, int, JSON-serializable dict, JSON-serializable
list, and one of the data classes returned by a MetadataValue static method.
"""
def __new__(
cls,
asset_key: CoercibleToAssetKey,
description: Optional[str] = None,
metadata: Optional[Mapping[str, RawMetadataValue]] = None,
partition: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
):
if isinstance(asset_key, AssetKey):
check.inst_param(asset_key, "asset_key", AssetKey)
elif isinstance(asset_key, str):
asset_key = AssetKey(parse_asset_key_string(asset_key))
else:
check.sequence_param(asset_key, "asset_key", of_type=str)
asset_key = AssetKey(asset_key)
validate_asset_event_tags(tags)
normed_metadata = normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str),
)
return super().__new__(
cls,
asset_key=asset_key,
description=check.opt_str_param(description, "description"),
metadata=normed_metadata,
tags=tags or {},
partition=check.opt_str_param(partition, "partition"),
)
@property
def label(self) -> str:
return " ".join(self.asset_key.path)
@property
def data_version(self) -> Optional[str]:
return self.tags.get(DATA_VERSION_TAG)
def with_metadata(
self, metadata: Optional[Mapping[str, RawMetadataValue]]
) -> "AssetObservation":
return AssetObservation(
asset_key=self.asset_key,
description=self.description,
metadata=metadata,
partition=self.partition,
tags=self.tags,
)
UNDEFINED_ASSET_KEY_PATH = ["__undefined__"]
|
AssetObservation
|
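A small usage sketch for the event class above, assuming it is importable as `dagster.AssetObservation`; argument names follow the docstring:

```python
# Hypothetical usage sketch based on the docstring above: record metadata about
# an asset at a point in time. The top-level `dagster` import is an assumption.
from dagster import AssetObservation

observation = AssetObservation(
    asset_key="my_dataset",                 # coerced to AssetKey(["my_dataset"])
    description="Row count after nightly refresh",
    metadata={"num_rows": 1000},
    partition="2024-01-01",
)
assert observation.label == "my_dataset"
```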
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/schedules.py
|
{
"start": 1848,
"end": 11182
}
|
class ____(PrefectBaseModel):
"""
A schedule formed by adding `interval` increments to an `anchor_date`. If no
`anchor_date` is supplied, the current UTC time is used. If a
timezone-naive datetime is provided for `anchor_date`, it is assumed to be
in the schedule's timezone (or UTC). Even if supplied with an IANA timezone,
anchor dates are always stored as UTC offsets, so a `timezone` can be
provided to determine localization behaviors like DST boundary handling. If
none is provided it will be inferred from the anchor date.
NOTE: If the `IntervalSchedule` `anchor_date` or `timezone` is provided in a
DST-observing timezone, then the schedule will adjust itself appropriately.
Intervals greater than 24 hours will follow DST conventions, while intervals
of less than 24 hours will follow UTC intervals. For example, an hourly
schedule will fire every UTC hour, even across DST boundaries. When clocks
are set back, this will result in two runs that *appear* to both be
scheduled for 1am local time, even though they are an hour apart in UTC
time. For longer intervals, like a daily schedule, the interval schedule
will adjust for DST boundaries so that the clock-hour remains constant. This
means that a daily schedule that always fires at 9am will observe DST and
continue to fire at 9am in the local time zone.
Args:
interval (datetime.timedelta): an interval to schedule on.
anchor_date (DateTime, optional): an anchor date to schedule increments against;
if not provided, the current timestamp will be used.
timezone (str, optional): a valid timezone string.
"""
model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
interval: datetime.timedelta = Field(gt=datetime.timedelta(0))
anchor_date: AnchorDate = Field(
default_factory=lambda: now("UTC"),
examples=["2020-01-01T00:00:00Z"],
)
timezone: Optional[str] = Field(default=None, examples=["America/New_York"])
@model_validator(mode="after")
def validate_timezone(self):
self.timezone = default_timezone(self.timezone, self.model_dump())
return self
async def get_dates(
self,
n: Optional[int] = None,
start: Optional[datetime.datetime] = None,
end: Optional[datetime.datetime] = None,
) -> List[DateTime]:
"""Retrieves dates from the schedule. Up to 1,000 candidate dates are checked
following the start date.
Args:
n (int): The number of dates to generate
start (datetime.datetime, optional): The first returned date will be on or
after this date. Defaults to None. If a timezone-naive datetime is
provided, it is assumed to be in the schedule's timezone.
end (datetime.datetime, optional): The maximum scheduled date to return. If
a timezone-naive datetime is provided, it is assumed to be in the
schedule's timezone.
Returns:
List[DateTime]: A list of dates
"""
return sorted(self._get_dates_generator(n=n, start=start, end=end))
def _get_dates_generator(
self,
n: Optional[int] = None,
start: Optional[datetime.datetime] = None,
end: Optional[datetime.datetime] = None,
) -> Generator[DateTime, None, None]:
"""Retrieves dates from the schedule. Up to 1,000 candidate dates are checked
following the start date.
Args:
n (Optional[int]): The number of dates to generate
start (Optional[datetime.datetime]): The first returned date will be on or
after this date. Defaults to None. If a timezone-naive datetime is
provided, it is assumed to be in the schedule's timezone.
end (Optional[datetime.datetime]): The maximum scheduled date to return. If
a timezone-naive datetime is provided, it is assumed to be in the
schedule's timezone.
Returns:
List[DateTime]: a list of dates
"""
if n is None:
# if an end was supplied, we do our best to supply all matching dates (up to
# MAX_ITERATIONS)
if end is not None:
n = MAX_ITERATIONS
else:
n = 1
if sys.version_info >= (3, 13):
# `pendulum` is not supported in Python 3.13, so we use `whenever` instead
from whenever import PlainDateTime, ZonedDateTime
if start is None:
start = ZonedDateTime.now("UTC").py_datetime()
target_timezone = self.timezone or "UTC"
def to_local_zdt(dt: datetime.datetime | None) -> ZonedDateTime | None:
if dt is None:
return None
if dt.tzinfo is None:
return PlainDateTime.from_py_datetime(dt).assume_tz(target_timezone)
if isinstance(dt.tzinfo, ZoneInfo):
return ZonedDateTime.from_py_datetime(dt).to_tz(target_timezone)
# For offset-based tzinfo instances (e.g. datetime.timezone(+09:00)),
# use astimezone to preserve the instant, then convert to ZonedDateTime.
return ZonedDateTime.from_py_datetime(
dt.astimezone(ZoneInfo(target_timezone))
)
anchor_zdt = to_local_zdt(self.anchor_date)
assert anchor_zdt is not None
local_start = to_local_zdt(start)
assert local_start is not None
local_end = to_local_zdt(end)
offset = (
local_start - anchor_zdt
).in_seconds() / self.interval.total_seconds()
next_date = anchor_zdt.add(
seconds=self.interval.total_seconds() * int(offset)
)
# break the interval into `days` and `seconds` because the datetime
# library will handle DST boundaries properly if days are provided, but not
# if we add `total seconds`. Therefore, `next_date + self.interval`
# fails while `next_date.add(days=days, seconds=seconds)` works.
interval_days = self.interval.days
interval_seconds = self.interval.total_seconds() - (
interval_days * 24 * 60 * 60
)
while next_date < local_start:
next_date = next_date.add(days=interval_days, seconds=interval_seconds)
counter = 0
dates: set[ZonedDateTime] = set()
while True:
# if the end date was exceeded, exit
if local_end and next_date > local_end:
break
# ensure no duplicates; weird things can happen with DST
if next_date not in dates:
dates.add(next_date)
yield next_date.py_datetime()
# if enough dates have been collected or enough attempts were made, exit
if len(dates) >= n or counter > MAX_ITERATIONS:
break
counter += 1
next_date = next_date.add(days=interval_days, seconds=interval_seconds)
else:
if start is None:
start = now("UTC")
anchor_tz = self.anchor_date.in_tz(self.timezone)
start, end = _prepare_scheduling_start_and_end(start, end, self.timezone)
# compute the offset between the anchor date and the start date to jump to the
# next date
offset = (start - anchor_tz).total_seconds() / self.interval.total_seconds()
next_date = anchor_tz.add(
seconds=self.interval.total_seconds() * int(offset)
)
# break the interval into `days` and `seconds` because the datetime
# library will handle DST boundaries properly if days are provided, but not
# if we add `total seconds`. Therefore, `next_date + self.interval`
# fails while `next_date.add(days=days, seconds=seconds)` works.
interval_days = self.interval.days
interval_seconds = self.interval.total_seconds() - (
interval_days * 24 * 60 * 60
)
# daylight saving time boundaries can create a situation where the next date is
# before the start date, so we advance it if necessary
while next_date < start:
next_date = next_date.add(days=interval_days, seconds=interval_seconds)
counter = 0
dates = set()
while True:
# if the end date was exceeded, exit
if end and next_date > end:
break
# ensure no duplicates; weird things can happen with DST
if next_date not in dates:
dates.add(next_date)
yield next_date
# if enough dates have been collected or enough attempts were made, exit
if len(dates) >= n or counter > MAX_ITERATIONS:
break
counter += 1
next_date = next_date.add(days=interval_days, seconds=interval_seconds)
|
IntervalSchedule
|
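The offset arithmetic in `_get_dates_generator` above jumps straight to the first interval boundary at or after `start` instead of iterating from the anchor. A standalone sketch of that computation with plain `datetime` objects (no Prefect imports; the DST day/second splitting done in the real schedule is intentionally omitted):

```python
# Count whole intervals between anchor and start, land on that boundary, then
# advance until the candidate is not before start.
from datetime import datetime, timedelta, timezone

anchor = datetime(2020, 1, 1, tzinfo=timezone.utc)
interval = timedelta(hours=6)
start = datetime(2024, 5, 17, 3, 20, tzinfo=timezone.utc)

offset = (start - anchor).total_seconds() / interval.total_seconds()
next_date = anchor + interval * int(offset)
while next_date < start:
    next_date += interval

print(next_date)  # first run at or after `start` on the 6-hour grid
```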
python
|
getsentry__sentry
|
src/sentry/sentry_metrics/querying/visitors/base.py
|
{
"start": 1932,
"end": 3226
}
|
class ____(ABC, Generic[TVisited]):
"""
Abstract visitor that defines a visiting behavior of a `QueryCondition`.
"""
def visit_group(self, condition_group: ConditionGroup) -> ConditionGroup:
if not condition_group:
return condition_group
visited_conditions = []
for condition in condition_group:
visited_conditions.append(self.visit(condition))
return visited_conditions
def visit(self, query_condition: QueryCondition) -> TVisited:
if isinstance(query_condition, BooleanCondition):
return self._visit_boolean_condition(query_condition)
elif isinstance(query_condition, Condition):
return self._visit_condition(query_condition)
raise AssertionError(
f"Unhandled query condition {query_condition} of type {type(query_condition)}"
)
def _visit_boolean_condition(self, boolean_condition: BooleanCondition) -> TVisited:
conditions = []
for condition in boolean_condition.conditions:
conditions.append(self.visit(condition))
return BooleanCondition(op=boolean_condition.op, conditions=conditions)
def _visit_condition(self, condition: Condition) -> TVisited:
raise NotImplementedError
|
QueryConditionVisitor
|
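A sketch of how a concrete visitor might subclass the abstract class above: only `_visit_condition` needs overriding, since `visit` already dispatches `BooleanCondition` groups recursively. The subclass name below is hypothetical, not part of Sentry:

```python
# Hypothetical concrete visitor: rewrites every leaf Condition while the base
# class handles the boolean structure. snuba_sdk imports mirror the types
# referenced above; TagRenamingVisitor is an illustrative name only.
from snuba_sdk import Column, Condition

class TagRenamingVisitor(QueryConditionVisitor[Condition]):
    def __init__(self, old: str, new: str) -> None:
        self._old = old
        self._new = new

    def _visit_condition(self, condition: Condition) -> Condition:
        lhs = condition.lhs
        if isinstance(lhs, Column) and lhs.name == self._old:
            lhs = Column(self._new)
        return Condition(lhs, condition.op, condition.rhs)
```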
python
|
facebook__pyre-check
|
tools/incremental_test/specification.py
|
{
"start": 10456,
"end": 11508
}
|
class ____(SingleUpdate):
changes: Dict[str, str]
removals: List[str]
def update(self, environment: Environment, working_directory: Path) -> None:
for handle, content in self.changes.items():
# Need to create parent directory if it doesn't exist
parent_path = Path(handle).parent
if not parent_path == Path("."):
environment.checked_run(
working_directory=working_directory,
command=f"mkdir -p {parent_path}",
)
environment.checked_run(
working_directory=working_directory,
command=f"tee {handle}",
stdin=content,
)
for handle in self.removals:
environment.checked_run(
working_directory=working_directory, command=f"rm -f {handle}"
)
def to_json(self) -> Dict[str, Any]:
return {"kind": "file", "changes": self.changes, "removals": self.removals}
@dataclass(frozen=True)
|
FileRepositoryUpdate
|
python
|
Pylons__pyramid
|
src/pyramid/httpexceptions.py
|
{
"start": 28507,
"end": 28928
}
|
class ____(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server is refusing to service the request
because the Request-URI is longer than the server is willing to
interpret.
code: 414, title: Request-URI Too Long
"""
code = 414
title = 'Request-URI Too Long'
explanation = 'The request URI was too long for this server.'
|
HTTPRequestURITooLong
|
python
|
weaviate__weaviate-python-client
|
weaviate/gql/filter.py
|
{
"start": 1747,
"end": 2426
}
|
class ____(ABC):
"""A base abstract class for all filters."""
def __init__(self, content: dict):
"""Initialize a Filter class instance.
Args:
content: The content of the `Filter` clause.
"""
if not isinstance(content, dict):
raise TypeError(
f"{self.__class__.__name__} filter is expected to "
f"be type dict but is {type(content)}"
)
self._content = deepcopy(content)
@abstractmethod
def __str__(self) -> str:
"""Should be implemented in each inheriting class."""
@property
def content(self) -> dict:
return self._content
|
Filter
|
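A hypothetical subclass of the abstract `Filter` above, showing the minimal contract: pass a dict to `__init__` and implement `__str__`. The clause rendered here is only illustrative, not the exact output of weaviate's real filters:

```python
# The base class copies and type-checks the dict; the subclass only renders it.
import json

class WhereFilter(Filter):
    def __str__(self) -> str:
        return f"where: {json.dumps(self.content)} "

clause = WhereFilter({"path": ["name"], "operator": "Equal", "valueText": "apple"})
print(str(clause))
```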
python
|
tensorflow__tensorflow
|
third_party/xla/xla/python/xla_client.py
|
{
"start": 3728,
"end": 5555
}
|
class ____:
"""Python representation of a xla.OpMetadata protobuf."""
__slots__ = ('op_type', 'op_name', 'source_file', 'source_line',
'source_end_line', 'source_column', 'source_end_column')
def __init__(self, op_type='', op_name='', source_file='', source_line=0,
source_end_line=0, source_column=0, source_end_column=0):
self.op_type = op_type
self.op_name = op_name
self.source_file = source_file
self.source_line = source_line
self.source_end_line = source_end_line
self.source_column = source_column
self.source_end_column = source_end_column
def current_source_info_metadata(op_type=None, op_name=None, skip_frames=1):
"""Helper for use in source mapping that returns an OpMetadata object."""
frame = inspect.stack()[skip_frames]
filename = os.path.basename(frame.filename)
if hasattr(frame, 'positions'):
lineno, end_lineno, column, end_column = frame.positions
return OpMetadata(op_type=op_type, op_name=op_name, source_file=filename,
source_line=lineno, source_end_line=end_lineno,
source_column=column, source_end_column=end_column)
else:
return OpMetadata(op_type=op_type, op_name=op_name, source_file=filename,
source_line=frame.lineno)
def shape_from_pyval(pyval, layout: Sequence[int] | None = None):
"""Returns a Shape that describes a tuple-tree of Numpy arrays."""
def convert(pyval):
if isinstance(pyval, tuple):
if layout is not None:
raise NotImplementedError(
'shape_from_pyval does not support layouts for tuple shapes'
)
return Shape.tuple_shape(tuple(convert(elt) for elt in pyval))
else:
return Shape.array_shape(pyval.dtype, np.shape(pyval), layout)
return convert(pyval)
|
OpMetadata
|
python
|
PrefectHQ__prefect
|
src/prefect/blocks/notifications.py
|
{
"start": 631,
"end": 2482
}
|
class ____(NotificationBlock, ABC):
"""
An abstract class for sending notifications using Apprise.
"""
notify_type: Literal["info", "success", "warning", "failure"] = Field(
default=PREFECT_NOTIFY_TYPE_DEFAULT,
description="The type of notification being performed.",
)
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
def _start_apprise_client(self, url: SecretStr):
from apprise import Apprise, AppriseAsset
# A custom `AppriseAsset` that ensures Prefect Notifications
# appear correctly across multiple messaging platforms
prefect_app_data = AppriseAsset(
app_id="Prefect Notifications",
app_desc="Prefect Notifications",
app_url="https://prefect.io",
)
self._apprise_client = Apprise(asset=prefect_app_data)
self._apprise_client.add(servers=url.get_secret_value()) # pyright: ignore[reportUnknownMemberType]
def block_initialization(self) -> None:
self._start_apprise_client(getattr(self, "url"))
@sync_compatible
async def notify( # pyright: ignore[reportIncompatibleMethodOverride] TODO: update to sync only once base class is updated
self,
body: str,
subject: str | None = None,
) -> None:
with LogEavesdropper("apprise", level=logging.DEBUG) as eavesdropper:
result = await self._apprise_client.async_notify( # pyright: ignore[reportUnknownMemberType] incomplete type hints in apprise
body=body,
title=subject or "",
notify_type=self.notify_type, # pyright: ignore[reportArgumentType]
)
if not result and self._raise_on_failure:
raise NotificationError(log=eavesdropper.text())
|
AbstractAppriseNotificationBlock
|
python
|
keras-team__keras
|
keras/src/ops/image.py
|
{
"start": 2714,
"end": 5272
}
|
class ____(Operation):
def __init__(self, data_format=None, *, name=None):
super().__init__(name=name)
self.data_format = backend.standardize_data_format(data_format)
def call(self, images):
return backend.image.rgb_to_hsv(images, data_format=self.data_format)
def compute_output_spec(self, images):
images_shape = list(images.shape)
dtype = images.dtype
if len(images_shape) not in (3, 4):
raise ValueError(
"Invalid images rank: expected rank 3 (single image) "
"or rank 4 (batch of images). "
f"Received: images.shape={images_shape}"
)
if not backend.is_float_dtype(dtype):
raise ValueError(
"Invalid images dtype: expected float dtype. "
f"Received: images.dtype={dtype}"
)
return KerasTensor(shape=images_shape, dtype=images.dtype)
@keras_export("keras.ops.image.rgb_to_hsv")
def rgb_to_hsv(images, data_format=None):
"""Convert RGB images to HSV.
`images` must be of float dtype, and the output is only well defined if the
values in `images` are in `[0, 1]`.
All HSV values are in `[0, 1]`. A hue of `0` corresponds to pure red, `1/3`
is pure green, and `2/3` is pure blue.
Args:
images: Input image or batch of images. Must be 3D or 4D.
data_format: A string specifying the data format of the input tensor.
It can be either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)`, while `"channels_first"`
corresponds to inputs with shape `(batch, channels, height, width)`.
If not specified, the value will default to
`keras.config.image_data_format`.
Returns:
HSV image or batch of HSV images.
Examples:
>>> import numpy as np
>>> from keras import ops
>>> x = np.random.random((2, 4, 4, 3))
>>> y = ops.image.rgb_to_hsv(x)
>>> y.shape
(2, 4, 4, 3)
>>> x = np.random.random((4, 4, 3)) # Single RGB image
>>> y = ops.image.rgb_to_hsv(x)
>>> y.shape
(4, 4, 3)
>>> x = np.random.random((2, 3, 4, 4))
>>> y = ops.image.rgb_to_hsv(x, data_format="channels_first")
>>> y.shape
(2, 3, 4, 4)
"""
if any_symbolic_tensors((images,)):
return RGBToHSV(data_format=data_format).symbolic_call(images)
return backend.image.rgb_to_hsv(images, data_format=data_format)
|
RGBToHSV
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-marketo/source_marketo/source.py
|
{
"start": 17074,
"end": 18266
}
|
class ____(Oauth2Authenticator):
def __init__(self, config):
super().__init__(
token_refresh_endpoint=f"{config['domain_url']}/identity/oauth/token",
client_id=config["client_id"],
client_secret=config["client_secret"],
refresh_token=None,
)
def get_refresh_request_params(self) -> Mapping[str, Any]:
payload: MutableMapping[str, Any] = {
"grant_type": "client_credentials",
"client_id": self.get_client_id(),
"client_secret": self.get_client_secret(),
}
return payload
def refresh_access_token(self) -> Tuple[str, int]:
"""
Returns a tuple of (access_token, token_lifespan_in_seconds)
"""
try:
response = requests.request(method="GET", url=self.get_token_refresh_endpoint(), params=self.get_refresh_request_params())
response.raise_for_status()
response_json = response.json()
return response_json["access_token"], response_json["expires_in"]
except Exception as e:
raise Exception(f"Error while refreshing access token: {e}") from e
|
MarketoAuthenticator
|
python
|
PyCQA__pylint
|
tests/functional/e/e1101_9588_base_attr_aug_assign.py
|
{
"start": 355,
"end": 682
}
|
class ____(BaseClass):
"The first derived class which triggers the false positive"
def __init__(self):
"Augmented assignment triggers E1101."
BaseClass.__init__(self)
self.e1101 += 1
def countup(self):
"Consequently this also triggers E1101."
self.e1101 += 1
|
FalsePositiveClass
|
python
|
wandb__wandb
|
gpu_stats/hatch.py
|
{
"start": 124,
"end": 2481
}
|
class ____(Exception):
"""Raised when building GPU stats service fails."""
def build_gpu_stats(
cargo_binary: pathlib.Path,
output_path: pathlib.Path,
) -> None:
"""Builds the `gpu_stats` Rust binary for monitoring NVIDIA and Apple ARM GPUs.
NOTE: Cargo creates a cache under `./target/release` which speeds up subsequent builds,
but may grow large over time and/or cause issues when changing the commands here.
If you're running into problems, try deleting `./target`.
Args:
cargo_binary: Path to the Cargo binary, which must exist.
output_path: The path where to output the binary, relative to the
workspace root.
"""
rust_pkg_root = pathlib.Path("./gpu_stats")
cmd = (
str(cargo_binary),
"build",
"--release",
"--message-format=json",
"--bin",
"gpu_stats",
)
try:
cargo_output = subprocess.check_output(cmd, cwd=rust_pkg_root)
except subprocess.CalledProcessError as e:
raise GpuStatsBuildError(
"Failed to build the `gpu_stats` Rust binary. If you didn't"
" break the build, you may need to install Rust; see"
" https://www.rust-lang.org/tools/install."
"\n\n"
"As a workaround, you can set the WANDB_BUILD_SKIP_GPU_STATS"
" environment variable to true to skip this step and build a wandb"
" package that doesn't collect NVIDIA and Apple ARM GPU stats."
) from e
built_binary_path = _get_executable_path(cargo_output)
output_path.parent.mkdir(parents=True, exist_ok=True)
built_binary_path.replace(output_path)
output_path.chmod(0o755)
def _get_executable_path(cargo_output: bytes) -> pathlib.Path:
"""Returns the path to the gpu_stats binary.
Args:
cargo_output: The output from `cargo build` with
--message-format="json".
Returns:
The path to the binary.
Raises:
GpuStatsBuildError: if the path could not be determined.
"""
for line in cargo_output.splitlines():
path = json.loads(line).get("executable")
if path:
return pathlib.Path(path)
raise GpuStatsBuildError(
"Failed to find the `gpu_stats` binary. `cargo build` output:\n"
+ str(cargo_output),
)
|
GpuStatsBuildError
|
python
|
pennersr__django-allauth
|
tests/apps/socialaccount/providers/feedly/tests.py
|
{
"start": 240,
"end": 902
}
|
class ____(OAuth2TestsMixin, TestCase):
provider_id = FeedlyProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"id": "c805fcbf-3acf-4302-a97e-d82f9d7c897f",
"email": "jim.smith@example.com",
"givenName": "Jim",
"familyName": "Smith",
"picture": "https://www.google.com/profile_images/1771656873/bigger.jpg",
"gender": "male",
"locale": "en",
"reader": "9080770707070700",
"google": "115562565652656565656",
"twitter": "jimsmith",
"facebook": "",
"wave": "2013.7"
}""",
)
def get_expected_to_str(self):
return "jim.smith@example.com"
|
FeedlyTests
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_inline_schemas/pipeline.py
|
{
"start": 978,
"end": 3092
}
|
class ____(Step):
"""Check if the connector is a candidate to get inline schemas.
Candidate conditions:
- The connector is a Python connector.
- The connector is a source connector.
- The connector has a manifest file.
- The connector has schemas directory.
"""
context: ConnectorContext
title = "Check if the connector is a candidate for inline schema migration."
def __init__(self, context: PipelineContext) -> None:
super().__init__(context)
async def _run(self) -> StepResult:
connector = self.context.connector
manifest_path = connector.manifest_path
python_path = connector.python_source_dir_path
if connector.language not in [
ConnectorLanguage.PYTHON,
ConnectorLanguage.LOW_CODE,
ConnectorLanguage.MANIFEST_ONLY,
]:
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stderr="The connector is not a Python connector.",
)
if connector.connector_type != "source":
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stderr="The connector is not a source connector.",
)
if not manifest_path.is_file():
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stderr="The connector does not have a manifest file.",
)
schemas_dir = python_path / SCHEMAS_DIR_NAME
if not schemas_dir.is_dir():
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stderr="The connector does not have a schemas directory.",
)
# TODO: does this help or not?
# if _has_subdirectory(schemas_dir):
# return StepResult(step=self, status=StepStatus.SKIPPED, stderr="This has subdirectories. It's probably complicated.")
return StepResult(
step=self,
status=StepStatus.SUCCESS,
)
|
CheckIsInlineCandidate
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_associationproxy.py
|
{
"start": 112175,
"end": 115727
}
|
class ____(fixtures.DeclarativeMappedTest):
# test some GC scenarios, including issue #4268
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(String(50))
bs = relationship("B")
b_dyn = relationship("B", lazy="dynamic", viewonly=True)
b_data = association_proxy("bs", "data")
b_dynamic_data = association_proxy("bs", "data")
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
aid = Column(ForeignKey("a.id"))
data = Column(String(50))
@classmethod
def insert_data(cls, connection):
A, B = cls.classes("A", "B")
s = Session(connection)
s.add_all(
[
A(id=1, bs=[B(data="b1"), B(data="b2")]),
A(id=2, bs=[B(data="b3"), B(data="b4")]),
]
)
s.commit()
s.close()
def test_plain_collection_gc(self):
A, B = self.classes("A", "B")
s = Session(testing.db)
a1 = s.query(A).filter_by(id=1).one()
a1bs = a1.bs # noqa
del a1
gc_collect()
assert (A, (1,), None) not in s.identity_map
@testing.fails("dynamic relationship strong references parent")
def test_dynamic_collection_gc(self):
A, B = self.classes("A", "B")
s = Session(testing.db)
a1 = s.query(A).filter_by(id=1).one()
a1bs = a1.b_dyn # noqa
del a1
gc_collect()
# also fails, AppenderQuery holds onto parent
assert (A, (1,), None) not in s.identity_map
@testing.fails("association proxy strong references parent")
def test_associated_collection_gc(self):
A, B = self.classes("A", "B")
s = Session(testing.db)
a1 = s.query(A).filter_by(id=1).one()
a1bs = a1.b_data # noqa
del a1
gc_collect()
assert (A, (1,), None) not in s.identity_map
@testing.fails("association proxy strong references parent")
def test_associated_dynamic_gc(self):
A, B = self.classes("A", "B")
s = Session(testing.db)
a1 = s.query(A).filter_by(id=1).one()
a1bs = a1.b_dynamic_data # noqa
del a1
gc_collect()
assert (A, (1,), None) not in s.identity_map
def test_plain_collection_iterate(self):
A, B = self.classes("A", "B")
s = Session(testing.db)
a1 = s.query(A).filter_by(id=1).one()
a1bs = a1.bs
del a1
gc_collect()
assert len(a1bs) == 2
def test_dynamic_collection_iterate(self):
A, B = self.classes("A", "B")
s = Session(testing.db)
a1 = s.query(A).filter_by(id=1).one()
a1bs = a1.b_dyn # noqa
del a1
gc_collect()
assert len(list(a1bs)) == 2
def test_associated_collection_iterate(self):
A, B = self.classes("A", "B")
s = Session(testing.db)
a1 = s.query(A).filter_by(id=1).one()
a1bs = a1.b_data
del a1
gc_collect()
assert len(a1bs) == 2
def test_associated_dynamic_iterate(self):
A, B = self.classes("A", "B")
s = Session(testing.db)
a1 = s.query(A).filter_by(id=1).one()
a1bs = a1.b_dynamic_data
del a1
gc_collect()
assert len(a1bs) == 2
|
ScopeBehaviorTest
|
python
|
python-excel__xlwt
|
xlwt/antlr.py
|
{
"start": 17477,
"end": 19564
}
|
class ____(object):
SKIP = -1
INVALID_TYPE = 0
EOF_TYPE = 1
EOF = 1
NULL_TREE_LOOKAHEAD = 3
MIN_USER_TYPE = 4
def __init__(self,**argv):
try:
self.type = argv['type']
except:
self.type = INVALID_TYPE
try:
self.text = argv['text']
except:
self.text = "<no text>"
def isEOF(self):
return (self.type == EOF_TYPE)
def getColumn(self):
return 0
def getLine(self):
return 0
def getFilename(self):
return None
def setFilename(self,name):
return self
def getText(self):
return "<no text>"
def setText(self,text):
if is_string_type(text):
pass
else:
raise TypeError("Token.setText requires string argument")
return self
def setColumn(self,column):
return self
def setLine(self,line):
return self
def getType(self):
return self.type
def setType(self,type):
if isinstance(type,int):
self.type = type
else:
raise TypeError("Token.setType requires integer argument")
return self
def toString(self):
## not optimal
type_ = self.type
if type_ == 3:
tval = 'NULL_TREE_LOOKAHEAD'
elif type_ == 1:
tval = 'EOF_TYPE'
elif type_ == 0:
tval = 'INVALID_TYPE'
elif type_ == -1:
tval = 'SKIP'
else:
tval = type_
return '["%s",<%s>]' % (self.getText(),tval)
__str__ = toString
__repr__ = toString
### static attribute ..
Token.badToken = Token( type=INVALID_TYPE, text="<no text>")
if __name__ == "__main__":
print("testing ..")
T = Token.badToken
print(T)
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### CommonToken ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
|
Token
|
python
|
networkx__networkx
|
benchmarks/benchmarks/benchmark_harmonic_centrality.py
|
{
"start": 74,
"end": 1180
}
|
class ____:
timeout = 120
nodes = [10, 100, 1000]
params = [f"wheel_graph({i})" for i in nodes] + [
f"directed_wheel({i})" for i in nodes
]
param_names = ["graph"]
def setup(self, graph):
def directed_wheel(n):
# bidirectional edges on the rim with directed edges to the central node
G = nx.DiGraph(nx.cycle_graph(range(1, n)))
G.add_node(0)
G.add_edges_from((0, i) for i in range(1, n))
return G
self.graphs_dict = {}
for n in self.nodes:
self.graphs_dict[f"wheel_graph({n})"] = nx.wheel_graph(n)
self.graphs_dict[f"directed_wheel({n})"] = directed_wheel(n)
def time_harmonic_centrality(self, graph):
_ = nx.harmonic_centrality(self.graphs_dict[graph])
def time_harmonic_centrality_single_node(self, graph):
_ = nx.harmonic_centrality(self.graphs_dict[graph], nbunch=[0])
def time_harmonic_centrality_node_subset(self, graph):
_ = nx.harmonic_centrality(self.graphs_dict[graph], nbunch=[0, 1, 2, 3])
|
HarmonicCentralityBenchmarks
|
python
|
scrapy__scrapy
|
scrapy/downloadermiddlewares/robotstxt.py
|
{
"start": 877,
"end": 5147
}
|
class ____:
DOWNLOAD_PRIORITY: int = 1000
def __init__(self, crawler: Crawler):
if not crawler.settings.getbool("ROBOTSTXT_OBEY"):
raise NotConfigured
self._default_useragent: str = crawler.settings["USER_AGENT"]
self._robotstxt_useragent: str | None = crawler.settings["ROBOTSTXT_USER_AGENT"]
self.crawler: Crawler = crawler
self._parsers: dict[str, RobotParser | Deferred[RobotParser | None] | None] = {}
self._parserimpl: RobotParser = load_object(
crawler.settings.get("ROBOTSTXT_PARSER")
)
# check if parser dependencies are met, this should throw an error otherwise.
self._parserimpl.from_crawler(self.crawler, b"")
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
return cls(crawler)
@_warn_spider_arg
async def process_request(
self, request: Request, spider: Spider | None = None
) -> None:
if request.meta.get("dont_obey_robotstxt"):
return
if request.url.startswith("data:") or request.url.startswith("file:"):
return
rp = await self.robot_parser(request)
self.process_request_2(rp, request)
def process_request_2(self, rp: RobotParser | None, request: Request) -> None:
if rp is None:
return
useragent: str | bytes | None = self._robotstxt_useragent
if not useragent:
useragent = request.headers.get(b"User-Agent", self._default_useragent)
assert useragent is not None
if not rp.allowed(request.url, useragent):
logger.debug(
"Forbidden by robots.txt: %(request)s",
{"request": request},
extra={"spider": self.crawler.spider},
)
assert self.crawler.stats
self.crawler.stats.inc_value("robotstxt/forbidden")
raise IgnoreRequest("Forbidden by robots.txt")
async def robot_parser(self, request: Request) -> RobotParser | None:
url = urlparse_cached(request)
netloc = url.netloc
if netloc not in self._parsers:
self._parsers[netloc] = Deferred()
robotsurl = f"{url.scheme}://{url.netloc}/robots.txt"
robotsreq = Request(
robotsurl,
priority=self.DOWNLOAD_PRIORITY,
meta={"dont_obey_robotstxt": True},
callback=NO_CALLBACK,
)
assert self.crawler.engine
assert self.crawler.stats
try:
resp = await self.crawler.engine.download_async(robotsreq)
self._parse_robots(resp, netloc)
except Exception as e:
if not isinstance(e, IgnoreRequest):
logger.error(
"Error downloading %(request)s: %(f_exception)s",
{"request": request, "f_exception": e},
exc_info=True,
extra={"spider": self.crawler.spider},
)
self._robots_error(e, netloc)
self.crawler.stats.inc_value("robotstxt/request_count")
parser = self._parsers[netloc]
if isinstance(parser, Deferred):
return await maybe_deferred_to_future(parser)
return parser
def _parse_robots(self, response: Response, netloc: str) -> None:
assert self.crawler.stats
self.crawler.stats.inc_value("robotstxt/response_count")
self.crawler.stats.inc_value(
f"robotstxt/response_status_count/{response.status}"
)
rp = self._parserimpl.from_crawler(self.crawler, response.body)
rp_dfd = self._parsers[netloc]
assert isinstance(rp_dfd, Deferred)
self._parsers[netloc] = rp
rp_dfd.callback(rp)
def _robots_error(self, exc: Exception, netloc: str) -> None:
if not isinstance(exc, IgnoreRequest):
key = f"robotstxt/exception_count/{type(exc)}"
assert self.crawler.stats
self.crawler.stats.inc_value(key)
rp_dfd = self._parsers[netloc]
assert isinstance(rp_dfd, Deferred)
self._parsers[netloc] = None
rp_dfd.callback(None)
|
RobotsTxtMiddleware
|
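The middleware above caches one parser per netloc and asks it whether each request is allowed. The same allow/deny check can be sketched with the standard library's `urllib.robotparser`, independent of Scrapy:

```python
# Fetch <scheme>://<netloc>/robots.txt once per host, then ask the parser
# whether a given user agent may fetch a URL. No Scrapy objects involved.
from urllib.parse import urlparse
from urllib.robotparser import RobotFileParser

_parsers: dict[str, RobotFileParser] = {}

def allowed(url: str, user_agent: str) -> bool:
    netloc = urlparse(url).netloc
    if netloc not in _parsers:
        rp = RobotFileParser(f"{urlparse(url).scheme}://{netloc}/robots.txt")
        rp.read()                      # network fetch of robots.txt
        _parsers[netloc] = rp
    return _parsers[netloc].can_fetch(user_agent, url)

# allowed("https://example.com/page", "my-crawler")  -> True / False
```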
python
|
pandas-dev__pandas
|
pandas/tests/frame/test_alter_axes.py
|
{
"start": 114,
"end": 887
}
|
class ____:
# Tests for setting index/columns attributes directly (i.e. __setattr__)
def test_set_axis_setattr_index(self):
# GH 6785
# set the index manually
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=timezone.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
# Renaming
def test_assign_columns(self, float_frame):
float_frame["hi"] = "there"
df = float_frame.copy()
df.columns = ["foo", "bar", "baz", "quux", "foo2"]
tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False)
tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False)
|
TestDataFrameAlterAxes
|
python
|
eventlet__eventlet
|
tests/wsgi_test.py
|
{
"start": 3980,
"end": 4327
}
|
class ____(Site):
def __call__(self, env, start_response):
it = self.application(env, start_response)
yield from it
CONTENT_LENGTH = 'content-length'
def recvall(sock):
result = b''
while True:
chunk = sock.recv(16 << 10)
if chunk == b'':
return result
result += chunk
|
IterableSite
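recvall above simply drains a socket until the peer closes the connection. Restated here so the snippet is self-contained, it can be exercised with socket.socketpair; this is only an illustrative usage sketch, not part of the test module.

import socket

def recvall(sock):
    result = b''
    while True:
        chunk = sock.recv(16 << 10)
        if chunk == b'':
            return result
        result += chunk

a, b = socket.socketpair()
a.sendall(b'hello ' * 3)
a.close()                 # closing the writer delivers EOF, so recv() eventually returns b''
print(recvall(b))         # b'hello hello hello '
b.close()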
|
python
|
spack__spack
|
lib/spack/spack/binary_distribution.py
|
{
"start": 96171,
"end": 98993
}
|
class ____(IndexFetcher):
"""Fetcher for index.json, using separate index.json.hash as cache invalidation strategy"""
def __init__(self, url, local_hash, urlopen=web_util.urlopen):
self.url = url
self.local_hash = local_hash
self.urlopen = urlopen
self.headers = {"User-Agent": web_util.SPACK_USER_AGENT}
def get_remote_hash(self):
# Failure to fetch index.json.hash is not fatal
url_index_hash = url_util.join(self.url, "build_cache", "index.json.hash")
try:
response = self.urlopen(urllib.request.Request(url_index_hash, headers=self.headers))
remote_hash = response.read(64)
except OSError:
return None
# Validate the hash
if not re.match(rb"[a-f\d]{64}$", remote_hash):
return None
return remote_hash.decode("utf-8")
def conditional_fetch(self) -> FetchIndexResult:
# Do an intermediate fetch for the hash
# and a conditional fetch for the contents
# Early exit if our cache is up to date.
if self.local_hash and self.local_hash == self.get_remote_hash():
return FetchIndexResult(etag=None, hash=None, data=None, fresh=True)
# Otherwise, download index.json
url_index = url_util.join(self.url, "build_cache", spack.database.INDEX_JSON_FILE)
try:
response = self.urlopen(urllib.request.Request(url_index, headers=self.headers))
except OSError as e:
raise FetchIndexError(f"Could not fetch index from {url_index}", e) from e
try:
result = codecs.getreader("utf-8")(response).read()
except (ValueError, OSError) as e:
raise FetchIndexError(f"Remote index {url_index} is invalid") from e
computed_hash = compute_hash(result)
# We don't handle computed_hash != remote_hash here, which can happen
# when remote index.json and index.json.hash are out of sync, or if
# the hash algorithm changed.
        # The most likely scenario is that index.json got updated
# while we fetched index.json.hash. Warning about an issue thus feels
# wrong, as it's more of an issue with race conditions in the cache
# invalidation strategy.
# For now we only handle etags on http(s), since 304 error handling
# in s3:// is not there yet.
if urllib.parse.urlparse(self.url).scheme not in ("http", "https"):
etag = None
else:
etag = web_util.parse_etag(
response.headers.get("Etag", None) or response.headers.get("etag", None)
)
warn_v2_layout(self.url, "Fetching an index")
return FetchIndexResult(etag=etag, hash=computed_hash, data=result, fresh=False)
|
DefaultIndexFetcherV2
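The docstring sums up the strategy: index.json.hash is a tiny file whose only job is cache invalidation for the much larger index.json. A spack-free sketch of that handshake follows; the variable names are invented, and sha256 is an assumption that merely matches the 64-hex-character check in get_remote_hash.

import hashlib

# The client re-downloads the (potentially large) index only when the
# cheap-to-fetch hash file no longer matches the hash of its cached copy.
remote_index = b'{"database": {"installs": {}}}'
remote_hash = hashlib.sha256(remote_index).hexdigest()   # contents of index.json.hash

local_index = remote_index                               # what we cached last time
local_hash = hashlib.sha256(local_index).hexdigest()

if local_hash == remote_hash:
    print("fresh: reuse cached index")                   # FetchIndexResult(fresh=True)
else:
    print("stale: re-download index.json and recompute the hash")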
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/test_base.py
|
{
"start": 47749,
"end": 54284
}
|
class ____:
# Mostly the tests from common.py for which the results differ
# in py2 and py3 because ints and strings are uncomparable in py3
# (GH 13514)
@pytest.fixture
def simple_index(self) -> Index:
return Index([0, "a", 1, "b", 2, "c"])
def test_argsort(self, simple_index):
index = simple_index
with pytest.raises(TypeError, match="'>|<' not supported"):
index.argsort()
def test_numpy_argsort(self, simple_index):
index = simple_index
with pytest.raises(TypeError, match="'>|<' not supported"):
np.argsort(index)
def test_copy_name(self, simple_index):
# Check that "name" argument passed at initialization is honoured
# GH12309
index = simple_index
first = type(index)(index, copy=True, name="mario")
second = type(first)(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
tm.assert_index_equal(first, second)
assert first.name == "mario"
assert second.name == "mario"
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
s3 = s1 * s2
assert s3.index.name == "mario"
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
index = Index([1, 2], name="MyName")
index1 = index.copy()
tm.assert_index_equal(index, index1)
index2 = index.copy(name="NewName")
tm.assert_index_equal(index, index2, check_names=False)
assert index.name == "MyName"
assert index2.name == "NewName"
def test_unique_na(self):
idx = Index([2, np.nan, 2, 1], name="my_index")
expected = Index([2, np.nan, 1], name="my_index")
result = idx.unique()
tm.assert_index_equal(result, expected)
def test_logical_compat(self, simple_index):
index = simple_index
assert index.all() == index.values.all()
assert index.any() == index.values.any()
@pytest.mark.parametrize("how", ["any", "all"])
@pytest.mark.parametrize("dtype", [None, object, "category"])
@pytest.mark.parametrize(
"vals,expected",
[
([1, 2, 3], [1, 2, 3]),
([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]),
([1.0, 2.0, np.nan, 3.0], [1.0, 2.0, 3.0]),
(["A", "B", "C"], ["A", "B", "C"]),
(["A", np.nan, "B", "C"], ["A", "B", "C"]),
],
)
def test_dropna(self, how, dtype, vals, expected):
# GH 6194
index = Index(vals, dtype=dtype)
result = index.dropna(how=how)
expected = Index(expected, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("how", ["any", "all"])
@pytest.mark.parametrize(
"index,expected",
[
(
DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
),
(
DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", pd.NaT]),
DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
),
(
TimedeltaIndex(["1 days", "2 days", "3 days"]),
TimedeltaIndex(["1 days", "2 days", "3 days"]),
),
(
TimedeltaIndex([pd.NaT, "1 days", "2 days", "3 days", pd.NaT]),
TimedeltaIndex(["1 days", "2 days", "3 days"]),
),
(
PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
),
(
PeriodIndex(["2012-02", "2012-04", "NaT", "2012-05"], freq="M"),
PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
),
],
)
def test_dropna_dt_like(self, how, index, expected):
result = index.dropna(how=how)
tm.assert_index_equal(result, expected)
def test_dropna_invalid_how_raises(self):
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
Index([1, 2, 3]).dropna(how="xxx")
@pytest.mark.parametrize(
"index",
[
Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(["a", "b", np.nan]),
pd.to_datetime(["NaT"]),
pd.to_datetime(["NaT", "2000-01-01"]),
pd.to_datetime(["2000-01-01", "NaT", "2000-01-02"]),
pd.to_timedelta(["1 day", "NaT"]),
],
)
def test_is_monotonic_na(self, index):
assert index.is_monotonic_increasing is False
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is False
@pytest.mark.parametrize("dtype", ["f8", "m8[ns]", "M8[us]"])
@pytest.mark.parametrize("unique_first", [True, False])
def test_is_monotonic_unique_na(self, dtype, unique_first):
# GH 55755
index = Index([None, 1, 1], dtype=dtype)
if unique_first:
assert index.is_unique is False
assert index.is_monotonic_increasing is False
assert index.is_monotonic_decreasing is False
else:
assert index.is_monotonic_increasing is False
assert index.is_monotonic_decreasing is False
assert index.is_unique is False
def test_int_name_format(self, frame_or_series):
index = Index(["a", "b", "c"], name=0)
result = frame_or_series(list(range(3)), index=index)
assert "0" in repr(result)
def test_str_to_bytes_raises(self):
# GH 26447
index = Index([str(x) for x in range(10)])
msg = "^'str' object cannot be interpreted as an integer$"
with pytest.raises(TypeError, match=msg):
bytes(index)
@pytest.mark.filterwarnings("ignore:elementwise comparison failed:FutureWarning")
def test_index_with_tuple_bool(self):
# GH34123
# TODO: also this op right now produces FutureWarning from numpy
# https://github.com/numpy/numpy/issues/11521
idx = Index([("a", "b"), ("b", "c"), ("c", "a")])
result = idx == ("c", "a")
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
|
TestMixedIntIndex
|
python
|
ethereum__web3.py
|
tests/integration/go_ethereum/test_goethereum_http.py
|
{
"start": 5096,
"end": 5177
}
|
class ____(GoEthereumAsyncNetModuleTest):
pass
|
TestGoEthereumAsyncNetModuleTest
|
python
|
huggingface__transformers
|
src/transformers/models/dia/modeling_dia.py
|
{
"start": 11684,
"end": 14783
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Union[DiaEncoderConfig, DiaDecoderConfig], layer_idx: int, is_causal: bool = False):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.num_heads = self.config.num_attention_heads
self.num_key_value_heads = self.config.num_key_value_heads or self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.head_dim = getattr(config, "head_dim", config.hidden_size // self.num_heads)
self.scaling = 1
self.attention_dropout = 0.0
self.is_causal = is_causal
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
DiaSelfAttention
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/models/test_hooks.py
|
{
"start": 1145,
"end": 7462
}
|
class ____(BoringDataModule):
def __init__(self, called):
super().__init__()
def call(hook, fn, *args, **kwargs):
out = fn(*args, **kwargs)
d = {"name": hook}
if args:
d["args"] = args
if kwargs:
d["kwargs"] = kwargs
called.append(d)
return out
for h in get_members(LightningDataModule):
attr = getattr(self, h)
partial_h = partial(call, h, attr)
update_wrapper(partial_h, attr)
setattr(self, h, partial_h)
# override so that it gets called
def prepare_data(self): ...
@pytest.mark.parametrize("max_steps", [1, 2, 3])
def test_on_before_zero_grad_called(tmp_path, max_steps):
class CurrentTestModel(BoringModel):
on_before_zero_grad_called = 0
def on_before_zero_grad(self, optimizer):
self.on_before_zero_grad_called += 1
model = CurrentTestModel()
trainer = Trainer(devices=1, default_root_dir=tmp_path, max_steps=max_steps, max_epochs=2)
assert model.on_before_zero_grad_called == 0
trainer.fit(model)
assert max_steps == model.on_before_zero_grad_called
model.on_before_zero_grad_called = 0
trainer.test(model)
assert model.on_before_zero_grad_called == 0
def test_on_train_epoch_end_metrics_collection(tmp_path):
"""Test that progress bar metrics also get collected at the end of an epoch."""
num_epochs = 3
class CurrentModel(BoringModel):
def training_step(self, *args, **kwargs):
output = super().training_step(*args, **kwargs)
self.log_dict({"step_metric": torch.tensor(-1), "shared_metric": 100}, logger=False, prog_bar=True)
return output
def on_train_epoch_end(self):
epoch = self.current_epoch
# both scalar tensors and Python numbers are accepted
self.log_dict(
{f"epoch_metric_{epoch}": torch.tensor(epoch), "shared_metric": 111}, logger=False, prog_bar=True
)
model = CurrentModel()
trainer = Trainer(max_epochs=num_epochs, default_root_dir=tmp_path, overfit_batches=2)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
metrics = trainer.progress_bar_callback.get_metrics(trainer, model)
# metrics added in training step should be unchanged by epoch end method
assert metrics["step_metric"] == -1
# a metric shared in both methods gets overwritten by epoch_end
assert metrics["shared_metric"] == 111
# metrics are kept after each epoch
for i in range(num_epochs):
assert metrics[f"epoch_metric_{i}"] == i
@pytest.mark.parametrize(
("accelerator", "expected_device_str"),
[
pytest.param("gpu", "cuda:0", marks=RunIf(min_cuda_gpus=1)),
pytest.param("mps", "mps:0", marks=RunIf(mps=True)),
],
)
@mock.patch(
"lightning.pytorch.strategies.Strategy.lightning_module",
new_callable=PropertyMock,
)
def test_apply_batch_transfer_handler(model_getter_mock, accelerator, expected_device_str):
expected_device = torch.device(expected_device_str)
class CustomBatch:
def __init__(self, data):
self.samples = data[0]
self.targets = data[1]
class CurrentTestModel(BoringModel):
rank = 0
transfer_batch_to_device_hook_rank = None
on_after_batch_transfer_hook_rank = None
def on_after_batch_transfer(self, batch, dataloader_idx):
assert dataloader_idx == 0
assert batch.samples.device == batch.targets.device == expected_device
self.on_after_batch_transfer_hook_rank = self.rank
self.rank += 1
batch.targets *= 2
return batch
def transfer_batch_to_device(self, batch, device, dataloader_idx):
assert dataloader_idx == 0
self.transfer_batch_to_device_hook_rank = self.rank
self.rank += 1
batch.samples = batch.samples.to(device)
batch.targets = batch.targets.to(device)
return batch
model = CurrentTestModel()
batch = CustomBatch((torch.zeros(5, 32), torch.ones(5, 1, dtype=torch.long)))
trainer = Trainer(accelerator=accelerator, devices=1)
    # running .fit() would require us to implement custom data loaders; we mock the model reference instead
model_getter_mock.return_value = model
batch_gpu = trainer.strategy.batch_to_device(batch, expected_device)
assert model.transfer_batch_to_device_hook_rank == 0
assert model.on_after_batch_transfer_hook_rank == 1
assert batch_gpu.samples.device == batch_gpu.targets.device == expected_device
assert torch.allclose(batch_gpu.samples.cpu(), torch.zeros(5, 32))
assert torch.allclose(batch_gpu.targets.cpu(), torch.ones(5, 1, dtype=torch.long) * 2)
@RunIf(min_cuda_gpus=2, standalone=True)
def test_transfer_batch_hook_ddp(tmp_path):
"""Test custom data are properly moved to the right device using ddp."""
class CustomBatch:
def __init__(self, data):
self.samples = data[0]
def to(self, device, **kwargs):
self.samples = self.samples.to(device, **kwargs)
return self
def collate_fn(batch):
return CustomBatch(batch)
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
assert batch.samples.device == self.device
assert isinstance(batch_idx, int)
# the actual training step is not needed for the assertions
return super().training_step(torch.rand(1, 32, device=self.device), batch_idx)
def train_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), collate_fn=collate_fn)
model = TestModel()
model.validation_step = None
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=2,
limit_val_batches=0,
max_epochs=1,
strategy="ddp",
accelerator="gpu",
devices=2,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
def get_members(cls):
return {h for h, _ in getmembers(cls, predicate=isfunction) if not h.startswith("_")}
|
HookedDataModule
|
python
|
getsentry__sentry
|
src/sentry/buffer/inprocess.py
|
{
"start": 93,
"end": 630
}
|
class ____(Buffer):
"""
In-process buffer which computes changes in real-time.
**Note**: This does not actually buffer anything, and should only be used
in development and testing environments.
"""
def incr(
self,
model: type[models.Model],
columns: dict[str, int],
filters: dict[str, Any],
extra: dict[str, Any] | None = None,
signal_only: bool | None = None,
) -> None:
self.process(model, columns, filters, extra, signal_only)
|
InProcessBuffer
|
python
|
dask__distributed
|
distributed/core.py
|
{
"start": 35366,
"end": 40969
}
|
class ____:
"""Conveniently interact with a remote server
>>> remote = rpc(address) # doctest: +SKIP
>>> response = await remote.add(x=10, y=20) # doctest: +SKIP
One rpc object can be reused for several interactions.
Additionally, this object creates and destroys many comms as necessary
and so is safe to use in multiple overlapping communications.
When done, close comms explicitly.
>>> remote.close_comms() # doctest: +SKIP
"""
active: ClassVar[weakref.WeakSet[rpc]] = weakref.WeakSet()
comms = ()
address = None
def __init__(
self,
arg=None,
comm=None,
deserialize=True,
timeout=None,
connection_args=None,
serializers=None,
deserializers=None,
):
self.comms = {}
self.address = coerce_to_address(arg)
self.timeout = timeout
self.status = Status.running
self.deserialize = deserialize
self.serializers = serializers
self.deserializers = deserializers if deserializers is not None else serializers
self.connection_args = connection_args or {}
self._created = weakref.WeakSet()
rpc.active.add(self)
async def live_comm(self):
"""Get an open communication
Some comms to the ip/port target may be in current use by other
coroutines. We track this with the `comms` dict
:: {comm: True/False if open and ready for use}
This function produces an open communication, either by taking one
that we've already made or making a new one if they are all taken.
This also removes comms that have been closed.
When the caller is done with the stream they should set
self.comms[comm] = True
As is done in __getattr__ below.
"""
if self.status == Status.closed:
raise RPCClosed("RPC Closed")
to_clear = set()
open = False
for comm, open in self.comms.items():
if comm.closed():
to_clear.add(comm)
if open:
break
for s in to_clear:
del self.comms[s]
if not open or comm.closed():
comm = await connect(
self.address,
self.timeout,
deserialize=self.deserialize,
**self.connection_args,
)
comm.name = "rpc"
self.comms[comm] = False # mark as taken
return comm
def close_comms(self):
async def _close_comm(comm):
# Make sure we tell the peer to close
try:
if not comm.closed():
await comm.write({"op": "close", "reply": False})
await comm.close()
except OSError:
comm.abort()
tasks = []
for comm in list(self.comms):
if comm and not comm.closed():
task = asyncio.ensure_future(_close_comm(comm))
tasks.append(task)
for comm in list(self._created):
if comm and not comm.closed():
task = asyncio.ensure_future(_close_comm(comm))
tasks.append(task)
self.comms.clear()
return tasks
def __getattr__(self, key):
async def send_recv_from_rpc(**kwargs):
if self.serializers is not None and kwargs.get("serializers") is None:
kwargs["serializers"] = self.serializers
if self.deserializers is not None and kwargs.get("deserializers") is None:
kwargs["deserializers"] = self.deserializers
comm = None
try:
comm = await self.live_comm()
comm.name = "rpc." + key
result = await send_recv(comm=comm, op=key, **kwargs)
except (RPCClosed, CommClosedError) as e:
if comm:
raise type(e)(
f"Exception while trying to call remote method {key!r} using comm {comm!r}."
) from e
else:
raise type(e)(
f"Exception while trying to call remote method {key!r} before comm was established."
) from e
self.comms[comm] = True # mark as open
return result
return send_recv_from_rpc
async def close_rpc(self):
if self.status != Status.closed:
rpc.active.discard(self)
self.status = Status.closed
return await asyncio.gather(*self.close_comms())
def __enter__(self):
warnings.warn(
"the rpc synchronous context manager is deprecated",
DeprecationWarning,
stacklevel=2,
)
return self
def __exit__(self, exc_type, exc_value, traceback):
asyncio.ensure_future(self.close_rpc())
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.close_rpc()
def __del__(self):
if self.status != Status.closed:
rpc.active.discard(self)
self.status = Status.closed
still_open = [comm for comm in self.comms if not comm.closed()]
if still_open:
logger.warning(
"rpc object %s deleted with %d open comms", self, len(still_open)
)
for comm in still_open:
comm.abort()
def __repr__(self):
return "<rpc to %r, %d comms>" % (self.address, len(self.comms))
|
rpc
|
python
|
huggingface__transformers
|
tests/models/textnet/test_modeling_textnet.py
|
{
"start": 11854,
"end": 12132
}
|
class ____(BackboneTesterMixin, unittest.TestCase):
all_model_classes = (TextNetBackbone,) if is_torch_available() else ()
config_class = TextNetConfig
has_attentions = False
def setUp(self):
self.model_tester = TextNetModelTester(self)
|
TextNetBackboneTest
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_deprecations.py
|
{
"start": 80270,
"end": 81407
}
|
class ____(_fixtures.FixtureTest):
run_inserts = None
def test_bidirectional_no_load(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, backref="user", lazy="noload"
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
# try it on unsaved objects
with expect_noload_deprecation():
u1 = User(name="u1")
a1 = Address(email_address="e1")
a1.user = u1
session = fixture_session()
session.add(u1)
session.flush()
session.expunge_all()
a1 = session.get(Address, a1.id)
a1.user = None
session.flush()
session.expunge_all()
assert session.get(Address, a1.id).user is None
assert session.get(User, u1.id).addresses == []
|
ManyToOneTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/tpu_strategy_compilation_test.py
|
{
"start": 1876,
"end": 2839
}
|
class ____(test.TestCase):
def test_functions_compile_same_signature(self):
"""Tests compiling different functions with the same signature."""
strategy = get_tpu_strategy()
@def_function.function
def return_one():
def computation():
return constant_op.constant(1)
return strategy.run(computation)
@def_function.function
def return_two():
def computation():
return constant_op.constant(2)
return strategy.run(computation)
expected_result_ones = [1 for _ in range(0, strategy.num_replicas_in_sync)]
self.assertAllEqual(expected_result_ones,
strategy.experimental_local_results(return_one()))
expected_result_twos = [2 for _ in range(0, strategy.num_replicas_in_sync)]
self.assertAllEqual(expected_result_twos,
strategy.experimental_local_results(return_two()))
if __name__ == "__main__":
test.main()
|
TPUStrategyCompilationTest
|
python
|
sphinx-doc__sphinx
|
doc/development/tutorials/examples/recipe.py
|
{
"start": 2635,
"end": 3696
}
|
class ____(Index):
"""A custom index that creates an recipe matrix."""
name = 'recipe'
localname = 'Recipe Index'
shortname = 'Recipe'
def generate(self, docnames=None):
content = defaultdict(list)
# sort the list of recipes in alphabetical order
recipes = self.domain.get_objects()
recipes = sorted(recipes, key=lambda recipe: recipe[0])
# generate the expected output, shown below, from the above using the
        # first letter of the recipe as a key to group things
#
# name, subtype, docname, anchor, extra, qualifier, description
for _name, dispname, typ, docname, anchor, _priority in recipes:
content[dispname[0].lower()].append((
dispname,
0,
docname,
anchor,
docname,
'',
typ,
))
# convert the dict to the sorted list of tuples expected
content = sorted(content.items())
return content, True
|
RecipeIndex
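The comment block describes grouping entries by the first letter of their display name into the (letter, entries) structure Index.generate must return. That grouping step in isolation, with made-up recipe tuples standing in for domain.get_objects(), looks like this (a sketch only):

from collections import defaultdict

# (name, dispname, typ, docname, anchor, priority), as get_objects() would yield
recipes = [
    ("recipe.waffles", "Waffles", "Recipe", "waffles", "recipe-waffles", 0),
    ("recipe.omelette", "Omelette", "Recipe", "omelette", "recipe-omelette", 0),
    ("recipe.oatmeal", "Oatmeal", "Recipe", "oatmeal", "recipe-oatmeal", 0),
]

content = defaultdict(list)
for _name, dispname, typ, docname, anchor, _priority in sorted(recipes):
    # name, subtype, docname, anchor, extra, qualifier, description
    content[dispname[0].lower()].append((dispname, 0, docname, anchor, docname, "", typ))

print(sorted(content.items()))
# [('o', [<Oatmeal entry>, <Omelette entry>]), ('w', [<Waffles entry>])]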
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/target.py
|
{
"start": 15524,
"end": 16743
}
|
class ____(CompletionTarget):
"""Generic test target."""
def __init__(
self,
path: str,
module_path: t.Optional[str],
module_prefix: t.Optional[str],
base_path: str,
symlink: t.Optional[bool] = None,
) -> None:
super().__init__()
if symlink is None:
symlink = os.path.islink(to_bytes(path.rstrip(os.path.sep)))
self.name = path
self.path = path
self.base_path = base_path + '/' if base_path else None
self.symlink = symlink
name, ext = os.path.splitext(os.path.basename(self.path))
if module_path and is_subdir(path, module_path) and name != '__init__' and ext in MODULE_EXTENSIONS:
self.module = name[len(module_prefix or ''):].lstrip('_')
self.modules = (self.module,)
else:
self.module = None
self.modules = tuple()
aliases = [self.path, self.module]
parts = self.path.split('/')
for i in range(1, len(parts)):
alias = '%s/' % '/'.join(parts[:i])
aliases.append(alias)
aliases = [a for a in aliases if a]
self.aliases = tuple(sorted(aliases))
|
TestTarget
|
python
|
getsentry__sentry
|
tests/sentry/issues/endpoints/test_group_details.py
|
{
"start": 29988,
"end": 33114
}
|
class ____(APITestCase):
def test_delete_deferred(self) -> None:
self.login_as(user=self.user)
group = self.create_group()
hash = "x" * 32
GroupHash.objects.create(project=group.project, hash=hash, group=group)
url = f"/api/0/issues/{group.id}/"
response = self.client.delete(url, format="json")
assert response.status_code == 202, response.content
# Deletion was deferred, so it should still exist
assert Group.objects.get(id=group.id).status == GroupStatus.PENDING_DELETION
assert GroupHash.objects.filter(group_id=group.id).exists()
def test_delete_and_tasks_run(self) -> None:
self.login_as(user=self.user)
group = self.create_group()
hash = "x" * 32
GroupHash.objects.create(project=group.project, hash=hash, group=group)
url = f"/api/0/issues/{group.id}/"
with self.tasks():
response = self.client.delete(url, format="json")
assert response.status_code == 202, response.content
# Now we killed everything with fire
assert not Group.objects.filter(id=group.id).exists()
assert not GroupHash.objects.filter(group_id=group.id).exists()
with self.tasks(), outbox_runner():
schedule_hybrid_cloud_foreign_key_jobs()
with assume_test_silo_mode(SiloMode.CONTROL):
assert (
AuditLogEntry.objects.get(
event=audit_log.get_event_id("ISSUE_DELETE"),
).data["issue_id"]
== group.id
)
def test_delete_performance_issue(self) -> None:
"""Test that a performance issue can be deleted"""
self.login_as(user=self.user)
group = self.create_group(type=PerformanceSlowDBQueryGroupType.type_id)
GroupHash.objects.create(project=group.project, hash="x" * 32, group=group)
url = f"/api/0/issues/{group.id}/"
with self.tasks():
response = self.client.delete(url, format="json")
assert response.status_code == 202, response.content
assert not Group.objects.filter(id=group.id).exists()
assert not GroupHash.objects.filter(group_id=group.id).exists()
@override_settings(SENTRY_SELF_HOSTED=False)
def test_ratelimit(self) -> None:
self.login_as(user=self.user)
group = self.create_group()
url = f"/api/0/issues/{group.id}/"
with freeze_time("2000-01-01"):
for i in range(10):
self.client.delete(url, sort_by="date", limit=1)
response = self.client.delete(url, sort_by="date", limit=1)
assert response.status_code == 429
def test_collapse_release(self) -> None:
self.login_as(user=self.user)
group = self.create_group()
url = f"/api/0/issues/{group.id}/"
response = self.client.get(url)
assert response.status_code == 200
assert response.data["firstRelease"] is None
response = self.client.get(url, {"collapse": ["release"]})
assert "firstRelease" not in response.data
|
GroupDeleteTest
|
python
|
python-poetry__poetry
|
src/poetry/console/commands/add.py
|
{
"start": 747,
"end": 17061
}
|
class ____(InstallerCommand, InitCommand):
name = "add"
description = "Adds a new dependency to <comment>pyproject.toml</> and installs it."
arguments: ClassVar[list[Argument]] = [
argument("name", "The packages to add.", multiple=True)
]
options: ClassVar[list[Option]] = [
option(
"group",
"-G",
"The group to add the dependency to.",
flag=False,
default=MAIN_GROUP,
),
option(
"dev",
"D",
"Add as a development dependency. (shortcut for '-G dev')",
),
option("editable", "e", "Add vcs/path dependencies as editable."),
option(
"extras",
"E",
"Extras to activate for the dependency.",
flag=False,
multiple=True,
),
option(
"optional",
None,
"Add as an optional dependency to an extra.",
flag=False,
),
option(
"python",
None,
"Python version for which the dependency must be installed.",
flag=False,
),
option(
"platform",
None,
"Platforms for which the dependency must be installed.",
flag=False,
),
option(
"markers",
None,
"Environment markers which describe when the dependency should be installed.",
flag=False,
),
option(
"source",
None,
"Name of the source to use to install the package.",
flag=False,
),
option("allow-prereleases", None, "Accept prereleases."),
option(
"dry-run",
None,
"Output the operations but do not execute anything (implicitly enables"
" --verbose).",
),
option("lock", None, "Do not perform operations (only update the lockfile)."),
]
examples = """\
If you do not specify a version constraint, poetry will choose a suitable one based on\
the available package versions.
You can specify a package in the following forms:
- A single name (<b>requests</b>)
- A name and a constraint (<b>requests@^2.23.0</b>)
- A git url (<b>git+https://github.com/python-poetry/poetry.git</b>)
- A git url with a revision\
(<b>git+https://github.com/python-poetry/poetry.git#develop</b>)
- A subdirectory of a git repository\
(<b>git+https://github.com/python-poetry/poetry.git#subdirectory=tests/fixtures/sample_project</b>)
- A git SSH url (<b>git+ssh://git@github.com/python-poetry/poetry.git</b>)
- A git SSH url with a revision\
(<b>git+ssh://git@github.com/python-poetry/poetry.git#develop</b>)
- A file path (<b>../my-package/my-package.whl</b>)
- A directory (<b>../my-package/</b>)
- A url (<b>https://example.com/packages/my-package-0.1.0.tar.gz</b>)
"""
help = f"""\
The add command adds required packages to your <comment>pyproject.toml</> and installs\
them.
{examples}
"""
loggers: ClassVar[list[str]] = [
"poetry.repositories.pypi_repository",
"poetry.inspection.info",
]
def handle(self) -> int:
from poetry.core.constraints.version import parse_constraint
from tomlkit import array
from tomlkit import inline_table
from tomlkit import nl
from tomlkit import table
from poetry.factory import Factory
packages = self.argument("name")
if self.option("dev"):
group = "dev"
else:
group = self.option("group", self.default_group or MAIN_GROUP)
if self.option("extras") and len(packages) > 1:
raise ValueError(
"You can only specify one package when using the --extras option"
)
optional = self.option("optional")
if optional and group != MAIN_GROUP:
raise ValueError("You can only add optional dependencies to the main group")
# tomlkit types are awkward to work with, treat content as a mostly untyped
# dictionary.
content: dict[str, Any] = self.poetry.file.read()
project_content = content.get("project", table())
poetry_content = content.get("tool", {}).get("poetry", table())
groups_content = content.get("dependency-groups", {})
project_name = (
canonicalize_name(name)
if (name := project_content.get("name", poetry_content.get("name")))
else None
)
use_project_section = False
use_groups_section = False
project_dependency_names = []
# Run-Time Deps incl. extras
if group == MAIN_GROUP:
if (
"dependencies" in project_content
or "optional-dependencies" in project_content
):
use_project_section = True
if optional:
project_section = project_content.get(
"optional-dependencies", {}
).get(optional, array())
else:
project_section = project_content.get("dependencies", array())
project_dependency_names = [
Dependency.create_from_pep_508(dep).name for dep in project_section
]
else:
project_section = array()
poetry_section = poetry_content.get("dependencies", table())
# Dependency Groups
else:
if groups_content or "group" not in poetry_content:
use_groups_section = True
if not groups_content:
groups_content = table(is_super_table=True)
if group not in groups_content:
groups_content[group] = array("[\n]")
project_dependency_names = [
Dependency.create_from_pep_508(dep).name
for dep in groups_content[group]
]
poetry_section = (
poetry_content.get("group", {})
.get(group, {})
.get("dependencies", table())
)
project_section = []
existing_packages = self.get_existing_packages_from_input(
packages, poetry_section, project_dependency_names
)
if existing_packages:
self.notify_about_existing_packages(existing_packages)
packages = [name for name in packages if name not in existing_packages]
if not packages:
self.line("Nothing to add.")
return 0
if optional and not use_project_section:
self.line_error(
"<warning>Optional dependencies will not be added to extras"
" in legacy mode. Consider converting your project to use the [project]"
" section.</warning>"
)
requirements = self._determine_requirements(
packages,
allow_prereleases=self.option("allow-prereleases") or None,
source=self.option("source"),
)
for _constraint in requirements:
version = _constraint.get("version")
if version is not None:
# Validate version constraint
assert isinstance(version, str)
parse_constraint(version)
constraint: dict[str, Any] = inline_table()
for key, value in _constraint.items():
if key == "name":
continue
constraint[key] = value
if optional:
constraint["optional"] = True
if self.option("allow-prereleases"):
constraint["allow-prereleases"] = True
if self.option("extras"):
extras = []
for extra in self.option("extras"):
extras += extra.split()
constraint["extras"] = extras
if self.option("editable"):
if "git" in _constraint or "path" in _constraint:
constraint["develop"] = True
else:
self.line_error(
"\n"
"<error>Failed to add packages. "
"Only vcs/path dependencies support editable installs. "
f"<c1>{_constraint['name']}</c1> is neither."
)
self.line_error("\nNo changes were applied.")
return 1
if python := self.option("python"):
constraint["python"] = python
if platform := self.option("platform"):
constraint["platform"] = platform
if markers := self.option("markers"):
constraint["markers"] = markers
if source := self.option("source"):
constraint["source"] = source
if len(constraint) == 1 and "version" in constraint:
constraint = constraint["version"]
constraint_name = _constraint["name"]
assert isinstance(constraint_name, str)
canonical_constraint_name = canonicalize_name(constraint_name)
if canonical_constraint_name == project_name:
self.line_error(
f"<error>Cannot add dependency on <c1>{constraint_name}</c1> to"
" project with the same name."
)
self.line_error("\nNo changes were applied.")
return 1
with contextlib.suppress(ValueError):
self.poetry.package.dependency_group(group).remove_dependency(
constraint_name
)
dependency = Factory.create_dependency(
constraint_name,
constraint,
groups=[group],
root_dir=self.poetry.file.path.parent,
)
self.poetry.package.add_dependency(dependency)
if use_project_section or use_groups_section:
pep_section = (
project_section if use_project_section else groups_content[group]
)
try:
index = project_dependency_names.index(canonical_constraint_name)
except ValueError:
pep_section.append(dependency.to_pep_508())
else:
pep_section[index] = dependency.to_pep_508()
# create a second constraint for tool.poetry.dependencies with keys
# that cannot be stored in the project section
poetry_constraint: dict[str, Any] = inline_table()
if not isinstance(constraint, str):
for key in ["allow-prereleases", "develop", "source"]:
if value := constraint.get(key):
poetry_constraint[key] = value
if poetry_constraint:
# add marker related keys to avoid ambiguity
for key in ["python", "platform"]:
if value := constraint.get(key):
poetry_constraint[key] = value
else:
poetry_constraint = constraint
if poetry_constraint:
for key in poetry_section:
if canonicalize_name(key) == canonical_constraint_name:
poetry_section[key] = poetry_constraint
break
else:
poetry_section[constraint_name] = poetry_constraint
if optional:
extra_name = canonicalize_name(optional)
# _in_extras must be set after converting the dependency to PEP 508
# and adding it to the project section to avoid a redundant extra marker
dependency._in_extras = [extra_name]
self._add_dependency_to_extras(dependency, extra_name)
# Refresh the locker
if project_section:
assert group == MAIN_GROUP
if optional:
if "optional-dependencies" not in project_content:
project_content["optional-dependencies"] = table()
if optional not in project_content["optional-dependencies"]:
project_content["optional-dependencies"][optional] = project_section
elif "dependencies" not in project_content:
project_content["dependencies"] = project_section
if poetry_section:
if "tool" not in content:
content["tool"] = table()
if "poetry" not in content["tool"]:
content["tool"]["poetry"] = poetry_content
if group == MAIN_GROUP:
if "dependencies" not in poetry_content:
poetry_content["dependencies"] = poetry_section
else:
if "group" not in poetry_content:
poetry_content["group"] = table(is_super_table=True)
groups = poetry_content["group"]
if group not in groups:
groups[group] = table()
groups.add(nl())
if "dependencies" not in groups[group]:
groups[group]["dependencies"] = poetry_section
if groups_content and group != MAIN_GROUP:
if "dependency-groups" not in content:
content["dependency-groups"] = table()
content["dependency-groups"][group] = groups_content[group]
self.poetry.locker.set_pyproject_data(content)
self.installer.set_locker(self.poetry.locker)
# Cosmetic new line
self.line("")
self.installer.set_package(self.poetry.package)
self.installer.dry_run(self.option("dry-run"))
self.installer.verbose(self.io.is_verbose())
self.installer.update(True)
self.installer.execute_operations(not self.option("lock"))
self.installer.whitelist([r["name"] for r in requirements])
status = self.installer.run()
if status == 0 and not self.option("dry-run"):
assert isinstance(content, TOMLDocument)
self.poetry.file.write(content)
return status
def get_existing_packages_from_input(
self,
packages: list[str],
section: dict[str, Any],
project_dependencies: Collection[NormalizedName],
) -> list[str]:
existing_packages = []
for name in packages:
normalized_name = canonicalize_name(name)
if normalized_name in project_dependencies:
existing_packages.append(name)
continue
for key in section:
if normalized_name == canonicalize_name(key):
existing_packages.append(name)
return existing_packages
@property
def _hint_update_packages(self) -> str:
return (
"\nIf you want to update it to the latest compatible version, you can use"
" `poetry update package`.\nIf you prefer to upgrade it to the latest"
" available version, you can use `poetry add package@latest`.\n"
)
def notify_about_existing_packages(self, existing_packages: list[str]) -> None:
self.line(
"The following packages are already present in the pyproject.toml and will"
" be skipped:\n"
)
for name in existing_packages:
self.line(f" - <c1>{name}</c1>")
self.line(self._hint_update_packages)
def _add_dependency_to_extras(
self, dependency: Dependency, extra_name: NormalizedName
) -> None:
extras = dict(self.poetry.package.extras)
extra_deps = []
replaced = False
for dep in extras.get(extra_name, ()):
if dep.name == dependency.name:
extra_deps.append(dependency)
replaced = True
else:
extra_deps.append(dep)
if not replaced:
extra_deps.append(dependency)
extras[extra_name] = extra_deps
self.poetry.package.extras = extras
|
AddCommand
|
python
|
getsentry__sentry
|
src/sentry/taskworker/app.py
|
{
"start": 213,
"end": 315
}
|
class ____(Protocol):
def add(self, key: str, value: str, timeout: int) -> bool: ...
|
AtMostOnceStore
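Since this is a typing Protocol, any object exposing a compatible add(key, value, timeout) -> bool satisfies it structurally. A minimal in-memory stand-in, purely for illustration (Sentry's real stores are cache-backed and honor the timeout):

class InMemoryAtMostOnceStore:
    """Protocol-compatible toy store; ignores timeout for brevity."""
    def __init__(self) -> None:
        self._seen: dict[str, str] = {}

    def add(self, key: str, value: str, timeout: int) -> bool:
        # True only the first time a key is added, mimicking cache.add()
        # semantics used for at-most-once task execution.
        if key in self._seen:
            return False
        self._seen[key] = value
        return True

store = InMemoryAtMostOnceStore()
print(store.add("task:123", "1", 60))  # True  -> safe to run the task
print(store.add("task:123", "1", 60))  # False -> already recorded, skip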
|
python
|
Textualize__textual
|
tests/test_lazy.py
|
{
"start": 785,
"end": 1606
}
|
class ____(App):
def compose(self) -> ComposeResult:
with Reveal(Vertical()):
yield Label(id="foo")
yield Label(id="bar")
yield Label(id="baz")
async def test_lazy_reveal():
app = RevealApp()
async with app.run_test() as pilot:
        # Only the first child is mounted and displayed initially
assert app.query_one("#foo").display
# Next two aren't mounted yet
assert not app.query("#baz")
# All children should be visible after a pause
await pilot.pause()
for n in range(3):
await pilot.pause(1 / 60)
await pilot.pause()
assert app.query_one("#foo").display
assert app.query_one("#bar").display
assert app.query_one("#baz").display
|
RevealApp
|
python
|
oauthlib__oauthlib
|
oauthlib/openid/connect/core/exceptions.py
|
{
"start": 359,
"end": 408
}
|
class ____(OAuth2Error):
pass
|
OpenIDClientError
|
python
|
getsentry__sentry
|
tests/snuba/api/endpoints/test_organization_trace_item_attributes.py
|
{
"start": 1636,
"end": 12818
}
|
class ____(
OrganizationTraceItemAttributesEndpointTestBase, OurLogTestCase
):
feature_flags = {"organizations:ourlogs-enabled": True}
item_type = SupportedTraceItemType.LOGS
def test_no_feature(self) -> None:
response = self.do_request(features={})
assert response.status_code == 404, response.content
def test_invalid_item_type(self) -> None:
response = self.do_request(query={"itemType": "invalid"})
assert response.status_code == 400, response.content
assert response.data == {
"itemType": [
ErrorDetail(string='"invalid" is not a valid choice.', code="invalid_choice")
],
}
def test_no_projects(self) -> None:
response = self.do_request()
assert response.status_code == 200, response.content
assert response.data == []
def test_substring_matching_logs(self) -> None:
logs = [
self.create_ourlog(
extra_data={"body": "log message 1"},
organization=self.organization,
project=self.project,
attributes={
"test.attribute1": {"string_value": "value1"},
"test.attribute2": {"string_value": "value2"},
"another.attribute": {"string_value": "value3"},
},
),
self.create_ourlog(
extra_data={"body": "log message 2"},
organization=self.organization,
project=self.project,
attributes={
"test.attribute3": {"string_value": "value4"},
"different.attr": {"string_value": "value5"},
},
),
]
self.store_ourlogs(logs)
# Test with empty prefix (should return all attributes)
response = self.do_request(query={"substringMatch": ""})
assert response.status_code == 200, response.content
keys = {item["key"] for item in response.data}
assert len(keys) >= 6
assert "test.attribute1" in keys
assert "test.attribute2" in keys
assert "test.attribute3" in keys
assert "another.attribute" in keys
assert "different.attr" in keys
assert "severity" in keys
        # With a prefix, only the attributes that start with "tes" should match
response = self.do_request(query={"substringMatch": "tes"})
assert response.status_code == 200, response.content
keys = {item["key"] for item in response.data}
assert len(keys) == 3
assert "test.attribute1" in keys
assert "test.attribute2" in keys
assert "test.attribute3" in keys
assert "another.attribute" not in keys
assert "different.attr" not in keys
def test_all_attributes(self) -> None:
logs = [
self.create_ourlog(
organization=self.organization,
project=self.project,
attributes={
"test.attribute1": {"string_value": "value1"},
"test.attribute2": {"string_value": "value2"},
},
),
]
self.store_ourlogs(logs)
response = self.do_request()
assert response.status_code == 200, response.content
keys = {item["key"] for item in response.data}
assert len(keys) >= 3
assert "test.attribute1" in keys
assert "test.attribute2" in keys
assert "severity" in keys
def test_body_attribute(self) -> None:
logs = [
self.create_ourlog(
organization=self.organization,
project=self.project,
attributes={
"message": {"string_value": "value1"},
},
),
]
self.store_ourlogs(logs)
response = self.do_request()
assert response.status_code == 200, response.content
keys = {item["key"] for item in response.data}
assert keys == {"severity", "message", "project", "tags[message,string]"}
def test_disallowed_attributes(self) -> None:
logs = [
self.create_ourlog(
organization=self.organization,
project=self.project,
attributes={
"sentry.item_type": {"string_value": "value1"}, # Disallowed
"sentry.item_type2": {"string_value": "value2"}, # Allowed
},
),
]
self.store_ourlogs(logs)
response = self.do_request()
assert response.status_code == 200, response.content
keys = {item["key"] for item in response.data}
assert keys == {"severity", "message", "project", "sentry.item_type2"}
def test_strip_sentry_prefix_from_message_parameter(self) -> None:
"""Test that sentry.message.parameter.* wildcard matching works in attribute listing"""
logs = [
self.create_ourlog(
organization=self.organization,
project=self.project,
attributes={
"sentry.message.parameter.username": {"string_value": "alice"},
"sentry.message.parameter.ip": {"string_value": "192.168.1.1"},
"sentry.message.parameter.0": {"string_value": "laptop"},
"sentry.message.parameter.1": {"string_value": "charlie"},
},
),
self.create_ourlog(
organization=self.organization,
project=self.project,
attributes={
"sentry.message.parameter.0": {"bool_value": 1},
"sentry.message.parameter.1": {"int_value": 5},
"sentry.message.parameter.2": {"double_value": 10},
"sentry.message.parameter.value": {"double_value": 15},
},
),
]
self.store_ourlogs(logs)
response = self.do_request(query={"attributeType": "string"})
assert response.status_code == 200, response.content
assert sorted(response.data, key=lambda key: key["key"]) == [
{
"key": "message",
"name": "message",
"attributeSource": {
"source_type": "sentry",
},
"secondaryAliases": ["log.body"],
},
{
"key": "message.parameter.0",
"name": "message.parameter.0",
"attributeSource": {
"source_type": "sentry",
"is_transformed_alias": True,
},
},
{
"key": "message.parameter.1",
"name": "message.parameter.1",
"attributeSource": {
"source_type": "sentry",
"is_transformed_alias": True,
},
},
{
"key": "message.parameter.ip",
"name": "message.parameter.ip",
"attributeSource": {
"source_type": "sentry",
"is_transformed_alias": True,
},
},
{
"key": "message.parameter.username",
"name": "message.parameter.username",
"attributeSource": {
"source_type": "sentry",
"is_transformed_alias": True,
},
},
{
"key": "project",
"name": "project",
"attributeSource": {
"source_type": "sentry",
},
},
{
"key": "severity",
"name": "severity",
"attributeSource": {
"source_type": "sentry",
},
"secondaryAliases": ["log.severity_text", "severity_text"],
},
]
sources = {item["attributeSource"]["source_type"] for item in response.data}
assert sources == {"sentry"}
message_param_items = [
item for item in response.data if item["key"].startswith("message.parameter.")
]
for item in message_param_items:
assert item["attributeSource"]["is_transformed_alias"] is True
response = self.do_request(query={"attributeType": "number"})
assert response.status_code == 200, response.content
assert sorted(response.data, key=lambda key: key["key"]) == [
{
"key": "observed_timestamp",
"name": "observed_timestamp",
"attributeSource": {
"source_type": "sentry",
},
},
{
"key": "severity_number",
"name": "severity_number",
"attributeSource": {
"source_type": "sentry",
},
"secondaryAliases": ["log.severity_number"],
},
{
"key": "tags[message.parameter.0,number]",
"name": "message.parameter.0",
"attributeSource": {
"source_type": "sentry",
"is_transformed_alias": True,
},
},
{
"key": "tags[message.parameter.1,number]",
"name": "message.parameter.1",
"attributeSource": {
"source_type": "sentry",
"is_transformed_alias": True,
},
},
{
"key": "tags[message.parameter.2,number]",
"name": "message.parameter.2",
"attributeSource": {
"source_type": "sentry",
"is_transformed_alias": True,
},
},
{
"key": "tags[message.parameter.value,number]",
"name": "message.parameter.value",
"attributeSource": {
"source_type": "sentry",
"is_transformed_alias": True,
},
},
{
"key": "timestamp_precise",
"name": "timestamp_precise",
"attributeSource": {
"source_type": "sentry",
},
},
]
def test_attribute_collision(self) -> None:
logs = [
self.create_ourlog(
organization=self.organization,
project=self.project,
attributes={"timestamp": "bar", "severity": "baz"},
),
]
self.store_ourlogs(logs)
response = self.do_request()
assert response.status_code == 200, response.content
keys = {item["key"] for item in response.data}
assert keys == {
"message",
"project",
"severity",
"tags[severity,string]",
"tags[timestamp,string]",
}
|
OrganizationTraceItemAttributesEndpointLogsTest
|
python
|
wandb__wandb
|
wandb/sdk/artifacts/_generated/fragments.py
|
{
"start": 4501,
"end": 4653
}
|
class ____(GQLResult):
typename__: Typename[Literal["File"]] = "File"
name: str
direct_url: str = Field(alias="directUrl")
|
FileWithUrlFragment
|
python
|
conda__conda
|
conda/plugins/types.py
|
{
"start": 8832,
"end": 9023
}
|
class ____(CondaPlugin):
"""
Return type to use when defining conda health checks plugin hook.
"""
name: str
action: Callable[[str, bool], None]
@dataclass
|
CondaHealthCheck
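The name/action pair is what a health-check plugin hands back to conda; the action is any Callable[[str, bool], None] that receives an environment prefix and a verbose flag. The snippet below only illustrates the callable's shape with an invented disk-usage check; how conda collects and invokes these (e.g. during `conda doctor`) is an assumption about the surrounding plugin machinery and is not shown.

import shutil

def example_disk_usage_check(prefix: str, verbose: bool) -> None:
    # Toy action matching Callable[[str, bool], None]: report free space
    # for the given environment prefix (falls back to "." if empty).
    usage = shutil.disk_usage(prefix or ".")
    if verbose:
        print(f"{prefix or '.'}: {usage.free / 1e9:.1f} GB free of {usage.total / 1e9:.1f} GB")

# A plugin would wrap this as CondaHealthCheck(name="disk-usage",
# action=example_disk_usage_check) and yield it from its hook.
example_disk_usage_check(".", verbose=True)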
|
python
|
huggingface__transformers
|
src/transformers/models/deit/modeling_deit.py
|
{
"start": 23721,
"end": 26920
}
|
class ____(DeiTPreTrainedModel):
def __init__(self, config: DeiTConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.deit = DeiTModel(config, add_pooling_layer=False)
# Classifier head
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
**kwargs: Unpack[TransformersKwargs],
) -> ImageClassifierOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, DeiTForImageClassification
>>> import torch
>>> from PIL import Image
>>> import requests
>>> torch.manual_seed(3) # doctest: +IGNORE_RESULT
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # note: we are loading a DeiTForImageClassificationWithTeacher from the hub here,
>>> # so the head will be randomly initialized, hence the predictions will be random
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
>>> model = DeiTForImageClassification.from_pretrained("facebook/deit-base-distilled-patch16-224")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: Polaroid camera, Polaroid Land camera
```"""
outputs: BaseModelOutputWithPooling = self.deit(
pixel_values,
interpolate_pos_encoding=interpolate_pos_encoding,
**kwargs,
)
sequence_output = outputs.last_hidden_state
logits = self.classifier(sequence_output[:, 0, :])
# we don't use the distillation token
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`DeiTForImageClassificationWithTeacher`].
"""
)
|
DeiTForImageClassification
|
python
|
scipy__scipy
|
scipy/optimize/tests/test_optimize.py
|
{
"start": 3407,
"end": 5357
}
|
class ____:
""" Base test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def setup_method(self):
self.F = np.array([[1, 1, 1],
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[1, 0, 0]])
self.K = np.array([1., 0.3, 0.5])
self.startparams = np.zeros(3, np.float64)
self.solution = np.array([0., -0.524869316, 0.487525860])
self.maxiter = 1000
self.funccalls = threading.local()
self.gradcalls = threading.local()
self.trace = threading.local()
def func(self, x):
if not hasattr(self.funccalls, 'c'):
self.funccalls.c = 0
if not hasattr(self.gradcalls, 'c'):
self.gradcalls.c = 0
self.funccalls.c += 1
if self.funccalls.c > 6000:
raise RuntimeError("too many iterations in optimization routine")
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
f = logZ - np.dot(self.K, x)
if not hasattr(self.trace, 't'):
self.trace.t = []
self.trace.t.append(np.copy(x))
return f
def grad(self, x):
if not hasattr(self.gradcalls, 'c'):
self.gradcalls.c = 0
self.gradcalls.c += 1
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.transpose(), p) - self.K
def hess(self, x):
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.T,
np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))
def hessp(self, x, p):
return np.dot(self.hess(x), p)
|
CheckOptimize
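The subclasses of this fixture check func/grad/hess against the documented solution. Outside the test harness the same objective can be handed straight to scipy.optimize.minimize; the data is restated below so the sketch runs standalone (it is not the actual test code).

import numpy as np
from scipy.optimize import minimize

F = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 1], [1, 0, 0], [1, 0, 0]], dtype=float)
K = np.array([1.0, 0.3, 0.5])

def func(x):
    log_pdot = F @ x
    return np.log(np.sum(np.exp(log_pdot))) - K @ x

def grad(x):
    log_pdot = F @ x
    p = np.exp(log_pdot - np.log(np.sum(np.exp(log_pdot))))
    return F.T @ p - K

res = minimize(func, np.zeros(3), jac=grad, method="BFGS")
print(res.x)  # approximately [0., -0.5249, 0.4875], the solution recorded above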
|
python
|
doocs__leetcode
|
solution/2500-2599/2563.Count the Number of Fair Pairs/Solution.py
|
{
"start": 0,
"end": 325
}
|
class ____:
def countFairPairs(self, nums: List[int], lower: int, upper: int) -> int:
nums.sort()
ans = 0
for i, x in enumerate(nums):
j = bisect_left(nums, lower - x, lo=i + 1)
k = bisect_left(nums, upper - x + 1, lo=i + 1)
ans += k - j
return ans
|
Solution
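After sorting, the valid partners j > i for each i form a contiguous slice of the suffix, so two bisect_left calls count them in O(log n) per element (O(n log n) overall). Restated as a plain function so it runs outside the judge; the expected count of 6 for this input was checked by hand.

from bisect import bisect_left
from typing import List

def count_fair_pairs(nums: List[int], lower: int, upper: int) -> int:
    nums = sorted(nums)
    ans = 0
    for i, x in enumerate(nums):
        # j..k-1 are the indices > i with lower - x <= nums[idx] <= upper - x
        j = bisect_left(nums, lower - x, lo=i + 1)
        k = bisect_left(nums, upper - x + 1, lo=i + 1)
        ans += k - j
    return ans

print(count_fair_pairs([0, 1, 7, 4, 4, 5], lower=3, upper=6))  # 6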
|
python
|
kamyu104__LeetCode-Solutions
|
Python/bomb-enemy.py
|
{
"start": 38,
"end": 1359
}
|
class ____(object):
def maxKilledEnemies(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
result = 0
if not grid or not grid[0]:
return result
down = [[0 for _ in xrange(len(grid[0]))] for _ in xrange(len(grid))]
right = [[0 for _ in xrange(len(grid[0]))] for _ in xrange(len(grid))]
for i in reversed(xrange(len(grid))):
for j in reversed(xrange(len(grid[0]))):
if grid[i][j] != 'W':
if i + 1 < len(grid):
down[i][j] = down[i + 1][j]
if j + 1 < len(grid[0]):
right[i][j] = right[i][j + 1]
if grid[i][j] == 'E':
down[i][j] += 1
right[i][j] += 1
up = [0 for _ in xrange(len(grid[0]))]
for i in xrange(len(grid)):
left = 0
for j in xrange(len(grid[0])):
if grid[i][j] == 'W':
up[j], left = 0, 0
elif grid[i][j] == 'E':
up[j] += 1
left += 1
else:
result = max(result,
left + up[j] + right[i][j] + down[i][j])
return result
|
Solution
|
python
|
django__django
|
tests/auth_tests/models/invalid_models.py
|
{
"start": 100,
"end": 619
}
|
class ____(AbstractBaseUser):
"""
A user with a non-unique username.
This model is not invalid if it is used with a custom authentication
backend which supports non-unique usernames.
"""
username = models.CharField(max_length=30)
email = models.EmailField(blank=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["email"]
objects = UserManager()
|
CustomUserNonUniqueUsername
|
python
|
python__mypy
|
mypyc/analysis/selfleaks.py
|
{
"start": 905,
"end": 5836
}
|
class ____(OpVisitor[GenAndKill]):
"""Analyze whether 'self' may be seen by arbitrary code in '__init__'.
More formally, the set is not empty if along some path from IR entry point
arbitrary code could have been executed that has access to 'self'.
(We don't consider access via 'gc.get_objects()'.)
"""
def __init__(self, self_reg: Register) -> None:
self.self_reg = self_reg
def visit_goto(self, op: Goto) -> GenAndKill:
return CLEAN
def visit_branch(self, op: Branch) -> GenAndKill:
return CLEAN
def visit_return(self, op: Return) -> GenAndKill:
# Consider all exits from the function 'dirty' since they implicitly
# cause 'self' to be returned.
return DIRTY
def visit_unreachable(self, op: Unreachable) -> GenAndKill:
return CLEAN
def visit_assign(self, op: Assign) -> GenAndKill:
if op.src is self.self_reg or op.dest is self.self_reg:
return DIRTY
return CLEAN
def visit_assign_multi(self, op: AssignMulti) -> GenAndKill:
return CLEAN
def visit_set_mem(self, op: SetMem) -> GenAndKill:
return CLEAN
def visit_call(self, op: Call) -> GenAndKill:
fn = op.fn
if fn.class_name and fn.name == "__init__":
self_type = op.fn.sig.args[0].type
assert isinstance(self_type, RInstance), self_type
cl = self_type.class_ir
if not cl.init_self_leak:
return CLEAN
return self.check_register_op(op)
def visit_method_call(self, op: MethodCall) -> GenAndKill:
return self.check_register_op(op)
def visit_load_error_value(self, op: LoadErrorValue) -> GenAndKill:
return CLEAN
def visit_load_literal(self, op: LoadLiteral) -> GenAndKill:
return CLEAN
def visit_get_attr(self, op: GetAttr) -> GenAndKill:
cl = op.class_type.class_ir
if cl.get_method(op.attr):
# Property -- calls a function
return self.check_register_op(op)
return CLEAN
def visit_set_attr(self, op: SetAttr) -> GenAndKill:
cl = op.class_type.class_ir
if cl.get_method(op.attr):
# Property - calls a function
return self.check_register_op(op)
return CLEAN
def visit_load_static(self, op: LoadStatic) -> GenAndKill:
return CLEAN
def visit_init_static(self, op: InitStatic) -> GenAndKill:
return self.check_register_op(op)
def visit_tuple_get(self, op: TupleGet) -> GenAndKill:
return CLEAN
def visit_tuple_set(self, op: TupleSet) -> GenAndKill:
return self.check_register_op(op)
def visit_box(self, op: Box) -> GenAndKill:
return self.check_register_op(op)
def visit_unbox(self, op: Unbox) -> GenAndKill:
return self.check_register_op(op)
def visit_cast(self, op: Cast) -> GenAndKill:
return self.check_register_op(op)
def visit_raise_standard_error(self, op: RaiseStandardError) -> GenAndKill:
return CLEAN
def visit_call_c(self, op: CallC) -> GenAndKill:
return self.check_register_op(op)
def visit_primitive_op(self, op: PrimitiveOp) -> GenAndKill:
return self.check_register_op(op)
def visit_truncate(self, op: Truncate) -> GenAndKill:
return CLEAN
def visit_extend(self, op: Extend) -> GenAndKill:
return CLEAN
def visit_load_global(self, op: LoadGlobal) -> GenAndKill:
return CLEAN
def visit_int_op(self, op: IntOp) -> GenAndKill:
return CLEAN
def visit_comparison_op(self, op: ComparisonOp) -> GenAndKill:
return CLEAN
def visit_float_op(self, op: FloatOp) -> GenAndKill:
return CLEAN
def visit_float_neg(self, op: FloatNeg) -> GenAndKill:
return CLEAN
def visit_float_comparison_op(self, op: FloatComparisonOp) -> GenAndKill:
return CLEAN
def visit_load_mem(self, op: LoadMem) -> GenAndKill:
return CLEAN
def visit_get_element_ptr(self, op: GetElementPtr) -> GenAndKill:
return CLEAN
def visit_set_element(self, op: SetElement) -> GenAndKill:
return CLEAN
def visit_load_address(self, op: LoadAddress) -> GenAndKill:
return CLEAN
def visit_keep_alive(self, op: KeepAlive) -> GenAndKill:
return CLEAN
def visit_unborrow(self, op: Unborrow) -> GenAndKill:
return CLEAN
def check_register_op(self, op: RegisterOp) -> GenAndKill:
if any(src is self.self_reg for src in op.sources()):
return DIRTY
return CLEAN
def analyze_self_leaks(
blocks: list[BasicBlock], self_reg: Register, cfg: CFG
) -> AnalysisResult[None]:
return run_analysis(
blocks=blocks,
cfg=cfg,
gen_and_kill=SelfLeakedVisitor(self_reg),
initial=set(),
backward=False,
kind=MAYBE_ANALYSIS,
)
|
SelfLeakedVisitor
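The visitor above only supplies per-op gen/kill facts (CLEAN/DIRTY); `run_analysis` propagates them forward as a "maybe" analysis. The sketch below is a generic, self-contained illustration of that propagation over a toy block graph, in plain Python rather than the mypyc API:
# Forward "maybe" dataflow: a fact holds after a block if it is generated there,
# or flows in from ANY predecessor (union) and is not killed.
def may_analysis(blocks, succs, gen, kill):
    preds = {b: [p for p in blocks if b in succs[p]] for b in blocks}
    after = {b: set() for b in blocks}
    changed = True
    while changed:
        changed = False
        for b in blocks:
            inset = set()
            for p in preds[b]:
                inset |= after[p]
            outset = (inset | gen[b]) - kill[b]
            if outset != after[b]:
                after[b] = outset
                changed = True
    return after
# 'self' escapes in block "b" (e.g. passed to arbitrary code) and is never killed afterwards.
blocks = ["entry", "b", "exit"]
succs = {"entry": ["b"], "b": ["exit"], "exit": []}
gen = {"entry": set(), "b": {"self_leaked"}, "exit": set()}
kill = {b: set() for b in blocks}
print(may_analysis(blocks, succs, gen, kill)["exit"])  # {'self_leaked'}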
|
python
|
django__django
|
tests/admin_changelist/models.py
|
{
"start": 1178,
"end": 1355
}
|
class ____(models.Model):
name = models.CharField(max_length=30)
age = models.IntegerField(null=True, blank=True)
def __str__(self):
return self.name
|
Musician
|
python
|
geekcomputers__Python
|
JustDialScrapperGUI/Justdial Scrapper GUI.py
|
{
"start": 176,
"end": 6531
}
|
class ____:
def __init__(self, query, location, file_name, progressbar, label_progress):
self.query = query
self.location = location
self.file_name = file_name
self.progressbar = progressbar
self.label_progress = label_progress
@staticmethod
def inner_html(element):
return element.decode_contents(formatter="html")
@staticmethod
def get_name(body):
return body.find("span", {"class": "jcn"}).a.string
@staticmethod
def which_digit(html):
mapping_dict = {
"icon-ji": 9,
"icon-dc": "+",
"icon-fe": "(",
"icon-hg": ")",
"icon-ba": "-",
"icon-lk": 8,
"icon-nm": 7,
"icon-po": 6,
"icon-rq": 5,
"icon-ts": 4,
"icon-vu": 3,
"icon-wx": 2,
"icon-yz": 1,
"icon-acb": 0,
}
return mapping_dict.get(html, "")
def get_phone_number(self, body):
i = 0
phone_no = "No Number!"
try:
for item in body.find("p", {"class": "contact-info"}):
i += 1
if i == 2:
phone_no = ""
try:
for element in item.find_all(class_=True):
classes = []
classes.extend(element["class"])
phone_no += str((self.which_digit(classes[1])))
except Exception:
pass
except Exception:
pass
body = body["data-href"]
soup = BeautifulSoup(body, "html.parser")
for a in soup.find_all("a", {"id": "whatsapptriggeer"}):
# print (a)
phone_no = str(a["href"][-10:])
return phone_no
@staticmethod
def get_rating(body):
rating = 0.0
text = body.find("span", {"class": "star_m"})
if text is not None:
for item in text:
rating += float(item["class"][0][1:]) / 10
return rating
@staticmethod
def get_rating_count(body):
text = body.find("span", {"class": "rt_count"}).string
# Get only digits
rating_count = "".join(i for i in text if i.isdigit())
return rating_count
@staticmethod
def get_address(body):
return body.find("span", {"class": "mrehover"}).text.strip()
@staticmethod
def get_location(body):
text = body.find("a", {"class": "rsmap"})
if not text:
return
text_list = text["onclick"].split(",")
latitude = text_list[3].strip().replace("'", "")
longitude = text_list[4].strip().replace("'", "")
return latitude + ", " + longitude
def start_scrapping_logic(self):
page_number = 1
service_count = 1
total_url = "https://www.justdial.com/{0}/{1}".format(self.location, self.query)
fields = ["Name", "Phone", "Rating", "Rating Count", "Address", "Location"]
out_file = open("{0}.csv".format(self.file_name), "w")
csvwriter = csv.DictWriter(out_file, delimiter=",", fieldnames=fields)
csvwriter.writerow(
{
"Name": "Name", # Shows the name
"Phone": "Phone", # shows the phone
"Rating": "Rating", # shows the ratings
"Rating Count": "Rating Count", # Shows the stars for ex: 4 stars
"Address": "Address", # Shows the address of the place
"Location": "Location", # shows the location
}
)
progress_value = 0
while True:
# Check if reached end of result
if page_number > 50:
progress_value = 100
self.progressbar["value"] = progress_value
break
if progress_value != 0:
progress_value += 1
self.label_progress["text"] = "{0}{1}".format(progress_value, "%")
self.progressbar["value"] = progress_value
url = total_url + "/page-%s" % page_number
print("{0} {1}, {2}".format("Scrapping page number: ", page_number, url))
req = urllib.request.Request(
url, headers={"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64)"}
)
page = urllib.request.urlopen(req)
soup = BeautifulSoup(page.read(), "html.parser")
services = soup.find_all("li", {"class": "cntanr"})
# Iterate through the 10 results in the page
progress_value += 1
self.label_progress["text"] = "{0}{1}".format(progress_value, "%")
self.progressbar["value"] = progress_value
for service_html in services:
try:
# Parse HTML to fetch data
dict_service = {}
name = self.get_name(service_html)
print(name)
phone = self.get_phone_number(service_html)
rating = self.get_rating(service_html)
count = self.get_rating_count(service_html)
address = self.get_address(service_html)
location = self.get_location(service_html)
if name is not None:
dict_service["Name"] = name
if phone is not None:
print("getting phone number")
dict_service["Phone"] = phone
if rating is not None:
dict_service["Rating"] = rating
if count is not None:
dict_service["Rating Count"] = count
if address is not None:
dict_service["Address"] = address
if location is not None:
dict_service["Address"] = location
# Write row to CSV
csvwriter.writerow(dict_service)
print("#" + str(service_count) + " ", dict_service)
service_count += 1
except AttributeError:
print("AttributeError Occurred 101")
page_number += 1
out_file.close()
|
ScrapperLogic
|
python
|
kamyu104__LeetCode-Solutions
|
Python/summary-ranges.py
|
{
"start": 709,
"end": 984
}
|
class ____(object):
# @param {integer[]} nums
# @return {string[]}
def summaryRanges(self, nums):
return [re.sub('->.*>', '->', '->'.join(repr(n) for _, n in g))
for _, g in itertools.groupby(enumerate(nums), lambda i_n: i_n[1]-i_n[0])]
|
Solution2
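A usage sketch for the groupby/regex one-liner above, assuming `import re` and `import itertools` precede the class (both are needed but outside this extract); consecutive runs are joined with '->' and then collapsed to 'first->last' by the regex:
print(Solution2().summaryRanges([0, 1, 2, 4, 5, 7]))     # ['0->2', '4->5', '7']
print(Solution2().summaryRanges([0, 2, 3, 4, 6, 8, 9]))  # ['0', '2->4', '6', '8->9']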
|
python
|
numba__numba
|
numba/tests/npyufunc/test_caching.py
|
{
"start": 224,
"end": 2238
}
|
class ____(BaseCacheTest):
"""
    Since the cache statistics are not exposed by the ufunc, we test by inspecting
    the cache debug log.
"""
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "ufunc_caching_test_fodder"
regex_data_saved = re.compile(r'\[cache\] data saved to')
regex_index_saved = re.compile(r'\[cache\] index saved to')
regex_data_loaded = re.compile(r'\[cache\] data loaded from')
regex_index_loaded = re.compile(r'\[cache\] index loaded from')
def check_cache_saved(self, cachelog, count):
"""
        Check the number of cache saves that were issued
"""
data_saved = self.regex_data_saved.findall(cachelog)
index_saved = self.regex_index_saved.findall(cachelog)
self.assertEqual(len(data_saved), count)
self.assertEqual(len(index_saved), count)
def check_cache_loaded(self, cachelog, count):
"""
        Check the number of cache loads that were issued
"""
data_loaded = self.regex_data_loaded.findall(cachelog)
index_loaded = self.regex_index_loaded.findall(cachelog)
self.assertEqual(len(data_loaded), count)
self.assertEqual(len(index_loaded), count)
def check_ufunc_cache(self, usecase_name, n_overloads, **kwargs):
"""
        Check the number of cache loads/saves.
There should be one per overloaded version.
"""
mod = self.import_module()
usecase = getattr(mod, usecase_name)
# New cache entry saved
with capture_cache_log() as out:
new_ufunc = usecase(**kwargs)
cachelog = out.getvalue()
self.check_cache_saved(cachelog, count=n_overloads)
# Use cached version
with capture_cache_log() as out:
cached_ufunc = usecase(**kwargs)
cachelog = out.getvalue()
self.check_cache_loaded(cachelog, count=n_overloads)
return new_ufunc, cached_ufunc
|
UfuncCacheTest
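The check_* helpers above simply count occurrences of the cache-log markers; a standalone illustration of that counting (the log text here is made up):
import re
regex_data_saved = re.compile(r"\[cache\] data saved to")
fake_log = (
    "[cache] index saved to /tmp/fodder.nbi\n"
    "[cache] data saved to /tmp/fodder.nbc\n"
    "[cache] data saved to /tmp/fodder2.nbc\n"
)
assert len(regex_data_saved.findall(fake_log)) == 2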
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/api/datamodels/_generated.py
|
{
"start": 11955,
"end": 12637
}
|
class ____(BaseModel):
"""
Schema for TaskInstance model with minimal required fields needed for Runtime.
"""
id: Annotated[UUID, Field(title="Id")]
task_id: Annotated[str, Field(title="Task Id")]
dag_id: Annotated[str, Field(title="Dag Id")]
run_id: Annotated[str, Field(title="Run Id")]
try_number: Annotated[int, Field(title="Try Number")]
dag_version_id: Annotated[UUID, Field(title="Dag Version Id")]
map_index: Annotated[int | None, Field(title="Map Index")] = -1
hostname: Annotated[str | None, Field(title="Hostname")] = None
context_carrier: Annotated[dict[str, Any] | None, Field(title="Context Carrier")] = None
|
TaskInstance
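A construction sketch for the generated model above, assuming pydantic v2 is installed and the class is importable; the field values are arbitrary placeholders:
from uuid import uuid4
ti = TaskInstance(
    id=uuid4(),
    task_id="extract",
    dag_id="example_dag",
    run_id="manual__2024-01-01T00:00:00",
    try_number=1,
    dag_version_id=uuid4(),
)
print(ti.map_index, ti.hostname)  # -1 None (the declared defaults)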
|
python
|
django-haystack__django-haystack
|
test_haystack/test_query.py
|
{
"start": 988,
"end": 3464
}
|
class ____(TestCase):
def test_split_expression(self):
sq = SQ(foo="bar")
self.assertEqual(sq.split_expression("foo"), ("foo", "content"))
self.assertEqual(sq.split_expression("foo__exact"), ("foo", "exact"))
self.assertEqual(sq.split_expression("foo__content"), ("foo", "content"))
self.assertEqual(sq.split_expression("foo__contains"), ("foo", "contains"))
self.assertEqual(sq.split_expression("foo__lt"), ("foo", "lt"))
self.assertEqual(sq.split_expression("foo__lte"), ("foo", "lte"))
self.assertEqual(sq.split_expression("foo__gt"), ("foo", "gt"))
self.assertEqual(sq.split_expression("foo__gte"), ("foo", "gte"))
self.assertEqual(sq.split_expression("foo__in"), ("foo", "in"))
self.assertEqual(sq.split_expression("foo__startswith"), ("foo", "startswith"))
self.assertEqual(sq.split_expression("foo__endswith"), ("foo", "endswith"))
self.assertEqual(sq.split_expression("foo__range"), ("foo", "range"))
self.assertEqual(sq.split_expression("foo__fuzzy"), ("foo", "fuzzy"))
        # Unrecognized filter. Fall back to the default filter ("content").
self.assertEqual(sq.split_expression("foo__moof"), ("foo", "content"))
def test_repr(self):
self.assertEqual(repr(SQ(foo="bar")), "<SQ: AND foo__content=bar>")
self.assertEqual(repr(SQ(foo=1)), "<SQ: AND foo__content=1>")
self.assertEqual(
repr(SQ(foo=datetime.datetime(2009, 5, 12, 23, 17))),
"<SQ: AND foo__content=2009-05-12 23:17:00>",
)
def test_simple_nesting(self):
sq1 = SQ(foo="bar")
sq2 = SQ(foo="bar")
bigger_sq = SQ(sq1 & sq2)
self.assertEqual(
repr(bigger_sq), "<SQ: AND (foo__content=bar AND foo__content=bar)>"
)
another_bigger_sq = SQ(sq1 | sq2)
self.assertEqual(
repr(another_bigger_sq), "<SQ: AND (foo__content=bar OR foo__content=bar)>"
)
one_more_bigger_sq = SQ(sq1 & ~sq2)
self.assertEqual(
repr(one_more_bigger_sq),
"<SQ: AND (foo__content=bar AND NOT (foo__content=bar))>",
)
mega_sq = SQ(bigger_sq & SQ(another_bigger_sq | ~one_more_bigger_sq))
self.assertEqual(
repr(mega_sq),
"<SQ: AND ((foo__content=bar AND foo__content=bar) AND ((foo__content=bar OR foo__content=bar) OR NOT ((foo__content=bar AND NOT (foo__content=bar)))))>",
)
|
SQTestCase
|
python
|
encode__starlette
|
starlette/middleware/authentication.py
|
{
"start": 379,
"end": 1800
}
|
class ____:
def __init__(
self,
app: ASGIApp,
backend: AuthenticationBackend,
on_error: Callable[[HTTPConnection, AuthenticationError], Response] | None = None,
) -> None:
self.app = app
self.backend = backend
self.on_error: Callable[[HTTPConnection, AuthenticationError], Response] = (
on_error if on_error is not None else self.default_on_error
)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] not in ["http", "websocket"]:
await self.app(scope, receive, send)
return
conn = HTTPConnection(scope)
try:
auth_result = await self.backend.authenticate(conn)
except AuthenticationError as exc:
response = self.on_error(conn, exc)
if scope["type"] == "websocket":
await send({"type": "websocket.close", "code": 1000})
else:
await response(scope, receive, send)
return
if auth_result is None:
auth_result = AuthCredentials(), UnauthenticatedUser()
scope["auth"], scope["user"] = auth_result
await self.app(scope, receive, send)
@staticmethod
def default_on_error(conn: HTTPConnection, exc: Exception) -> Response:
return PlainTextResponse(str(exc), status_code=400)
|
AuthenticationMiddleware
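A wiring sketch following Starlette's documented pattern: a backend returns credentials plus a user, and the middleware is installed via the Middleware wrapper. The DemoBackend and route below are illustrative only:
from starlette.applications import Starlette
from starlette.authentication import AuthCredentials, AuthenticationBackend, SimpleUser
from starlette.middleware import Middleware
from starlette.middleware.authentication import AuthenticationMiddleware
from starlette.responses import PlainTextResponse
from starlette.routing import Route
class DemoBackend(AuthenticationBackend):
    async def authenticate(self, conn):
        # A real backend would inspect conn.headers and raise AuthenticationError on failure.
        return AuthCredentials(["authenticated"]), SimpleUser("demo")
async def whoami(request):
    # scope["user"], set by the middleware, is exposed as request.user
    return PlainTextResponse(request.user.display_name)
app = Starlette(
    routes=[Route("/whoami", whoami)],
    middleware=[Middleware(AuthenticationMiddleware, backend=DemoBackend())],
)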
|
python
|
great-expectations__great_expectations
|
great_expectations/datasource/fluent/sql_datasource.py
|
{
"start": 35710,
"end": 37000
}
|
class ____(_SQLAsset):
"""An asset made from a SQL query
Args:
query: The query to be used to construct the underlying Data Asset
"""
# Instance fields
type: Literal["query"] = "query"
query: str
@pydantic.validator("query")
def query_must_start_with_select(cls, v: str):
query = v.lstrip()
if not (query.upper().startswith("SELECT") and query[6].isspace()):
raise ValueError("query must start with 'SELECT' followed by a whitespace.") # noqa: TRY003 # FIXME CoP
return v
@override
def as_selectable(self) -> sqlalchemy.Selectable:
"""Returns the Selectable that is used to retrieve the data.
This can be used in a subselect FROM clause for queries against this data.
"""
return sa.select(sa.text(self.query.lstrip()[6:])).subquery()
@override
def _create_batch_spec_kwargs(self) -> Dict[str, Any]:
return {
"data_asset_name": self.name,
"query": self.query,
"temp_table_schema_name": None,
"batch_identifiers": {},
}
@override
def _create_batch_spec(self, batch_spec_kwargs: dict) -> RuntimeQueryBatchSpec:
return RuntimeQueryBatchSpec(**batch_spec_kwargs)
@public_api
|
QueryAsset
|
python
|
google__pytype
|
pytype/rewrite/tests/test_basic.py
|
{
"start": 2709,
"end": 3786
}
|
class ____(RewriteTest):
"""Import tests."""
def test_import(self):
self.Check("""
import os
assert_type(os.__name__, str) # attribute of the 'module' class
assert_type(os.name, str) # attribute of the 'os' module
""")
def test_builtins(self):
self.Check("""
assert_type(__builtins__.int, "type[int]")
""")
def test_dotted_import(self):
self.Check("""
import os.path
assert_type(os.path, "module")
""")
def test_from_import(self):
self.Check("""
from os import name, path
assert_type(name, "str")
assert_type(path, "module")
""")
def test_errors(self):
self.CheckWithErrors("""
import nonsense # import-error
import os.nonsense # import-error
from os import nonsense # module-attr
""")
def test_aliases(self):
self.Check("""
import os as so
assert_type(so.name, "str")
import os.path as path1
assert_type(path1, "module")
from os import path as path2
assert_type(path2, "module")
""")
|
ImportsTest
|
python
|
apache__airflow
|
airflow-core/tests/unit/always/test_project_structure.py
|
{
"start": 1039,
"end": 17111
}
|
class ____:
def test_reference_to_providers_from_core(self):
for filename in AIRFLOW_CORE_SOURCES_PATH.glob("example_dags/**/*.py"):
self.assert_file_not_contains(filename, "providers")
def test_deprecated_packages(self):
for filename in AIRFLOW_CORE_SOURCES_PATH.glob("airflow/contrib/**/*.py"):
if filename.name == "__init__.py":
self.assert_file_contains(filename, "This package is deprecated.")
else:
self.assert_file_contains(filename, "This module is deprecated.")
def assert_file_not_contains(self, filename: pathlib.Path, pattern: str):
with open(filename, "rb", 0) as file, mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as content:
if content.find(bytes(pattern, "utf-8")) != -1:
pytest.fail(f"File {filename} not contains pattern - {pattern}")
def assert_file_contains(self, filename: pathlib.Path, pattern: str):
with open(filename, "rb", 0) as file, mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as content:
if content.find(bytes(pattern, "utf-8")) == -1:
pytest.fail(f"File {filename} contains illegal pattern - {pattern}")
def test_providers_modules_should_have_tests(self):
"""
        Assert every module in /providers/ has a corresponding test_ file under the provider's tests/unit tree.
"""
        # The test below had a bug for quite a while, so tests are missing for a lot of modules.
        # We should make sure that this list eventually goes down to 0.
# TODO(potiuk) - check if that test actually tests something
OVERLOOKED_TESTS = [
"providers/amazon/tests/unit/amazon/aws/auth_manager/datamodels/test_login.py",
"providers/amazon/tests/unit/amazon/aws/auth_manager/security_manager/test_aws_security_manager_override.py",
"providers/amazon/tests/unit/amazon/aws/executors/batch/test_batch_executor_config.py",
"providers/amazon/tests/unit/amazon/aws/executors/batch/test_boto_schema.py",
"providers/amazon/tests/unit/amazon/aws/executors/ecs/test_ecs_executor_config.py",
"providers/amazon/tests/unit/amazon/aws/executors/aws_lambda/test_utils.py",
"providers/amazon/tests/unit/amazon/aws/executors/aws_lambda/docker/test_app.py",
"providers/amazon/tests/unit/amazon/aws/executors/utils/test_base_config_keys.py",
"providers/amazon/tests/unit/amazon/aws/operators/test_emr.py",
"providers/amazon/tests/unit/amazon/aws/operators/test_sagemaker.py",
"providers/amazon/tests/unit/amazon/aws/sensors/test_emr.py",
"providers/amazon/tests/unit/amazon/aws/sensors/test_sagemaker.py",
"providers/amazon/tests/unit/amazon/aws/test_exceptions.py",
"providers/amazon/tests/unit/amazon/aws/triggers/test_sagemaker_unified_studio.py",
"providers/amazon/tests/unit/amazon/aws/triggers/test_step_function.py",
"providers/amazon/tests/unit/amazon/aws/utils/test_rds.py",
"providers/amazon/tests/unit/amazon/aws/utils/test_sagemaker.py",
"providers/amazon/tests/unit/amazon/aws/waiters/test_base_waiter.py",
"providers/apache/hdfs/tests/unit/apache/hdfs/hooks/test_hdfs.py",
"providers/apache/hdfs/tests/unit/apache/hdfs/sensors/test_hdfs.py",
"providers/apache/hive/tests/unit/apache/hive/plugins/test_hive.py",
"providers/celery/tests/unit/celery/executors/test_celery_executor_utils.py",
"providers/celery/tests/unit/celery/executors/test_default_celery.py",
"providers/cloudant/tests/unit/cloudant/test_cloudant_fake.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/executors/test_kubernetes_executor_types.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/executors/test_kubernetes_executor_utils.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/operators/test_kubernetes_pod.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/test_exceptions.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/test_k8s_model.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/test_kube_client.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/test_kube_config.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/test_python_kubernetes_script.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/test_secret.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/triggers/test_kubernetes_pod.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/utils/test_delete_from.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/utils/test_k8s_hashlib_wrapper.py",
"providers/cncf/kubernetes/tests/unit/cncf/kubernetes/utils/test_xcom_sidecar.py",
"providers/common/compat/tests/unit/common/compat/lineage/test_entities.py",
"providers/common/compat/tests/unit/common/compat/standard/test_operators.py",
"providers/common/compat/tests/unit/common/compat/standard/test_triggers.py",
"providers/common/compat/tests/unit/common/compat/standard/test_utils.py",
"providers/common/messaging/tests/unit/common/messaging/providers/test_base_provider.py",
"providers/common/messaging/tests/unit/common/messaging/providers/test_sqs.py",
"providers/edge3/tests/unit/edge3/models/test_edge_job.py",
"providers/edge3/tests/unit/edge3/models/test_edge_logs.py",
"providers/edge3/tests/unit/edge3/models/test_edge_worker.py",
"providers/edge3/tests/unit/edge3/worker_api/routes/test__v2_compat.py",
"providers/edge3/tests/unit/edge3/worker_api/routes/test__v2_routes.py",
"providers/edge3/tests/unit/edge3/worker_api/test_app.py",
"providers/edge3/tests/unit/edge3/worker_api/test_auth.py",
"providers/edge3/tests/unit/edge3/worker_api/test_datamodels.py",
"providers/edge3/tests/unit/edge3/worker_api/test_datamodels_ui.py",
"providers/fab/tests/unit/fab/auth_manager/api_fastapi/datamodels/test_login.py",
"providers/fab/tests/unit/fab/migrations/test_env.py",
"providers/fab/tests/unit/fab/www/api_connexion/test_exceptions.py",
"providers/fab/tests/unit/fab/www/api_connexion/test_parameters.py",
"providers/fab/tests/unit/fab/www/api_connexion/test_security.py",
"providers/fab/tests/unit/fab/www/api_connexion/test_types.py",
"providers/fab/tests/unit/fab/www/extensions/test_init_appbuilder.py",
"providers/fab/tests/unit/fab/www/extensions/test_init_jinja_globals.py",
"providers/fab/tests/unit/fab/www/extensions/test_init_manifest_files.py",
"providers/fab/tests/unit/fab/www/extensions/test_init_security.py",
"providers/fab/tests/unit/fab/www/extensions/test_init_session.py",
"providers/fab/tests/unit/fab/www/extensions/test_init_views.py",
"providers/fab/tests/unit/fab/www/extensions/test_init_wsgi_middlewares.py",
"providers/fab/tests/unit/fab/www/security/test_permissions.py",
"providers/fab/tests/unit/fab/www/test_airflow_flask_app.py",
"providers/fab/tests/unit/fab/www/test_app.py",
"providers/fab/tests/unit/fab/www/test_constants.py",
"providers/fab/tests/unit/fab/www/test_security_appless.py",
"providers/fab/tests/unit/fab/www/test_security_manager.py",
"providers/fab/tests/unit/fab/www/test_session.py",
"providers/fab/tests/unit/fab/www/test_views.py",
"providers/google/tests/unit/google/cloud/fs/test_gcs.py",
"providers/google/tests/unit/google/cloud/links/test_base.py",
"providers/google/tests/unit/google/cloud/links/test_bigquery.py",
"providers/google/tests/unit/google/cloud/links/test_bigquery_dts.py",
"providers/google/tests/unit/google/cloud/links/test_bigtable.py",
"providers/google/tests/unit/google/cloud/links/test_cloud_build.py",
"providers/google/tests/unit/google/cloud/links/test_cloud_functions.py",
"providers/google/tests/unit/google/cloud/links/test_cloud_memorystore.py",
"providers/google/tests/unit/google/cloud/links/test_cloud_sql.py",
"providers/google/tests/unit/google/cloud/links/test_cloud_storage_transfer.py",
"providers/google/tests/unit/google/cloud/links/test_cloud_tasks.py",
"providers/google/tests/unit/google/cloud/links/test_compute.py",
"providers/google/tests/unit/google/cloud/links/test_data_loss_prevention.py",
"providers/google/tests/unit/google/cloud/links/test_datacatalog.py",
"providers/google/tests/unit/google/cloud/links/test_dataflow.py",
"providers/google/tests/unit/google/cloud/links/test_dataform.py",
"providers/google/tests/unit/google/cloud/links/test_datafusion.py",
"providers/google/tests/unit/google/cloud/links/test_dataprep.py",
"providers/google/tests/unit/google/cloud/links/test_dataproc.py",
"providers/google/tests/unit/google/cloud/links/test_datastore.py",
"providers/google/tests/unit/google/cloud/links/test_kubernetes_engine.py",
"providers/google/tests/unit/google/cloud/links/test_mlengine.py",
"providers/google/tests/unit/google/cloud/links/test_pubsub.py",
"providers/google/tests/unit/google/cloud/links/test_spanner.py",
"providers/google/tests/unit/google/cloud/links/test_stackdriver.py",
"providers/google/tests/unit/google/cloud/links/test_workflows.py",
"providers/google/tests/unit/google/cloud/links/test_translate.py",
"providers/google/tests/unit/google/cloud/operators/vertex_ai/test_auto_ml.py",
"providers/google/tests/unit/google/cloud/operators/vertex_ai/test_batch_prediction_job.py",
"providers/google/tests/unit/google/cloud/operators/vertex_ai/test_custom_job.py",
"providers/google/tests/unit/google/cloud/operators/vertex_ai/test_dataset.py",
"providers/google/tests/unit/google/cloud/operators/vertex_ai/test_endpoint_service.py",
"providers/google/tests/unit/google/cloud/operators/vertex_ai/test_hyperparameter_tuning_job.py",
"providers/google/tests/unit/google/cloud/operators/vertex_ai/test_model_service.py",
"providers/google/tests/unit/google/cloud/operators/vertex_ai/test_pipeline_job.py",
"providers/google/tests/unit/google/cloud/operators/vertex_ai/test_ray.py",
"providers/google/tests/unit/google/cloud/sensors/vertex_ai/test_feature_store.py",
"providers/google/tests/unit/google/cloud/transfers/test_bigquery_to_sql.py",
"providers/google/tests/unit/google/cloud/transfers/test_presto_to_gcs.py",
"providers/google/tests/unit/google/cloud/utils/test_bigquery.py",
"providers/google/tests/unit/google/cloud/utils/test_bigquery_get_data.py",
"providers/google/tests/unit/google/common/hooks/test_operation_helpers.py",
"providers/google/tests/unit/google/test_go_module_utils.py",
"providers/http/tests/unit/http/test_exceptions.py",
"providers/keycloak/tests/unit/keycloak/auth_manager/datamodels/test_token.py",
"providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_adls.py",
"providers/snowflake/tests/unit/snowflake/triggers/test_snowflake_trigger.py",
"providers/standard/tests/unit/standard/operators/test_branch.py",
"providers/standard/tests/unit/standard/operators/test_empty.py",
"providers/standard/tests/unit/standard/operators/test_latest_only.py",
"providers/standard/tests/unit/standard/sensors/test_external_task.py",
"providers/sftp/tests/unit/sftp/test_exceptions.py",
]
modules_files: list[pathlib.Path] = list(
AIRFLOW_PROVIDERS_ROOT_PATH.glob("**/src/airflow/providers/**/*.py")
)
# Exclude .build files
modules_files = (f for f in modules_files if ".build" not in f.parts)
# Exclude .git files
modules_files = (f for f in modules_files if ".git" not in f.parts)
# Exclude .venv files
modules_files = (f for f in modules_files if ".venv" not in f.parts)
# Exclude node_modules
modules_files = (f for f in modules_files if "node_modules" not in f.parts)
# Exclude __init__.py
modules_files = filter(lambda f: f.name != "__init__.py", modules_files)
# Exclude example_dags
modules_files = (f for f in modules_files if "example_dags" not in f.parts)
# Exclude _vendor
modules_files = (f for f in modules_files if "_vendor" not in f.parts)
# Exclude versions file
modules_files = (f for f in modules_files if "versions" not in f.parts)
# Exclude get_provider_info files
modules_files = (f for f in modules_files if "get_provider_info.py" not in f.parts)
# Make path relative
modules_files = list(f.relative_to(AIRFLOW_ROOT_PATH) for f in modules_files)
current_test_files = list(AIRFLOW_PROVIDERS_ROOT_PATH.rglob("**/tests/**/*.py"))
# Make path relative
current_test_files = list(f.relative_to(AIRFLOW_ROOT_PATH) for f in current_test_files)
# Exclude __init__.py
current_test_files = set(f for f in current_test_files if not f.name == "__init__.py")
# Exclude node_modules
current_test_files = set(f for f in current_test_files if "node_modules" not in f.parts)
# Exclude version_compat.py
modules_files = filter(lambda f: f.name != "version_compat.py", modules_files)
modules_files_set = set(modules_files)
expected_test_files = set(
[
pathlib.Path(
f.with_name("test_" + f.name)
.as_posix()
.replace("/src/airflow/providers/", "/tests/unit/")
)
for f in modules_files_set
]
)
expected_test_files = set(expected_test_files) - set(
[pathlib.Path(test_file) for test_file in OVERLOOKED_TESTS]
)
missing_tests_files = [
file.as_posix()
for file in sorted(expected_test_files - expected_test_files.intersection(current_test_files))
]
        assert missing_tests_files == [], "Detected missing tests in providers module - please add tests"
        added_test_files = current_test_files.intersection(pathlib.Path(p) for p in OVERLOOKED_TESTS)
assert set() == added_test_files, (
"Detect added tests in providers module - please remove the tests "
"from OVERLOOKED_TESTS list above"
)
def get_imports_from_file(filepath: str):
with open(filepath) as py_file:
content = py_file.read()
doc_node = ast.parse(content, filepath)
import_names: set[str] = set()
for current_node in ast.walk(doc_node):
if not isinstance(current_node, (ast.Import, ast.ImportFrom)):
continue
for alias in current_node.names:
name = alias.name
fullname = f"{current_node.module}.{name}" if isinstance(current_node, ast.ImportFrom) else name
import_names.add(fullname)
return import_names
def filepath_to_module(path: pathlib.Path, src_folder: pathlib.Path):
path = path.relative_to(src_folder)
return path.as_posix().replace("/", ".")[: -(len(".py"))]
def print_sorted(container: set, indent: str = " ") -> None:
sorted_container = sorted(container)
print(f"{indent}" + f"\n{indent}".join(sorted_container))
|
TestProjectStructure
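The expected-test-path computation near the end of test_providers_modules_should_have_tests boils down to prefixing the file name with test_ and swapping the source prefix for the unit-test prefix; a standalone illustration with a made-up provider module path:
import pathlib
f = pathlib.Path("providers/postgres/src/airflow/providers/postgres/hooks/postgres.py")
expected = pathlib.Path(
    f.with_name("test_" + f.name).as_posix().replace("/src/airflow/providers/", "/tests/unit/")
)
print(expected)  # providers/postgres/tests/unit/postgres/hooks/test_postgres.py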
|
python
|
getsentry__sentry
|
src/sentry/backup/mixins.py
|
{
"start": 226,
"end": 2627
}
|
class ____:
"""
Handles the `ImportFlags.overwrite_configs` setting when it's piped through to a
`RelocationScope.Config` model with at least one `unique=True` field, thereby handling the
collision in the manner the importer requested.
"""
# TODO(getsentry/team-ospo#190): Clean up the type checking in this method.
def write_relocation_import(
self, scope: ImportScope, flags: ImportFlags
) -> tuple[int, ImportKind] | None:
# Get all unique sets that will potentially cause collisions.
uniq_sets = dependencies()[get_model_name(self)].get_uniques_without_foreign_keys() # type: ignore[arg-type]
# Don't use this mixin for models with multiple unique sets; write custom logic instead.
assert len(uniq_sets) <= 1
# Must set `__relocation_custom_ordinal__` on models that use this mixin.
assert getattr(self.__class__, "__relocation_custom_ordinal__", None) is not None
if self.get_relocation_scope() == RelocationScope.Config: # type: ignore[attr-defined]
if len(uniq_sets) == 1:
uniq_set = uniq_sets[0]
query = dict()
for uniq_field_name in uniq_set:
if getattr(self, uniq_field_name, None) is not None:
query[uniq_field_name] = getattr(self, uniq_field_name)
# If all of the fields in the unique set are NULL, we'll avoid a collision, so exit
# early and write a new entry.
if len(query) > 0:
existing = self.__class__.objects.filter(**query).first() # type: ignore[attr-defined]
if existing:
# Re-use the existing data if config overwrite is disabled.
if not flags.overwrite_configs:
return (existing.pk, ImportKind.Existing)
# We are performing an overwrite (ie, keeping the old pk, but using all of
# the imported values).
self.pk = existing.pk
self.save() # type: ignore[attr-defined]
return (self.pk, ImportKind.Overwrite)
# Does not have a single colliding unique field - write as usual.
return super().write_relocation_import(scope, flags) # type: ignore[misc]
|
OverwritableConfigMixin
|
python
|
marshmallow-code__marshmallow
|
src/marshmallow/types.py
|
{
"start": 897,
"end": 1161
}
|
class ____(typing.Protocol):
def dumps(
self, obj: typing.Any, *args: typing.Any, **kwargs: typing.Any
) -> str: ...
def loads(
self, s: str | bytes | bytearray, *args: typing.Any, **kwargs: typing.Any
) -> typing.Any: ...
|
RenderModule
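Because this is a structural Protocol, any object exposing compatible dumps/loads satisfies it; the stdlib json module is the usual example. A small sketch, assuming RenderModule is importable from marshmallow.types:
import json
import typing
from marshmallow.types import RenderModule
def roundtrip(render_module: RenderModule, obj: typing.Any) -> typing.Any:
    # Any module-like object with dumps()/loads() type-checks here, e.g. json or simplejson.
    return render_module.loads(render_module.dumps(obj))
print(roundtrip(json, {"answer": 42}))  # {'answer': 42}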
|
python
|
sympy__sympy
|
sympy/polys/polyoptions.py
|
{
"start": 19473,
"end": 19681
}
|
class ____(BooleanOption, Flag, metaclass=OptionType):
"""``series`` flag to polynomial manipulation functions. """
option = 'series'
@classmethod
def default(cls):
return False
|
Series
|
python
|
getsentry__sentry
|
src/sentry/audit_log/events.py
|
{
"start": 6477,
"end": 7979
}
|
class ____(AuditLogEvent):
def __init__(self) -> None:
super().__init__(
event_id=178,
name="PROJECT_PERFORMANCE_ISSUE_DETECTION_CHANGE",
api_name="project.change-performance-issue-detection",
)
def render(self, audit_log_entry: AuditLogEntry) -> str:
from sentry.issues.endpoints.project_performance_issue_settings import (
project_settings_to_group_map as map,
)
data = audit_log_entry.data
items_string = ", ".join(
f"to {'enable' if value else 'disable'} detection of {map[key].description} issue"
for (key, value) in data.items()
if key in map.keys()
)
return "edited project performance issue detector settings " + items_string
def render_project_action(audit_log_entry: AuditLogEntry, action: str):
    # Most logs will just be the name of the filter, but legacy browser changes can be a bool, str, list, or set
filter_name = audit_log_entry.data["state"]
slug = audit_log_entry.data.get("slug")
message = f"{action} project filter {filter_name}"
if filter_name in ("0", "1") or isinstance(filter_name, (bool, list, set)):
message = f"{action} project filter legacy-browsers"
if isinstance(filter_name, (list, set)):
message += ": {}".format(", ".join(sorted(filter_name)))
if slug:
message += f" for project {slug}"
return message
|
ProjectPerformanceDetectionSettingsAuditLogEvent
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/cpp.py
|
{
"start": 150918,
"end": 182582
}
|
class ____(CppKernel):
# Subclass CppKernel, CppVecKernel, etc., to customize code generation.
# Override CppOverrides or CppVecOverrides to emit custom ops.
# Earlier, this meant copying codegen_functions() to use your subclasses.
# Now, use kernel_cls and vec_kernel_cls class attributes instead.
# This lets CppKernelProxy subclasses inject custom behavior cleanly.
# No need to duplicate codegen_functions() just to swap kernel classes.
kernel_cls: type[CppKernel] = CppKernel
vec_kernel_cls: type[CppVecKernel] = CppVecKernel
tile2d_kernel_cls: type[CppTile2DKernel] = CppTile2DKernel
def __init__(self, kernel_group):
super().__init__(kernel_group.args, kernel_group.ws.num_threads)
self.kernel_group = kernel_group
self.loop_nest = None
self.call_ranges = None
self.picked_vec_isa: cpu_vec_isa.VecISA = cpu_vec_isa.pick_vec_isa()
self.kernels: list[CppKernel] = []
def data_type_propagation(self, nodes):
for _node in nodes:
assert isinstance(_node, SchedulerNode)
DataTypePropagation.propagate_scheduler_node(_node)
# Check if all the nodes of a given fx graph can support BF16/FP16
def is_lowp_fp_scheduler(self, scheduler_node: SchedulerNode):
if not isinstance(scheduler_node._body, LoopBody):
return True
        # Propagate the dtypes to check whether all the fx nodes are bf16/fp16
DataTypePropagation.propagate_scheduler_node(scheduler_node)
return (
get_loop_body_lowp_fp(scheduler_node._body)[0] is not None
and not get_loop_body_lowp_fp(scheduler_node._body)[1]
)
def legalize_lowp_fp_dtype_loopbody(self, loop_body: LoopBody):
def add_to_dtype(sub_graph: torch.fx.Graph):
def get_input_dtype(node: torch.fx.Node) -> Optional[torch.dtype]:
"""Get input dtype for nodes that may consumes lowp fp dt"""
if node.target == "store":
return V.graph.get_dtype(node.args[1]) # type: ignore[arg-type]
elif node.target == "to_dtype_bitcast":
return node.args[-1] # type: ignore[return-value]
elif node.target == "to_dtype":
if len(node.args) > 3:
return node.args[3] # type: ignore[return-value]
else:
return node.kwargs.get("src_dtype", None) # type: ignore[return-value]
else:
return None
def get_output_dtype(node: torch.fx.Node) -> Optional[torch.dtype]:
"""Get output dtype for nodes that may produce lowp fp dt"""
if node.target == "load":
assert len(node.args) == 3
return V.graph.get_dtype(node.args[1]) # type: ignore[arg-type]
elif node.target in ["to_dtype", "constant", "index_expr"]:
return node.args[-1] # type: ignore[return-value]
elif node.target == "to_dtype_bitcast":
return node.args[2] # type: ignore[return-value]
else:
return None
def is_lowp_fp_source(node: torch.fx.Node, dt: torch.dtype):
"""Check if the given node produces output with expected low precision floating point data type."""
assert dt in DTYPE_LOWP_FP
return get_output_dtype(node) == dt
def is_lowp_fp_sink(node: torch.fx.Node, dt: torch.dtype):
"""Check if the given node accept input with expected low precision floating point data type."""
assert dt in DTYPE_LOWP_FP
if input_dtype := get_input_dtype(node):
return input_dtype == dt
elif node.target == "to_dtype":
                    # The `src_dtype` of a `to_dtype` node might be missing, in which case the node accepts any input dtype.
return True
else:
return False
def is_lowp_fp_source_no_promote(node: torch.fx.Node, dt: torch.dtype):
"""Check if the node is a lowp fp sources which are all directly fed to ops that accepts lowp fp input
thus no need to promote to float
"""
return is_lowp_fp_source(node, dt) and all(
is_lowp_fp_sink(user, dt) for user in node.users
)
sub_graph_nodes = list(sub_graph.nodes)
to_lowp_fp_legalized_nodes = []
for _node in sub_graph_nodes:
if (
_node.target in ["load", "index_expr"]
and (dt := get_output_dtype(_node)) in DTYPE_LOWP_FP
):
                    # No need to promote to float if all users are ops that accept lowp fp input
# pyrefly: ignore [bad-argument-type]
if all(is_lowp_fp_sink(user, dt) for user in _node.users):
continue
ops = _node.args[0]
with sub_graph.inserting_after(_node):
to_type_node = sub_graph.call_method(
"to_dtype", args=(ops, _node, torch.float)
)
_node.replace_all_uses_with(
to_type_node, lambda n: n is not to_type_node
)
# pyrefly: ignore [bad-assignment]
metrics.cpp_to_dtype_count += 1
elif (
_node.target == "store"
and (dt := get_input_dtype(_node)) in DTYPE_LOWP_FP
):
ops, name, _, value_var, _ = _node.args
# pyrefly: ignore [bad-argument-type]
if is_lowp_fp_source_no_promote(value_var, dt):
continue
dtype = V.graph.get_dtype(name)
with sub_graph.inserting_before(_node):
to_type_node = sub_graph.call_method(
"to_dtype", args=(ops, value_var, dtype)
)
_node.replace_input_with(value_var, to_type_node)
# pyrefly: ignore [bad-assignment]
metrics.cpp_to_dtype_count += 1
elif _node.target == "reduction":
(
ops,
dtype,
src_dtype,
reduction_type,
value,
) = _node.args
if src_dtype in DTYPE_LOWP_FP:
# Since we always convert the load/store value to float if the tensor is bfloat16/float16.
# Therefore, the reduction should never work with bfloat16/float16 value. Hence, we update
# the bfloat16/float16 reduction by
# 1) updating the src_dtype to float
# and 2) updating the dtype to float if it is bfloat16/float16.
assert dtype in [
torch.float,
torch.bfloat16,
torch.float16,
torch.int64,
]
_node.args = (
ops,
torch.float if dtype in DTYPE_LOWP_FP else dtype,
torch.float,
reduction_type,
value,
)
elif _node.target == "constant" and _node.args[-1] in DTYPE_LOWP_FP:
                    # No need to promote to float if all users are ops that accept lowp fp input
(ops, value, dt) = _node.args
if all(is_lowp_fp_sink(user, dt) for user in _node.users): # type: ignore[arg-type]
continue
_node.args = (ops, value, torch.float)
elif _node.target == "to_dtype" and _node.args[-1] in DTYPE_LOWP_FP:
                    # No need to promote to float if all users are ops that accept lowp fp input
(ops, x, dt) = _node.args
if all(is_lowp_fp_sink(user, dt) for user in _node.users): # type: ignore[arg-type]
continue
# The legalization always loads the BF16/FP16 tensor as FP32 for computation
# and converts back to BF16/FP16 after the computation.
# Hence, there should be no computation w/ BF16/FP16.
# Therefore, we update the to_dtype by replacing the bf16/fp16 dtype with fp32.
# Save the legalized to_dtype node for the elimination(eliminate_to_dtype step):
# 1) Eliminate the redundant to_dtype node if we have a pattern as follows:
# graph():
# %lowp_fp_legalized = call_method[target=to_dtype](args = (%ops, %input, torch.float))
# %to_dtype2 = call_method[target=to_dtype](args = (%ops, %lowp_fp_legalized, torch.bfloat16/float16))
# Regarding the first to_dtype, it is redundant because
# the second to_type also converts to the torch.bfloat16/torch.float16.
# Hence, we remove the first to_type.
to_lowp_fp_legalized_nodes.append(_node)
_node.args = (ops, x, torch.float)
elif _node.target == "to_dtype_bitcast":
(ops, value_var, dtype, src_dtype) = _node.args
                    # to_dtype_bitcast acts as a lowp fp sink:
# c10::bit_cast requires the source and target have the same bitwidth. Because the input tensor's
# dtype could be promoted, e.g. from float16 to float, we have to cast the tensor to its original
# source dtype before invoking bit_cast.
if src_dtype in DTYPE_LOWP_FP:
                        # No need to promote to float if it is a user of a lowp fp source
                        # whose users are all ops that accept lowp fp input
if not is_lowp_fp_source_no_promote(value_var, src_dtype):
with sub_graph.inserting_before(_node):
to_type_node = sub_graph.call_method(
"to_dtype", args=(ops, value_var, src_dtype)
)
_node.replace_input_with(value_var, to_type_node)
# pyrefly: ignore [bad-assignment]
metrics.cpp_to_dtype_count += 1
                    # to_dtype_bitcast acts as a lowp fp source:
# We also need to convert the bit-casted tensor back to float to make sure we keep using higher
# precision values for the rest of the computation.
if dtype in DTYPE_LOWP_FP:
                        # No need to promote to float if all users are ops that accept lowp fp input
if not (
all(is_lowp_fp_sink(user, dtype) for user in _node.users)
):
ops = _node.args[0]
with sub_graph.inserting_after(_node):
to_type_node = sub_graph.call_method(
"to_dtype", args=(ops, _node, torch.float)
)
_node.replace_all_uses_with(
to_type_node, lambda n: n is not to_type_node
)
# pyrefly: ignore [bad-assignment]
metrics.cpp_to_dtype_count += 1
def eliminate_to_dtype(sub_graph: torch.fx.Graph):
def _eliminate_duplicate_to_node(sub_graph: torch.fx.Graph):
# Eliminate the redundant to_dtype node. Let's consider a pattern as follows:
# graph():
# %to_dtype1 = call_method[target=to_dtype](args = (%ops, %input, torch.float), kwargs = {})
# %to_dtype2 = call_method[target=to_dtype](args = (%ops, %to_dtype1, torch.float), kwargs = {})
# Regarding the first to_dtype, it is redundant because the second to_type also converts to the
# torch.float. Hence, we remove the first to_type
def _used_by_to(to_node: torch.fx.Node):
return all(usr.target == "to_dtype" for usr in to_node.users)
all_to_nodes = [
node for node in sub_graph.nodes if node.target == "to_dtype"
]
all_to_nodes_and_users = [
{node: node.users} for node in all_to_nodes if _used_by_to(node)
]
for node_users in all_to_nodes_and_users:
for node, users in node_users.items():
if node in sub_graph.nodes and (
all(usr.args[-1] == node.args[-1] for usr in users)
or (
node in to_lowp_fp_legalized_nodes
and all(
usr.args[-1] in DTYPE_LOWP_FP for usr in users
)
)
):
val_node = node.all_input_nodes[-1]
node.replace_all_uses_with(val_node)
sub_graph.erase_node(node)
# For debug mode, the graph of LoopBody will attach a new GraphModule as
# owning_module for debugging while the release mode will not. The lint will
# check whether the graph has owning_module to decide if it needs to check
# call_module. LoopBody might contain get_index as a module call. But it
# is just a function. Hence, it cannot pass the lint check for debug mode.
# We bypass the check if the owning_module is None. Eventually, we should call
# get_index via call_function but not call_module.
if sub_graph.owning_module is None:
sub_graph.lint()
_eliminate_duplicate_to_node(sub_graph)
eliminate_to_dtype(sub_graph)
sub_blocks = [loop_body.root_block] + list(loop_body.subblocks.values())
for sub_block in sub_blocks:
add_to_dtype(sub_block.graph)
def legalize_lowp_fp_dtype(self, nodes):
if all(
isinstance(_node, SchedulerNode) and self.is_lowp_fp_scheduler(_node)
for _node in nodes
):
# Mark the load node to load bf16/fp16
for _node in nodes:
sub_blocks = [_node._body.root_block] + list(
_node._body.subblocks.values()
)
for sub_block in sub_blocks:
for fx_node in sub_block.graph.nodes:
if fx_node.target in ["load", "store"]:
assert fx_node.meta
assert OptimizationContext.key in fx_node.meta
opt_ctx: OptimizationContext = fx_node.meta[
OptimizationContext.key
]
assert opt_ctx.dtype in DTYPE_LOWP_FP
# Bypass the legalization as the kernel can run with bf16/fp16 directly
return
for _node in nodes:
assert isinstance(_node, SchedulerNode)
assert isinstance(_node._body, LoopBody)
body: LoopBody = _node._body
if not body.is_memory_copy():
self.legalize_lowp_fp_dtype_loopbody(body)
def codegen_functions(self, fn_list, var_sizes_list):
assert len(fn_list) == len(var_sizes_list)
kernel_group = self.kernel_group
group, reduction_group = max(var_sizes_list, key=lambda sizes: len(sizes[1]))
self.set_ranges(group, reduction_group)
def codegen_kernel(cls, *args):
with kernel_group.new_kernel(cls, *args) as kernel:
# Ugly hack to maintain the metrics kernel count since
# we only count in CppKernelProxy, not those contained in it
# pyrefly: ignore [bad-assignment]
metrics.generated_kernel_count -= 1
run(kernel)
return kernel
def run(kernel):
vars, reduction_vars = kernel.set_ranges(group, reduction_group)
in_suffix = False
for fn, var_sizes in zip(fn_list, var_sizes_list):
if var_sizes in [
(group, reduction_group),
(tuple(itertools.chain(group, reduction_group)), ()),
]:
assert not in_suffix
fn(vars, reduction_vars)
else:
in_suffix = True
assert var_sizes == (
group,
(),
), f"unexpected group: {var_sizes} != {group}, {reduction_group}"
# we can fuse in some extra pointwise into the suffix
with kernel.write_to_suffix():
fn(vars, ())
scalar_kernel = codegen_kernel(self.kernel_cls)
V.graph.removed_buffers |= scalar_kernel.removed_buffers
V.graph.inplaced_to_remove |= scalar_kernel.inplaced_to_remove
self.loop_nest = LoopNest.build(scalar_kernel)
if not self.picked_vec_isa or not self.itervars:
self.kernels = [scalar_kernel]
self.aggregate_reduction_buffers(False, None)
self.loop_nest.set_kernel(self)
return
# Kernels share the same global contexts like V.graph.wrapper_code, V.kernel.args.
# But the generated scalar kernel has updated these global contexts. Hence, the other kernels
# should not do this again to avoid context conflict. By now, we only control the
# config.inplace_buffers. In the future, we could maintain more contexts.
with torch._inductor.config.patch(inplace_buffers=False):
tiling_select = TilingSelect()
tiling_factors, tiling_indices = tiling_select.select_tiling(
fn_list, var_sizes_list
)
assert len(tiling_factors) == len(tiling_indices)
# <TODO> This should be removed after full support for vectorization is implemented.
could_masked_vec = True
all_dtypes = _get_dtype_from_loopbodies(_get_loop_body(fn_list))
if any(dtype not in MASKED_VECTORIZABLE_DTYPES for dtype in all_dtypes):
                # can be removed once the masked vectorizable dtypes match the vectorizable dtypes
could_masked_vec = False
_inner_loop_reduction_outer_not = False
_outer_loop = None
if tiling_indices:
inner_loop_reduction = False
outer_loop_level = tiling_indices[0]
inner_loop_level = outer_loop_level + 1
if len(self.loop_nest.loops) > inner_loop_level:
inner_loop_reduction = self.loop_nest.loops[
inner_loop_level
].is_reduction
outer_loop_reduction = self.loop_nest.loops[
outer_loop_level
].is_reduction
_inner_loop_reduction_outer_not = (
inner_loop_reduction and not outer_loop_reduction
)
if len(tiling_indices) == 1:
# pyrefly: ignore [bad-assignment]
metrics.generated_cpp_vec_kernel_count += 1
loop = self.loop_nest.tile(tiling_indices[0], factor=tiling_factors[0])
vec_kernel = codegen_kernel(
self.vec_kernel_cls, tiling_factors[0], tiling_indices[0]
)
tail_size = loop.size - loop.tiled_size
vec_kernel.active_ranges = {loop.var: (0, loop.tiled_size)}
if config.cpp.enable_loop_tail_vec and could_masked_vec:
tail_kernel = codegen_kernel(
self.vec_kernel_cls,
tiling_factors[0],
tiling_indices[0],
tail_size,
)
else:
tail_kernel = scalar_kernel
scalar_kernel.inner_itervars = [loop.var]
tail_kernel.active_ranges = {loop.var: (loop.tiled_size, loop.size)}
self.kernels = [vec_kernel, tail_kernel]
_outer_loop = loop
elif len(tiling_indices) == 2:
assert (
tiling_indices[1] == len(self.itervars) - 1
and tiling_factors[0] == tiling_factors[1]
)
# pyrefly: ignore [bad-assignment]
metrics.generated_cpp_vec_kernel_count += 2
outer_loop = self.loop_nest.tile(
tiling_indices[0], factor=tiling_factors[0]
)
outer_ranges = {
"main": (0, outer_loop.tiled_size),
"tail": (outer_loop.tiled_size, outer_loop.size),
}
outer_tail_size = outer_loop.size - outer_loop.tiled_size
inner_loop = self.loop_nest.tile(
tiling_indices[1], factor=tiling_factors[0]
)
inner_ranges = {
"main": (0, inner_loop.tiled_size),
"tail": (inner_loop.tiled_size, inner_loop.size),
}
inner_tail_size = inner_loop.size - inner_loop.tiled_size
tile2d_kernel = codegen_kernel(
self.tile2d_kernel_cls,
tiling_factors[0],
tiling_indices,
)
tile2d_kernel.active_ranges = {
outer_loop.var: outer_ranges["main"],
inner_loop.var: inner_ranges["main"],
}
tail_kernel = []
if config.cpp.enable_loop_tail_vec and could_masked_vec:
for outer_r, inner_r in (
("main", "tail"),
("tail", "main"),
("tail", "tail"),
):
_inner_tail_size = (
inner_tail_size if inner_r == "tail" else None
)
_outer_tail_size = (
outer_tail_size if outer_r == "tail" else None
)
kernel = codegen_kernel(
self.tile2d_kernel_cls,
tiling_factors[0],
tiling_indices,
_inner_tail_size,
_outer_tail_size,
)
kernel.active_ranges = {
outer_loop.var: outer_ranges[outer_r],
inner_loop.var: inner_ranges[inner_r],
}
tail_kernel.append(kernel)
else:
vec_kernel = codegen_kernel(
self.vec_kernel_cls, tiling_factors[0], tiling_indices[0]
)
vec_kernel.active_ranges = {
outer_loop.var: outer_ranges["main"],
inner_loop.var: inner_ranges["tail"],
}
vec_kernel.inner_itervars = [inner_loop.var]
tail_kernel.append(vec_kernel)
scalar_kernel.active_ranges = {
outer_loop.var: outer_ranges["tail"],
inner_loop.var: (0, inner_loop.size),
}
scalar_kernel.inner_itervars = [inner_loop.var, outer_loop.var]
tail_kernel.append(scalar_kernel)
self.kernels = [tile2d_kernel] + tail_kernel
_outer_loop = outer_loop
else:
self.kernels = [scalar_kernel]
self.aggregate_reduction_buffers(
_inner_loop_reduction_outer_not, _outer_loop
)
self.loop_nest.set_kernel(self)
def codegen_loop_bodies(self, loop_bodies, var_sizes_list):
for body in loop_bodies:
self.legalize_lowp_fp_dtype_loopbody(body)
DataTypePropagation.propagate_loopbody(body)
self.codegen_functions(loop_bodies, var_sizes_list)
def codegen_nodes(self, nodes: list[SchedulerNode]):
# Legalize BF16 node by adding to_dtype explicitly
self.legalize_lowp_fp_dtype(nodes)
self.data_type_propagation(nodes)
assert len(nodes) >= 1
def fn(node, *index_vars):
node.decide_inplace_update()
node.mark_run()
if isinstance(V.kernel, NullKernelHandler):
return node._body(*index_vars)
else:
return node.codegen(index_vars)
fn_list = [functools.partial(fn, node) for node in nodes]
if (
isinstance(V.local_buffer_context, LocalBufferContext)
and V.local_buffer_context.local_buffers
):
def wrap_fn(fn):
wrapped_fn = V.local_buffer_context.localize_function(
fn,
)
wrapped_fn.original_fn = fn
return wrapped_fn
fn_list = [wrap_fn(fn) for fn in fn_list]
var_sizes_list = [node.group[1] for node in nodes]
self.codegen_functions(fn_list, var_sizes_list)
def codegen_loops(self, code, worksharing):
self.codegen_loops_impl(self.loop_nest, code, worksharing)
def update_stores_with_parallel_reduction(self):
for kernel in self.kernels:
kernel.update_stores_with_parallel_reduction()
def gen_body(self, code: Optional[BracesBuffer] = None):
assert code is not None
if_prefix = "C10_LIKELY"
for kernel in self.kernels:
with contextlib.ExitStack() as stack:
if kernel.codegen_conditions(code, if_prefix):
if_prefix = "C10_UNLIKELY"
stack.enter_context(code.indent())
code.splice(kernel.gen_body())
def aggregate_reduction_buffers(
self, inner_loop_reduction_outer_not: bool, outer_loop: Optional["LoopLevel"]
):
"""
CppKernel/CppVecKernel/CppTile2dKernel have reduction buffers themselves.
Here, we decide how to aggregate them together and place new reduction buffers
under CppKernelProxy.
"""
def aggregate_reduction_prefix_suffix(outer_loop: "LoopLevel"):
assert len(self.kernels) >= 2
main_loop_kernel = self.kernels[0]
tail_loop_kernel = self.kernels[-1]
assert isinstance(main_loop_kernel, self.vec_kernel_cls)
# Prefix
if type(tail_loop_kernel) is self.kernel_cls:
# if tail loop kernel is a scalar kernel, we need to extend tmp_acc -> tmp_acc_arr[] to
# hold the temporary inner loop acc result for outer tail loop
tail_loop_kernel.finalize_reduction_prefix(
main_loop_kernel.tiling_factor
)
main_loop_kernel.finalize_reduction_prefix()
self.reduction_prefix.splice(
tail_loop_kernel.reduction_prefix
+ main_loop_kernel.reduction_prefix
)
else:
main_loop_kernel.finalize_reduction_prefix()
self.reduction_prefix.splice(main_loop_kernel.reduction_prefix)
# Suffix
suffix_buf = BracesBuffer()
with contextlib.ExitStack() as stack:
if main_loop_kernel.codegen_conditions(
suffix_buf, "C10_LIKELY", outer_loop.var
):
stack.enter_context(suffix_buf.indent())
suffix_buf.splice(main_loop_kernel.reduction_suffix)
with contextlib.ExitStack() as stack:
if tail_loop_kernel.codegen_conditions(
suffix_buf, "C10_UNLIKELY", outer_loop.var
):
stack.enter_context(suffix_buf.indent())
if type(tail_loop_kernel) is self.kernel_cls:
reduction_vars = tail_loop_kernel.reduction_var_names
for name in reduction_vars:
new_name = f"{name}_arr[{outer_loop.var}_tail - {cexpr_index(outer_loop.tiled_size)}]"
replace_acc_name(tail_loop_kernel.stores, name, new_name)
replace_acc_name(
tail_loop_kernel.reduction_suffix, name, new_name
)
# If tail loop kernel is a scalar kernel, use direct sum instead of cascade_sum_combine
# as the reduction vars are extended: tmp_acc -> tmp_acc_arr[].
replace_cascade_sum_with_add(tail_loop_kernel.stores)
suffix_buf.splice(
move_code_under_inner_loop(
tail_loop_kernel.reduction_suffix,
outer_loop.var,
f"{outer_loop.var}_tail",
outer_loop.tiled_size,
outer_loop.size,
)
)
else:
suffix_buf.splice(tail_loop_kernel.reduction_suffix)
self.reduction_suffix = suffix_buf
main_kernel = self.kernels[0]
if inner_loop_reduction_outer_not:
assert outer_loop
aggregate_reduction_prefix_suffix(outer_loop)
else:
main_kernel.finalize_reduction_prefix()
self.reduction_prefix.splice(main_kernel.reduction_prefix)
self.reduction_suffix.splice(main_kernel.reduction_suffix)
self.parallel_reduction_prefix.splice(main_kernel.parallel_reduction_prefix)
self.parallel_reduction_suffix.splice(main_kernel.parallel_reduction_suffix)
self.local_reduction_init.splice(main_kernel.local_reduction_init)
self.local_reduction_stores.splice(main_kernel.local_reduction_stores)
self.non_parallel_reduction_prefix.splice(
main_kernel.non_parallel_reduction_prefix
)
self.non_parallel_reduction_suffix.splice(
main_kernel.non_parallel_reduction_suffix
)
|
CppKernelProxy
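A minimal illustration of the kernel_cls / vec_kernel_cls hooks described in the comment block at the top of the class; the My* classes are hypothetical placeholders, not real Inductor kernels:
from torch._inductor.codegen.cpp import CppKernel, CppKernelProxy, CppVecKernel
class MyCppKernel(CppKernel):
    """Hypothetical scalar kernel with customized code generation."""
class MyCppVecKernel(CppVecKernel):
    """Hypothetical vectorized kernel with customized code generation."""
class MyCppKernelProxy(CppKernelProxy):
    # codegen_functions() will now instantiate the custom kernels instead of the defaults,
    # so none of the proxy's codegen logic needs to be copied just to swap kernel types.
    kernel_cls = MyCppKernel
    vec_kernel_cls = MyCppVecKernel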
|
python
|
ray-project__ray
|
rllib/utils/exploration/slate_soft_q.py
|
{
"start": 411,
"end": 1509
}
|
class ____(SoftQ):
@override(SoftQ)
def get_exploration_action(
self,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True,
):
assert (
self.framework == "torch"
), "ERROR: SlateSoftQ only supports torch so far!"
cls = type(action_distribution)
# Re-create the action distribution with the correct temperature
# applied.
action_distribution = cls(
action_distribution.inputs, self.model, temperature=self.temperature
)
batch_size = action_distribution.inputs.size()[0]
action_logp = torch.zeros(batch_size, dtype=torch.float)
self.last_timestep = timestep
# Explore.
if explore:
# Return stochastic sample over (q-value) logits.
action = action_distribution.sample()
# Return the deterministic "sample" (argmax) over (q-value) logits.
else:
action = action_distribution.deterministic_sample()
return action, action_logp
|
SlateSoftQ
|
python
|
pypa__warehouse
|
tests/unit/admin/views/test_flags.py
|
{
"start": 548,
"end": 2761
}
|
class ____:
@pytest.mark.parametrize(
("description", "enabled", "post", "expected_description", "expected_enabled"),
[
(
# Nothing changed when enabled
"old",
True,
{"id": "foo-bar", "description": "old", "enabled": "on"},
"old",
True,
),
(
# Nothing changed when disabled
"old",
False,
{"id": "foo-bar", "description": "old"},
"old",
False,
),
(
# Enable flag
"old",
False,
{"id": "foo-bar", "description": "old", "enabled": "on"},
"old",
True,
),
(
# Disable flag
"old",
True,
{"id": "foo-bar", "description": "old"},
"old",
False,
),
(
# Change description when enabled
"old",
True,
{"id": "foo-bar", "description": "new", "enabled": "on"},
"new",
True,
),
(
# Change description when disabled
"old",
False,
{"id": "foo-bar", "description": "new"},
"new",
False,
),
],
)
def test_edit_flag(
self,
db_request,
description,
enabled,
post,
expected_description,
expected_enabled,
):
# Clear out any existing flags added from migrations
db_request.db.query(AdminFlag).delete()
flag = AdminFlagFactory(id="foo-bar", description=description, enabled=enabled)
db_request.POST = post
db_request.route_path = lambda *a: "/the/redirect"
db_request.flash = lambda *a: None
views.edit_flag(db_request)
db_request.db.flush()
assert flag.enabled == expected_enabled
assert flag.description == expected_description
|
TestEditFlag
|
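The parametrization in the record above encodes the usual HTML checkbox convention: the flag ends up enabled when the POST body contains "enabled": "on" and disabled when the key is absent, while the description is taken verbatim from the form. A plausible reading of that mapping (hypothetical sketch, not the actual warehouse view code):

def apply_edit(flag, post):
    # Checkbox semantics: key present ("on") -> enabled, key missing -> disabled.
    flag.enabled = post.get("enabled") == "on"
    flag.description = post["description"]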
python
|
numpy__numpy
|
numpy/polynomial/tests/test_polynomial.py
|
{
"start": 17363,
"end": 22880
}
|
class ____:
def test_polyfromroots(self):
res = poly.polyfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2])
tgt = Tlist[i]
res = poly.polyfromroots(roots) * 2**(i - 1)
assert_almost_equal(trim(res), trim(tgt))
def test_polyroots(self):
assert_almost_equal(poly.polyroots([1]), [])
assert_almost_equal(poly.polyroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = poly.polyroots(poly.polyfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
# Testing for larger root values
for i in np.logspace(10, 25, num=1000, base=10):
tgt = np.array([-1, 1, i])
res = poly.polyroots(poly.polyfromroots(tgt))
# Adapting the expected precision according to the root value,
# to take into account numerical calculation error.
assert_almost_equal(res, tgt, 15 - int(np.log10(i)))
for i in np.logspace(10, 25, num=1000, base=10):
tgt = np.array([-1, 1.01, i])
res = poly.polyroots(poly.polyfromroots(tgt))
# Adapting the expected precision according to the root value,
# to take into account numerical calculation error.
assert_almost_equal(res, tgt, 14 - int(np.log10(i)))
def test_polyfit(self):
def f(x):
return x * (x - 1) * (x - 2)
def f2(x):
return x**4 + x**2 + 1
# Test exceptions
assert_raises(ValueError, poly.polyfit, [1], [1], -1)
assert_raises(TypeError, poly.polyfit, [[1]], [1], 0)
assert_raises(TypeError, poly.polyfit, [], [1], 0)
assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0)
assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0)
assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0)
assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, poly.polyfit, [1], [1], [-1,])
assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, poly.polyfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = poly.polyfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(poly.polyval(x, coef3), y)
coef3 = poly.polyfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(poly.polyval(x, coef3), y)
#
coef4 = poly.polyfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(poly.polyval(x, coef4), y)
coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(poly.polyval(x, coef4), y)
#
coef2d = poly.polyfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
yw[0::2] = 0
wcoef3 = poly.polyfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex values x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1])
# test fitting only even Polyendre polynomials
x = np.linspace(-1, 1)
y = f2(x)
coef1 = poly.polyfit(x, y, 4)
assert_almost_equal(poly.polyval(x, coef1), y)
coef2 = poly.polyfit(x, y, [0, 2, 4])
assert_almost_equal(poly.polyval(x, coef2), y)
assert_almost_equal(coef1, coef2)
def test_polytrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, poly.polytrim, coef, -1)
# Test results
assert_equal(poly.polytrim(coef), coef[:-1])
assert_equal(poly.polytrim(coef, 1), coef[:-3])
assert_equal(poly.polytrim(coef, 2), [0])
def test_polyline(self):
assert_equal(poly.polyline(3, 4), [3, 4])
def test_polyline_zero(self):
assert_equal(poly.polyline(3, 0), [3])
def test_fit_degenerate_domain(self):
p = poly.Polynomial.fit([1], [2], deg=0)
assert_equal(p.coef, [2.])
p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0)
assert_almost_equal(p.coef, [2.05])
with pytest.warns(np.exceptions.RankWarning):
p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1)
def test_result_type(self):
w = np.array([-1, 1], dtype=np.float32)
p = np.polynomial.Polynomial(w, domain=w, window=w)
v = p(2)
assert_equal(v.dtype, np.float32)
arr = np.polydiv(1, np.float32(1))
assert_equal(arr[0].dtype, np.float64)
|
TestMisc
|
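As a quick sanity check of the round trip exercised by test_polyroots above: coefficients are stored lowest degree first, so the polynomial with roots 1 and 2 is (x - 1)(x - 2) = 2 - 3x + x**2.

import numpy.polynomial.polynomial as poly

coefs = poly.polyfromroots([1, 2])   # array([ 2., -3.,  1.])
roots = poly.polyroots(coefs)        # array([1., 2.])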
python
|
mlflow__mlflow
|
mlflow/tracing/destination.py
|
{
"start": 3411,
"end": 5060
}
|
class ____(TraceDestination):
"""
A destination representing a Databricks tracing server.
By setting this destination in the :py:func:`mlflow.tracing.set_destination` function,
MLflow will log traces to the specified experiment.
If neither experiment_id nor experiment_name is specified, an active experiment
when traces are created will be used as the destination.
If both are specified, they must refer to the same experiment.
Attributes:
experiment_id: The ID of the experiment to log traces to.
experiment_name: The name of the experiment to log traces to.
"""
experiment_id: str | None = None
experiment_name: str | None = None
def __post_init__(self):
if self.experiment_id is not None:
self.experiment_id = str(self.experiment_id)
if self.experiment_name is not None:
from mlflow.tracking._tracking_service.utils import _get_store
# NB: Use store directly rather than fluent API to avoid dependency on MLflowClient
experiment_id = _get_store().get_experiment_by_name(self.experiment_name).experiment_id
if self.experiment_id is not None and self.experiment_id != experiment_id:
raise MlflowException.invalid_parameter_value(
"experiment_id and experiment_name must refer to the same experiment"
)
self.experiment_id = experiment_id
@property
def type(self) -> str:
return "databricks"
def to_location(self) -> TraceLocationBase:
return MlflowExperimentLocation(experiment_id=self.experiment_id)
|
Databricks
|
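For context, the destination above is meant to be passed to mlflow.tracing.set_destination, the function its docstring references. A minimal usage sketch, assuming an experiment ID is already known (the ID below is a placeholder):

import mlflow
from mlflow.tracing.destination import Databricks

# "1234567890" is a placeholder experiment ID.
mlflow.tracing.set_destination(Databricks(experiment_id="1234567890"))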
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/execution/context/system.py
|
{
"start": 8002,
"end": 9728
}
|
class ____(IPlanContext):
"""Context for the orchestration of a run.
This context assumes inability to run user code directly.
"""
def __init__(
self,
plan_data: PlanData,
log_manager: DagsterLogManager,
executor: Executor,
output_capture: Optional[dict[StepOutputHandle, Any]],
resume_from_failure: bool = False,
):
self._plan_data = plan_data
self._log_manager = log_manager
self._executor = executor
self._output_capture = output_capture
self._resume_from_failure = resume_from_failure
@property
def plan_data(self) -> PlanData:
return self._plan_data
@property
def reconstructable_job(self) -> ReconstructableJob:
if not isinstance(self.job, ReconstructableJob):
raise DagsterInvariantViolationError(
"reconstructable_pipeline property must be a ReconstructableJob"
)
return self.job
@property
def log(self) -> DagsterLogManager:
return self._log_manager
@property
def executor(self) -> Executor:
return self._executor
@property
def output_capture(self) -> Optional[dict[StepOutputHandle, Any]]:
return self._output_capture
def for_step(self, step: ExecutionStep) -> "IStepContext":
return StepOrchestrationContext(
plan_data=self.plan_data,
log_manager=self._log_manager.with_tags(**step.logging_tags),
executor=self.executor,
step=step,
output_capture=self.output_capture,
)
@property
def resume_from_failure(self) -> bool:
return self._resume_from_failure
|
PlanOrchestrationContext
|
python
|
huggingface__transformers
|
tests/models/deepseek_v2/test_modeling_deepseek_v2.py
|
{
"start": 7391,
"end": 11288
}
|
class ____(unittest.TestCase):
def test_deepseek_v2_lite(self):
EXPECTED_TEXT = ['An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors.\n\nAttention functions are used in a variety of applications, including natural language processing, computer vision, and reinforcement learning.\n\nThe attention function is a function that takes a query and a set of key-value pairs as input and outputs a vector'] # fmt: skip
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V2-Lite")
model = DeepseekV2ForCausalLM.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite",
device_map=torch_device,
dtype=torch.bfloat16,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
input_text = [
"An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors." # fmt: skip
]
model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
generated_ids = model.generate(**model_inputs, max_new_tokens=50, do_sample=False)
generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(generated_text, EXPECTED_TEXT)
def test_logits_eager(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = DeepseekV2ForCausalLM.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite",
device_map=torch_device,
dtype=torch.bfloat16,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
attn_implementation="eager",
)
with torch.no_grad():
out = model(torch.tensor([input_ids]).to(torch_device))
EXPECTED_MEAN = torch.tensor([[-6.1232, -5.0952, -4.4493, -2.6536, -2.0608, -2.3991, -3.8013, -2.8681]], device=torch_device) # fmt: skip
torch.testing.assert_close(out.logits.float().mean(-1), EXPECTED_MEAN, atol=1e-3, rtol=1e-3)
EXPECTED_SLICE = torch.tensor([-1.2500, -0.9961, -0.0194, -3.1562, 1.2812, -2.7656, -0.8438, -3.0469, -2.7812, -0.6328, -0.4160, -1.9688, -2.4219, -1.0391, -3.8906], device=torch_device) # fmt: skip
torch.testing.assert_close(out.logits[0, 0, :15].float(), EXPECTED_SLICE, atol=1e-3, rtol=1e-3)
def test_batch_fa2(self):
EXPECTED_TEXT = [
"Simply put, the theory of relativity states that \nthe laws of physics are the same for all observers, regardless of their \nrelative motion.\nThe theory of relativity is a theory of space, time, and gravity.\nThe theory of", # fmt: skip
"My favorite all time favorite condiment is ketchup. I love ketchup. I love ketchup on my hot dogs, hamburgers, french fries, and even on my eggs. I love ketchup. I love ketchup so much that I", # fmt: skip
]
prompts = [
"Simply put, the theory of relativity states that ",
"My favorite all time favorite condiment is ketchup.",
]
tokenizer = AutoTokenizer.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite", pad_token="</s>", padding_side="right"
)
model = DeepseekV2ForCausalLM.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite",
device_map=torch_device,
dtype=torch.bfloat16,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False)
generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, generated_text)
|
DeepseekV2IntegrationTest
|
python
|
vyperlang__vyper
|
tests/functional/builtins/codegen/test_abi_decode_fuzz.py
|
{
"start": 8458,
"end": 15485
}
|
class ____:
nesting: int = 0
num_dynamic_types: int = 0 # number of dynamic types in the type
breadth: int = 0 # e.g. int16[50] has higher breadth than int16[1]
width: int = 0 # size of type
def _type_stats(typ: VyperType) -> _TypeStats:
def _finalize(): # little trick to save re-typing the arguments
width = typ.memory_bytes_required
return _TypeStats(
nesting=nesting, num_dynamic_types=num_dynamic_types, breadth=breadth, width=width
)
if typ._is_prim_word:
nesting = 0
breadth = 1
num_dynamic_types = 0
return _finalize()
if isinstance(typ, (BytesT, StringT)):
nesting = 0
breadth = 1 # idk
num_dynamic_types = 1
return _finalize()
if isinstance(typ, TupleT):
substats = [_type_stats(t) for t in typ.member_types]
nesting = 1 + max(s.nesting for s in substats)
breadth = max(typ.length, *[s.breadth for s in substats])
num_dynamic_types = sum(s.num_dynamic_types for s in substats)
return _finalize()
if isinstance(typ, StructT):
substats = [_type_stats(t) for t in typ.tuple_members()]
nesting = 1 + max(s.nesting for s in substats)
breadth = max(len(typ.member_types), *[s.breadth for s in substats])
num_dynamic_types = sum(s.num_dynamic_types for s in substats)
return _finalize()
if isinstance(typ, DArrayT):
substat = _type_stats(typ.value_type)
nesting = 1 + substat.nesting
breadth = max(typ.count, substat.breadth)
num_dynamic_types = 1 + substat.num_dynamic_types
return _finalize()
if isinstance(typ, SArrayT):
substat = _type_stats(typ.value_type)
nesting = 1 + substat.nesting
breadth = max(typ.count, substat.breadth)
num_dynamic_types = substat.num_dynamic_types
return _finalize()
raise RuntimeError("unreachable")
@pytest.fixture(scope="module")
def payload_copier(get_contract_from_ir):
# some contract which will return the buffer passed to it
# note: hardcode the location of the bytestring
ir = [
"with",
"length",
["calldataload", 36],
["seq", ["calldatacopy", 0, 68, "length"], ["return", 0, "length"]],
]
return get_contract_from_ir(["deploy", 0, ir, 0])
PARALLELISM = 1 # increase on fuzzer box
# NOTE: this is a heavy test. 100 types * 100 payloads per type can take
# 3-4 minutes on a regular CPU core.
@pytest.mark.parametrize("_n", list(range(PARALLELISM)))
@hp.given(typ=vyper_type())
@hp.settings(max_examples=100, **_settings)
def test_abi_decode_fuzz(_n, typ, get_contract, tx_failed, payload_copier, env):
source_fragments, typ = typ
# import time
# t0 = time.time()
# print("ENTER", typ)
wrapped_type = calculate_type_for_external_return(typ)
stats = _type_stats(typ)
# for k, v in asdict(stats).items():
# event(k, v)
hp.target(stats.num_dynamic_types)
# hp.target(typ.abi_type.is_dynamic() + typ.abi_type.is_complex_type()))
# add max_mutations bytes worth of padding so we don't just get caught
# by bytes length check at function entry
type_bound = wrapped_type.abi_type.size_bound()
buffer_bound = type_bound + MAX_MUTATIONS
preamble = "\n\n".join(source_fragments)
type_str = str(typ) # annotation in vyper code
code = f"""
{preamble}
@external
def run(xs: Bytes[{buffer_bound}]) -> {type_str}:
ret: {type_str} = abi_decode(xs, {type_str})
return ret
interface Foo:
def foo(xs: Bytes[{buffer_bound}]) -> {type_str}: view # STATICCALL
def bar(xs: Bytes[{buffer_bound}]) -> {type_str}: nonpayable # CALL
@external
def run2(xs: Bytes[{buffer_bound}], copier: Foo) -> {type_str}:
assert len(xs) <= {type_bound}
return staticcall copier.foo(xs)
@external
def run3(xs: Bytes[{buffer_bound}], copier: Foo) -> {type_str}:
assert len(xs) <= {type_bound}
return (extcall copier.bar(xs))
"""
try:
c = get_contract(code)
except EvmError as e:
if env.contract_size_limit_error in str(e):
hp.assume(False)
# print(code)
hp.note(code)
c = get_contract(code)
@hp.given(data=payload_from(wrapped_type))
@hp.settings(max_examples=100, **_settings)
def _fuzz(data):
hp.note(f"type: {typ}")
hp.note(f"abi_t: {wrapped_type.abi_type.selector_name()}")
hp.note(data.hex())
try:
expected = spec_decode(wrapped_type, data)
# unwrap if necessary
if needs_external_call_wrap(typ):
assert isinstance(expected, tuple)
(expected,) = expected
hp.note(f"expected {expected}")
assert expected == c.run(data)
assert expected == c.run2(data, payload_copier.address)
assert expected == c.run3(data, payload_copier.address)
except DecodeError:
# note EvmError includes reverts *and* exceptional halts.
# we can get OOG during abi decoding due to how
# `_abi_payload_size()` works
hp.note("expect failure")
with tx_failed(EvmError):
c.run(data)
with tx_failed(EvmError):
c.run2(data, payload_copier.address)
with tx_failed(EvmError):
c.run3(data, payload_copier.address)
_fuzz()
# t1 = time.time()
# print(f"elapsed {t1 - t0}s")
@pytest.mark.parametrize("_n", list(range(PARALLELISM)))
@hp.given(typ=vyper_type())
@hp.settings(max_examples=100, **_settings)
def test_abi_decode_no_wrap_fuzz(_n, typ, get_contract, tx_failed, env):
source_fragments, typ = typ
# import time
# t0 = time.time()
# print("ENTER", typ)
stats = _type_stats(typ)
hp.target(stats.num_dynamic_types)
# add max_mutations bytes worth of padding so we don't just get caught
# by bytes length check at function entry
type_bound = typ.abi_type.size_bound()
buffer_bound = type_bound + MAX_MUTATIONS
type_str = str(typ) # annotation in vyper code
preamble = "\n\n".join(source_fragments)
code = f"""
{preamble}
@external
def run(xs: Bytes[{buffer_bound}]) -> {type_str}:
ret: {type_str} = abi_decode(xs, {type_str}, unwrap_tuple=False)
return ret
"""
try:
c = get_contract(code)
except EvmError as e:
if env.contract_size_limit_error in str(e):
hp.assume(False)
@hp.given(data=payload_from(typ))
@hp.settings(max_examples=100, **_settings)
def _fuzz(data):
hp.note(code)
hp.note(data.hex())
try:
expected = spec_decode(typ, data)
hp.note(f"expected {expected}")
assert expected == c.run(data)
except DecodeError:
hp.note("expect failure")
with tx_failed(EvmError):
c.run(data)
_fuzz()
# t1 = time.time()
# print(f"elapsed {t1 - t0}s")
|
_TypeStats
|
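The recursion in _type_stats above aggregates child stats the same way at every level: nesting is one more than the deepest child, breadth is the max of the container's own length and its children's breadths, and the dynamic-type count sums over children (plus one for dynamic containers). A toy stand-alone analogue over nested Python lists, purely illustrative and independent of Vyper's type objects:

from dataclasses import dataclass

@dataclass
class Stats:
    nesting: int
    breadth: int

def stats(node) -> Stats:
    # Leaves (non-lists) have nesting 0 and breadth 1.
    if not isinstance(node, list):
        return Stats(nesting=0, breadth=1)
    sub = [stats(child) for child in node]
    return Stats(
        nesting=1 + max(s.nesting for s in sub),
        breadth=max(len(node), *[s.breadth for s in sub]),
    )

assert stats([1, [2, 3, 4]]) == Stats(nesting=2, breadth=3)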
python
|
getsentry__sentry
|
src/sentry/feedback/endpoints/organization_feedback_categories.py
|
{
"start": 2511,
"end": 14570
}
|
class ____(OrganizationEndpoint):
owner = ApiOwner.FEEDBACK
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
permission_classes = (OrganizationUserReportsPermission,)
def get(self, request: Request, organization: Organization) -> Response:
"""
Gets categories of feedbacks for an organization.
Returns groups of labels, which correspond to categories, for feedbacks that can be filtered by:
- A list of projects
- The date range that they were first seen in (defaults to the last 7 days)
If the request is successful, the return format is:
{
"categories": [
{
"primaryLabel": str,
"associatedLabels": list[str],
"feedbackCount": int,
}
...
],
"success": True,
"numFeedbacksContext": int,
}
The categories are returned as a list sorted by feedback count in descending order.
Returns 500 if the Seer endpoint fails.
:pparam string organization_id_or_slug: the id or slug of the organization.
:qparam int project: project IDs to filter by
:qparam string statsPeriod: filter feedbacks by date range (e.g. "14d")
:qparam string start: start date range (alternative to statsPeriod)
:qparam string end: end date range (alternative to statsPeriod)
:auth: required
"""
if not features.has(
"organizations:user-feedback-ai-categorization-features",
organization,
actor=request.user,
) or not has_seer_access(organization, actor=request.user):
return Response(
{"detail": "AI categorization is not available for this organization."}, status=403
)
try:
start, end = get_date_range_from_stats_period(
request.GET,
optional=False,
default_stats_period=timedelta(days=7),
)
except InvalidParams:
raise ParseError(detail="Invalid or missing date range")
projects = self.get_projects(request, organization)
# Sort first, then convert each element to a string
numeric_project_ids = sorted([project.id for project in projects])
project_ids = [str(project_id) for project_id in numeric_project_ids]
hashed_project_ids = hash_from_values(project_ids)
if end - start < timedelta(days=2):
# Hour granularity date range.
categorization_cache_key = f"feedback_categorization:{organization.id}:{start.strftime('%Y-%m-%d-%H')}:{end.strftime('%Y-%m-%d-%H')}:{hashed_project_ids}"
else:
# Day granularity date range. Date range is long enough that the categories won't change much (as long as the same day is selected)
categorization_cache_key = f"feedback_categorization:{organization.id}:{start.strftime('%Y-%m-%d')}:{end.strftime('%Y-%m-%d')}:{hashed_project_ids}"
has_cache = features.has(
"organizations:user-feedback-ai-summaries-cache", organization, actor=request.user
)
if has_cache:
cache_entry = cache.get(categorization_cache_key)
if cache_entry:
return Response(
{
"categories": cache_entry["categories"],
"success": True,
"numFeedbacksContext": cache_entry["numFeedbacksContext"],
}
)
recent_feedbacks = query_recent_feedbacks_with_ai_labels(
organization_id=organization.id,
project_ids=numeric_project_ids,
start=start,
end=end,
limit=MAX_FEEDBACKS_CONTEXT,
)
if len(recent_feedbacks) < MIN_FEEDBACKS_CONTEXT:
logger.error("Too few feedbacks to generate categories")
return Response(
{
"categories": None,
"success": False,
"numFeedbacksContext": 0,
}
)
context_feedbacks = []
total_chars = 0
for feedback in recent_feedbacks:
total_chars += len(feedback["feedback"])
total_chars += sum(len(label) for label in feedback["labels"])
if total_chars > MAX_FEEDBACKS_CONTEXT_CHARS:
break
context_feedbacks.append(
LabelGroupFeedbacksContext(feedback=feedback["feedback"], labels=feedback["labels"])
)
# Gets the top labels by feedbacks to augment the context that the LLM has, instead of just asking it to generate categories without knowing the most common labels
top_labels_result = query_top_ai_labels_by_feedback_count(
organization_id=organization.id,
project_ids=numeric_project_ids,
start=start,
end=end,
limit=NUM_TOP_LABELS,
)
# Guaranteed to be non-empty since recent_feedbacks is non-empty
top_labels = [result["label"] for result in top_labels_result]
seer_request = LabelGroupsRequest(
labels=top_labels,
feedbacks_context=context_feedbacks,
)
if len(context_feedbacks) >= THRESHOLD_TO_GET_ASSOCIATED_LABELS:
try:
response = make_signed_seer_api_request(
connection_pool=seer_summarization_connection_pool,
path=SEER_LABEL_GROUPS_ENDPOINT_PATH,
body=json.dumps(seer_request).encode("utf-8"),
timeout=SEER_TIMEOUT_S,
retries=SEER_RETRIES,
)
except Exception:
logger.exception("Seer failed to generate user feedback label groups")
return Response(
{"detail": "Failed to generate user feedback label groups"}, status=500
)
if response.status < 200 or response.status >= 300:
logger.error(
"Seer failed to generate user feedback label groups",
extra={"status_code": response.status, "response_data": response.data},
)
return Response(
{"detail": "Failed to generate user feedback label groups"}, status=500
)
label_groups = response.json()["data"]
else:
# If there are less than THRESHOLD_TO_GET_ASSOCIATED_LABELS feedbacks, we don't ask for associated labels
# The more feedbacks there are, the LLM does a better job of generating associated labels since it has more context
label_groups = [
FeedbackLabelGroup(primaryLabel=label, associatedLabels=[]) for label in top_labels
]
# If the LLM just forgets or adds extra primary labels, log it but still generate categories
if len(label_groups) != len(top_labels):
logger.warning(
"Number of label groups does not match number of primary labels passed in Seer",
extra={
"label_groups": label_groups,
"top_labels": top_labels,
},
)
# If the LLM hallucinates primary label(s), log it but still generate categories
for label_group in label_groups:
if label_group["primaryLabel"] not in top_labels:
logger.warning(
"LLM hallucinated primary label",
extra={"label_group": label_group},
)
# Sometimes, the LLM will give us associated labels that, to put it bluntly, are not associated labels.
# For example, if the primary label is "Navigation", the LLM might give us "Usability" or "User Interface" as associated labels.
# In a case like that, "Usability" and "User Interface" are obviously more general, so will most likely have more feedbacks associated with them than "Navigation".
# One way to filter these out is to check the counts of each associated label, and compare that to the counts of the primary label.
# If the count of the associated label is >3/4 of the count of the primary label, we can assume that the associated label is not a valid associated label.
# Even if it is valid, we don't really care, it matters more that we get rid of it in the situations that it is invalid (which is pretty often).
# Stores each label as an individual label group (so a list of lists, each inside list containing a single label)
# This is done to get the counts of each label individually, so we can filter out invalid associated labels
flattened_label_groups: list[list[str]] = []
for label_group in label_groups:
flattened_label_groups.append([label_group["primaryLabel"]])
flattened_label_groups.extend([[label] for label in label_group["associatedLabels"]])
individual_label_counts = query_label_group_counts(
organization_id=organization.id,
project_ids=numeric_project_ids,
start=start,
end=end,
labels_groups=flattened_label_groups,
)
label_to_count = {}
for label_lst, count in zip(flattened_label_groups, individual_label_counts):
label_to_count[label_lst[0]] = count
label_groups_lists: list[list[str]] = []
for i, label_group in enumerate(label_groups):
primary_label = label_group["primaryLabel"]
associated_labels = label_group["associatedLabels"]
label_groups_lists.append([primary_label])
for associated_label in associated_labels:
# Once we have MAX_GROUP_LABELS total labels, stop adding more
if len(label_groups_lists[i]) >= MAX_GROUP_LABELS:
break
# Ensure the associated label has feedbacks associated with it, and it doesn't have *too many* feedbacks associated with it
# Worst case, if the associated label is wrong, <= 3/4 of the feedbacks associated with it are wrong
if (
label_to_count[associated_label] * 4 <= label_to_count[primary_label] * 3
and label_to_count[associated_label] != 0
):
label_groups_lists[i].append(associated_label)
# label_groups_lists might be empty if the LLM just decides not to give us any primary labels (leading to ValueError, then 500)
# This will be logged since top_labels is guaranteed to be non-empty, but label_groups_lists will be empty
label_feedback_counts = query_label_group_counts(
organization_id=organization.id,
project_ids=numeric_project_ids,
start=start,
end=end,
labels_groups=label_groups_lists,
)
categories = []
for i, list_group in enumerate(label_groups_lists):
primaryLabel = list_group[0]
associatedLabels = list_group[1:]
categories.append(
{
"primaryLabel": primaryLabel,
"associatedLabels": associatedLabels,
"feedbackCount": label_feedback_counts[i],
}
)
categories.sort(key=lambda x: x["feedbackCount"], reverse=True)
categories = categories[:MAX_RETURN_CATEGORIES]
if has_cache:
cache.set(
categorization_cache_key,
{"categories": categories, "numFeedbacksContext": len(context_feedbacks)},
timeout=CATEGORIES_CACHE_TIMEOUT,
)
return Response(
{
"categories": categories,
"success": True,
"numFeedbacksContext": len(context_feedbacks),
}
)
|
OrganizationFeedbackCategoriesEndpoint
|
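The filter applied after the Seer call in the record above is easier to see with numbers: an associated label is kept only when its own feedback count is non-zero and at most 3/4 of the primary label's count. A small restatement of the same predicate with worked values:

def keep_associated(primary_count: int, associated_count: int) -> bool:
    # Same check as in the endpoint: the associated label must have some
    # feedbacks, but no more than 3/4 as many as the primary label.
    return associated_count * 4 <= primary_count * 3 and associated_count != 0

assert keep_associated(40, 30) is True    # exactly the 3/4 bound, kept
assert keep_associated(40, 31) is False   # too general relative to the primary label
assert keep_associated(40, 0) is False    # no feedbacks at all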
python
|
doocs__leetcode
|
solution/1400-1499/1411.Number of Ways to Paint N × 3 Grid/Solution2.py
|
{
"start": 0,
"end": 985
}
|
class ____:
def numOfWays(self, n: int) -> int:
def f1(x: int) -> bool:
last = -1
for _ in range(3):
if x % 3 == last:
return False
last = x % 3
x //= 3
return True
def f2(x: int, y: int) -> bool:
for _ in range(3):
if x % 3 == y % 3:
return False
x //= 3
y //= 3
return True
mod = 10**9 + 7
m = 27
valid = {i for i in range(m) if f1(i)}
d = defaultdict(list)
for i in valid:
for j in valid:
if f2(i, j):
d[i].append(j)
f = [int(i in valid) for i in range(m)]
for _ in range(n - 1):
g = [0] * m
for i in valid:
for j in d[i]:
g[j] = (g[j] + f[i]) % mod
f = g
return sum(f) % mod
|
Solution
|
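The 27-state transition table built above can be collapsed into the classic two-state recurrence for this problem: a valid row is either pattern ABA (6 colorings) or ABC (6 colorings); an ABA row admits 3 ABA and 2 ABC rows below it, and an ABC row admits 2 of each. A compact sketch of that equivalent formulation (a simplification for illustration, not the record's code):

def num_of_ways(n: int) -> int:
    mod = 10**9 + 7
    aba, abc = 6, 6  # counts of the two row patterns for the first row
    for _ in range(n - 1):
        aba, abc = (3 * aba + 2 * abc) % mod, (2 * aba + 2 * abc) % mod
    return (aba + abc) % mod

assert num_of_ways(1) == 12
assert num_of_ways(2) == 54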
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 937627,
"end": 938387
}
|
class ____(sgqlc.types.relay.Connection):
"""A list of repository invitations."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("RepositoryInvitationEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("RepositoryInvitation"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
|
RepositoryInvitationConnection
|
python
|
sympy__sympy
|
sympy/codegen/fnodes.py
|
{
"start": 18205,
"end": 18265
}
|
class ____(FFunction):
_required_standard = 95
|
F95Function
|
python
|
realpython__materials
|
python-with-statement/timer.py
|
{
"start": 39,
"end": 343
}
|
class ____:
def __enter__(self):
self.start = perf_counter()
def __exit__(self, *_):
end = perf_counter()
print(f"Elapsed time: {end - self.start:.4f} seconds")
if __name__ == "__main__":
with Timer():
# The code to measure goes here...
sleep(0.5)
|
Timer
|
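The span captured above starts after the file's imports, so on its own the class is missing perf_counter and sleep. A self-contained version of the same context manager with the presumed imports restored:

from time import perf_counter, sleep

class Timer:
    def __enter__(self):
        self.start = perf_counter()

    def __exit__(self, *_):
        end = perf_counter()
        print(f"Elapsed time: {end - self.start:.4f} seconds")

if __name__ == "__main__":
    with Timer():
        sleep(0.5)  # the code to measure goes here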
python
|
scrapy__scrapy
|
tests/test_spidermiddleware_referer.py
|
{
"start": 21884,
"end": 22064
}
|
class ____(MixinStrictOrigin, TestRefererMiddleware):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.StrictOriginPolicy"
}
|
TestSettingsStrictOrigin
|
python
|
pypa__pipenv
|
pipenv/exceptions.py
|
{
"start": 6106,
"end": 6529
}
|
class ____(PipenvFileError):
def __init__(self, filename="Pipfile.lock", extra=None, **kwargs):
extra = kwargs.pop("extra", [])
message = "{} {} {}".format(
"[bold]You need to run[/bold]",
"[bold red]$ pipenv lock[/bold red]",
"[bold]before you can continue.[/bold]",
)
super().__init__(filename, message=message, extra=extra, **kwargs)
|
LockfileNotFound
|
python
|
mlflow__mlflow
|
tests/dspy/test_dspy_autolog.py
|
{
"start": 10479,
"end": 10739
}
|
class ____(dspy.Signature):
"""Answer questions with short factoid answers."""
context = dspy.InputField(desc="may contain relevant facts")
question = dspy.InputField()
answer = dspy.OutputField(desc="often between 1 and 5 words")
|
GenerateAnswer
|
python
|
sympy__sympy
|
sympy/liealgebras/type_c.py
|
{
"start": 77,
"end": 4426
}
|
class ____(Standard_Cartan):
def __new__(cls, n):
if n < 3:
raise ValueError("n cannot be less than 3")
return Standard_Cartan.__new__(cls, "C", n)
def dimension(self):
"""Dimension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("C3")
>>> c.dimension()
3
"""
n = self.n
return n
def basic_root(self, i, j):
"""Generate roots with 1 in ith position and a -1 in jth position
"""
n = self.n
root = [0]*n
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""The ith simple root for the C series
Every Lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
In C_n, the first n-1 simple roots are the same as
the roots in A_(n-1) (a 1 in the ith position, a -1
in the (i+1)th position, and zeroes elsewhere). The
nth simple root is the root in which there is a 2 in
the nth position and zeroes elsewhere.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("C3")
>>> c.simple_root(2)
[0, 1, -1]
"""
n = self.n
if i < n:
return self.basic_root(i-1,i)
else:
root = [0]*self.n
root[n-1] = 2
return root
def positive_roots(self):
"""Generates all the positive roots of A_n
This is half of all of the roots of C_n; by multiplying all the
positive roots by -1 we get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
for i in range(0, n):
k += 1
root = [0]*n
root[i] = 2
posroots[k] = root
return posroots
def roots(self):
"""
Returns the total number of roots for C_n
"""
n = self.n
return 2*(n**2)
def cartan_matrix(self):
"""The Cartan matrix for C_n
The Cartan matrix matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('C4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -1],
[ 0, 0, -2, 2]])
"""
n = self.n
m = 2 * eye(n)
for i in range(1, n - 1):
m[i, i+1] = -1
m[i, i-1] = -1
m[0,1] = -1
m[n-1, n-2] = -2
return m
def basis(self):
"""
Returns the number of independent generators of C_n
"""
n = self.n
return n*(2*n + 1)
def lie_algebra(self):
"""
Returns the Lie algebra associated with C_n
"""
n = self.n
return "sp(" + str(2*n) + ")"
def dynkin_diagram(self):
n = self.n
diag = "---".join("0" for i in range(1, n)) + "=<=0\n"
diag += " ".join(str(i) for i in range(1, n+1))
return diag
|
TypeC
|
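A quick cross-check of the two methods in the record above: feeding the C3 simple roots into the standard formula <alpha[i], alpha[j]> = 2*(alpha[i] . alpha[j]) / (alpha[j] . alpha[j]) reproduces the matrix that cartan_matrix() builds directly (note the -2 in the last row):
def cartan_entry(a, b):
dot = sum(x * y for x, y in zip(a, b))
norm = sum(x * x for x in b)
return 2 * dot // norm
# Simple roots of C3 as returned by simple_root(1..3).
alpha = [[1, -1, 0], [0, 1, -1], [0, 0, 2]]
matrix = [[cartan_entry(a, b) for b in alpha] for a in alpha]
assert matrix == [[2, -1, 0], [-1, 2, -1], [0, -2, 2]]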
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/circular2.py
|
{
"start": 154,
"end": 214
}
|
class ____:
a_attr: object
_T = TypeVar("_T", bound=A)
|
A
|
python
|
sympy__sympy
|
sympy/polys/monomials.py
|
{
"start": 11043,
"end": 14876
}
|
class ____:
"""Code generator of fast monomial arithmetic functions. """
@cacheit
def __new__(cls, ngens):
obj = super().__new__(cls)
obj.ngens = ngens
return obj
def __getnewargs__(self):
return (self.ngens,)
def _build(self, code, name):
ns = {}
exec(code, ns)
return ns[name]
def _vars(self, name):
return [ "%s%s" % (name, i) for i in range(self.ngens) ]
@cacheit
def mul(self):
name = "monomial_mul"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s + %s" % (a, b) for a, b in zip(A, B) ]
code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "AB": ", ".join(AB)}
return self._build(code, name)
@cacheit
def pow(self):
name = "monomial_pow"
template = dedent("""\
def %(name)s(A, k):
(%(A)s,) = A
return (%(Ak)s,)
""")
A = self._vars("a")
Ak = [ "%s*k" % a for a in A ]
code = template % {"name": name, "A": ", ".join(A), "Ak": ", ".join(Ak)}
return self._build(code, name)
@cacheit
def mulpow(self):
name = "monomial_mulpow"
template = dedent("""\
def %(name)s(A, B, k):
(%(A)s,) = A
(%(B)s,) = B
return (%(ABk)s,)
""")
A = self._vars("a")
B = self._vars("b")
ABk = [ "%s + %s*k" % (a, b) for a, b in zip(A, B) ]
code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "ABk": ", ".join(ABk)}
return self._build(code, name)
@cacheit
def ldiv(self):
name = "monomial_ldiv"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s - %s" % (a, b) for a, b in zip(A, B) ]
code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "AB": ", ".join(AB)}
return self._build(code, name)
@cacheit
def div(self):
name = "monomial_div"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
%(RAB)s
return (%(R)s,)
""")
A = self._vars("a")
B = self._vars("b")
RAB = [ "r%(i)s = a%(i)s - b%(i)s\n if r%(i)s < 0: return None" % {"i": i} for i in range(self.ngens) ]
R = self._vars("r")
code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "RAB": "\n ".join(RAB), "R": ", ".join(R)}
return self._build(code, name)
@cacheit
def lcm(self):
name = "monomial_lcm"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s if %s >= %s else %s" % (a, a, b, b) for a, b in zip(A, B) ]
code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "AB": ", ".join(AB)}
return self._build(code, name)
@cacheit
def gcd(self):
name = "monomial_gcd"
template = dedent("""\
def %(name)s(A, B):
(%(A)s,) = A
(%(B)s,) = B
return (%(AB)s,)
""")
A = self._vars("a")
B = self._vars("b")
AB = [ "%s if %s <= %s else %s" % (a, a, b, b) for a, b in zip(A, B) ]
code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "AB": ", ".join(AB)}
return self._build(code, name)
@public
|
MonomialOps
|
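Each method in the record above exec's a small generated function specialized to ngens; mul(), for example, unpacks both exponent tuples and adds them componentwise, while lcm() takes the componentwise maximum. A short usage sketch:
from sympy.polys.monomials import MonomialOps
ops = MonomialOps(3)
monomial_mul = ops.mul()
monomial_lcm = ops.lcm()
assert monomial_mul((1, 2, 3), (0, 4, 1)) == (1, 6, 4)
assert monomial_lcm((1, 2, 3), (0, 4, 1)) == (1, 4, 3)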
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-increasing-paths-in-a-grid.py
|
{
"start": 1467,
"end": 2281
}
|
class ____(object):
def countPaths(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def memoization(grid, i, j, lookup):
if not lookup[i][j]:
lookup[i][j] = 1
for di, dj in directions:
ni, nj = i+di, j+dj
if 0 <= ni < len(grid) and 0 <= nj < len(grid[0]) and grid[i][j] < grid[ni][nj]:
lookup[i][j] = (lookup[i][j]+memoization(grid, ni, nj, lookup)) % MOD
return lookup[i][j]
lookup = [[0]*len(grid[0]) for _ in xrange(len(grid))]
return sum(memoization(grid, i, j, lookup) for i in xrange(len(grid)) for j in xrange(len(grid[0]))) % MOD
|
Solution2
|
python
|
skorch-dev__skorch
|
skorch/probabilistic.py
|
{
"start": 27337,
"end": 35005
}
|
class ____(ClassifierMixin, GPBase):
__doc__ = get_gp_binary_clf_doc(NeuralNet.__doc__)
def __init__(
self,
module,
*args,
likelihood=gpytorch.likelihoods.BernoulliLikelihood,
criterion=gpytorch.mlls.VariationalELBO,
train_split=ValidSplit(5, stratified=True),
threshold=0.5,
**kwargs
):
super().__init__(
module,
*args,
criterion=criterion,
likelihood=likelihood,
train_split=train_split,
**kwargs
)
self.threshold = threshold
@property
def _default_callbacks(self):
return [
('epoch_timer', EpochTimer()),
('train_loss', PassthroughScoring(
name='train_loss',
on_train=True,
)),
('valid_loss', PassthroughScoring(
name='valid_loss',
)),
# add train accuracy because by default, there is no valid split
('train_acc', EpochScoring(
'accuracy',
name='train_acc',
lower_is_better=False,
on_train=True,
)),
('valid_acc', EpochScoring(
'accuracy',
name='valid_acc',
lower_is_better=False,
)),
('print_log', PrintLog()),
]
@property
def classes_(self):
return [0, 1]
# pylint: disable=signature-differs
def check_data(self, X, y):
super().check_data(X, y)
if (not is_dataset(X)) and (get_dim(y) != 1):
raise ValueError("The target data should be 1-dimensional.")
def predict_proba(self, X):
"""Return probability estimates for the samples.
If the module's forward method returns multiple outputs as a
tuple, it is assumed that the first output contains the
relevant information and the other values are ignored. If all
values are relevant, consider using
:meth:`.forward` instead.
Parameters
----------
X : input data, compatible with skorch.dataset.Dataset
By default, you should be able to pass:
* numpy arrays
* torch tensors
* pandas DataFrame or Series
* scipy sparse CSR matrices
* a dictionary of the former three
* a list/tuple of the former three
* a Dataset
If this doesn't work with your data, you have to pass a
``Dataset`` that can deal with the data.
Returns
-------
y_proba : numpy ndarray
Probabilities for the samples, with the first column corresponding to
class 0 and the second to class 1.
"""
nonlin = self._get_predict_nonlinearity()
y_probas = []
for yi in self.forward_iter(X, training=False):
posterior = yi[0] if isinstance(yi, tuple) else yi
y_probas.append(to_numpy(nonlin(posterior.mean)))
y_proba = np.concatenate(y_probas, 0).reshape(-1, 1)
return np.hstack((1 - y_proba, y_proba))
def predict(self, X):
"""Return class labels for samples in X.
If the module's forward method returns multiple outputs as a
tuple, it is assumed that the first output contains the
relevant information and the other values are ignored. If all
values are relevant, consider using
:meth:`.forward` instead.
Parameters
----------
X : input data, compatible with skorch.dataset.Dataset
By default, you should be able to pass:
* numpy arrays
* torch tensors
* pandas DataFrame or Series
* scipy sparse CSR matrices
* a dictionary of the former three
* a list/tuple of the former three
* a Dataset
If this doesn't work with your data, you have to pass a
``Dataset`` that can deal with the data.
Returns
-------
y_pred : numpy ndarray
Predicted target values for ``X``.
"""
y_proba = self.predict_proba(X)
return (y_proba[:, 1] > self.threshold).astype('uint8')
# BB: I could never get any reasonable results using ``SoftmaxLikelihood``. In
# fact, it always produces NaN. Probably I use it wrongly but there are no
# complete examples that I could find. I leave the commented code here for now,
# in the hopes that there is an easy fix in the future.
# class _GPClassifier(GPBase):
# def __init__(
# self,
# module,
# *args,
# likelihood=gpytorch.likelihoods.SoftmaxLikelihood,
# criterion=gpytorch.mlls.VariationalELBO,
# train_split=ValidSplit(5, stratified=True),
# classes=None,
# **kwargs
# ):
# super().__init__(
# module,
# *args,
# criterion=criterion,
# likelihood=likelihood,
# train_split=train_split,
# **kwargs
# )
# self.classes = classes
# @property
# def _default_callbacks(self):
# return [
# ('epoch_timer', EpochTimer()),
# ('train_loss', PassthroughScoring(
# name='train_loss',
# on_train=True,
# )),
# ('valid_loss', PassthroughScoring(
# name='valid_loss',
# )),
# # add train accuracy because by default, there is no valid split
# ('train_acc', EpochScoring(
# 'accuracy',
# name='train_acc',
# lower_is_better=False,
# on_train=True,
# )),
# ('valid_acc', EpochScoring(
# 'accuracy',
# name='valid_acc',
# lower_is_better=False,
# )),
# ('print_log', PrintLog()),
# ]
# @property
# def classes_(self):
# if self.classes is not None:
# if not len(self.classes):
# raise AttributeError("{} has no attribute 'classes_'".format(
# self.__class__.__name__))
# return self.classes
# return self.classes_inferred_
# # pylint: disable=signature-differs
# def check_data(self, X, y):
# if (
# (y is None) and
# (not is_dataset(X)) and
# (self.iterator_train is DataLoader)
# ):
# msg = ("No y-values are given (y=None). You must either supply a "
# "Dataset as X or implement your own DataLoader for "
# "training (and your validation) and supply it using the "
# "``iterator_train`` and ``iterator_valid`` parameters "
# "respectively.")
# raise ValueError(msg)
# if y is not None:
# # pylint: disable=attribute-defined-outside-init
# self.classes_inferred_ = np.unique(y)
# def predict_proba(self, X):
# """TODO"""
# nonlin = self._get_predict_nonlinearity()
# y_probas = []
# for yi in self.forward_iter(X, training=False):
# posterior = yi[0] if isinstance(yi, tuple) else yi
# y_probas.append(to_numpy(nonlin(posterior.mean)))
# y_proba = np.concatenate(y_probas, 0)
# return y_proba
# def predict(self, X):
# """TODO
# """
# return self.predict_proba(X).argmax(axis=1)
|
GPBinaryClassifier
|
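The relationship between predict_proba and predict in the record above is just column stacking plus a threshold: the likelihood's mean is treated as p(y=1), the first column is its complement, and predict compares the second column to self.threshold. Illustrated with plain numpy (standalone, no GP involved):
import numpy as np
y_proba_1 = np.array([0.2, 0.5, 0.9]) # stand-in for the Bernoulli posterior mean
y_proba = np.hstack((1 - y_proba_1.reshape(-1, 1), y_proba_1.reshape(-1, 1)))
threshold = 0.5
y_pred = (y_proba[:, 1] > threshold).astype("uint8")
# y_pred -> array([0, 0, 1], dtype=uint8); 0.5 is not strictly greater than 0.5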
python
|
astropy__astropy
|
astropy/utils/masked/tests/test_functions.py
|
{
"start": 16650,
"end": 16747
}
|
class ____(TestMaskedArrayConcatenation, LongitudeSetup):
pass
|
TestMaskedLongitudeConcatenation
|
python
|
numba__numba
|
numba/core/datamodel/models.py
|
{
"start": 9449,
"end": 9752
}
|
class ____(PrimitiveModel):
"""
Passed as opaque pointers
"""
_ptr_type = ir.IntType(8).as_pointer()
def __init__(self, dmm, fe_type):
be_type = self._ptr_type
super(OpaqueModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.MemInfoPointer)
|
OpaqueModel
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/coercions.py
|
{
"start": 22903,
"end": 26023
}
|
class ____(RoleImpl):
__slots__ = ()
def _implicit_coercions(
self,
element: Any,
resolved: Any,
argname: Optional[str] = None,
**kw: Any,
) -> Any:
if resolved._is_from_clause:
if (
isinstance(resolved, selectable.Alias)
and resolved.element._is_select_base
):
self._warn_for_implicit_coercion(resolved)
return self._post_coercion(resolved.element, **kw)
else:
self._warn_for_implicit_coercion(resolved)
return self._post_coercion(resolved.select(), **kw)
else:
self._raise_for_expected(element, argname, resolved)
def _warn_for_implicit_coercion(self, elem):
util.warn(
"Coercing %s object into a select() for use in IN(); "
"please pass a select() construct explicitly"
% (elem.__class__.__name__)
)
@util.preload_module("sqlalchemy.sql.elements")
def _literal_coercion(self, element, *, expr, operator, **kw): # type: ignore[override] # noqa: E501
if util.is_non_string_iterable(element):
non_literal_expressions: Dict[
Optional[_ColumnExpressionArgument[Any]],
_ColumnExpressionArgument[Any],
] = {}
element = list(element)
for o in element:
if not _is_literal(o):
if not isinstance(
o, util.preloaded.sql_elements.ColumnElement
) and not hasattr(o, "__clause_element__"):
self._raise_for_expected(element, **kw)
else:
non_literal_expressions[o] = o
if non_literal_expressions:
return elements.ClauseList(
*[
(
non_literal_expressions[o]
if o in non_literal_expressions
else expr._bind_param(operator, o)
)
for o in element
]
)
else:
return expr._bind_param(operator, element, expanding=True)
else:
self._raise_for_expected(element, **kw)
def _post_coercion(self, element, *, expr, operator, **kw):
if element._is_select_base:
# for IN, we are doing scalar_subquery() coercion without
# a warning
return element.scalar_subquery()
elif isinstance(element, elements.ClauseList):
assert not len(element.clauses) == 0
return element.self_group(against=operator)
elif isinstance(element, elements.BindParameter):
element = element._clone(maintain_key=True)
element.expanding = True
element.expand_op = operator
return element
elif isinstance(element, selectable.Values):
return element.scalar_values()
else:
return element
|
InElementImpl
|
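In practice the branch above that returns expr._bind_param(..., expanding=True) is what a plain list of literals in an IN clause hits: the values travel as a single expanding bind parameter that is expanded at execution time. A small sketch (SQLAlchemy 2.x; the exact placeholder text in the rendered SQL can vary by version):
from sqlalchemy import column, select
stmt = select(column("x")).where(column("x").in_([1, 2, 3]))
# Typically renders something like:
# SELECT x WHERE x IN (__[POSTCOMPILE_x_1])
# with the three literals bound as one expanding parameter.
print(stmt)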
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_ufunc.py
|
{
"start": 14899,
"end": 15242
}
|
class ____(Benchmark):
def setup(self):
self.x = np.asarray(1.0)
self.y = np.asarray(1.0 + 1j)
self.z = complex(1.0, 1.0)
def time_add_scalar(self):
(self.x + self.x)
def time_add_scalar_conv(self):
(self.x + 1.0)
def time_add_scalar_conv_complex(self):
(self.y + self.z)
|
Scalar
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-pairs-of-strings-with-concatenation-equal-to-target.py
|
{
"start": 130,
"end": 832
}
|
class ____(object):
def numOfPairs(self, nums, target):
"""
:type nums: List[str]
:type target: str
:rtype: int
"""
lookup = collections.Counter()
result = 0
for num in nums:
cnt1, cnt2 = lookup[-(len(target)-len(num))], lookup[len(target)-len(num)]
if target.startswith(num):
result += cnt1
lookup[len(num)] += 1
if target.endswith(num):
result += cnt2
lookup[-len(num)] += 1
return result
# Time: O(n * l), n is the size of nums, l is the average length of the digit string in nums
# Space: O(n)
import collections
|
Solution
|
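The Counter trick in the record above stores, for each processed string, its length under a positive key when it is a prefix of target and its negated length under a negative key when it is a suffix; a later string of complementary length then pairs with all of them in O(1). A brute-force cross-check on the standard example (hypothetical harness, not part of the record):
from itertools import permutations
nums = ["777", "7", "77", "77"]
target = "7777"
brute = sum(
1
for i, j in permutations(range(len(nums)), 2)
if nums[i] + nums[j] == target
)
assert brute == 4 # ("777","7"), ("7","777"), and both orderings of the "77" pair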
python
|
django__django
|
tests/backends/models.py
|
{
"start": 372,
"end": 582
}
|
class ____(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
|
Person
|
python
|
nedbat__coveragepy
|
tests/test_plugins.py
|
{
"start": 1853,
"end": 5501
}
|
class ____(CoverageTest):
"""Test Plugins construction."""
def test_implicit_boolean(self) -> None:
self.make_file(
"plugin1.py",
"""\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
config = FakeConfig("plugin1", {})
plugins = make_plugins([], config)
assert not plugins
plugins = make_plugins(["plugin1"], config)
assert plugins
def test_importing_and_configuring(self) -> None:
self.make_file(
"plugin1.py",
"""\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
self.this_is = "me"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""",
)
config = FakeConfig("plugin1", {"a": "hello"})
plugins = list(make_plugins(["plugin1"], config))
assert len(plugins) == 1
assert plugins[0].this_is == "me" # type: ignore
assert plugins[0].options == {"a": "hello"} # type: ignore
assert config.asked_for == ["plugin1"]
def test_importing_and_configuring_more_than_one(self) -> None:
self.make_file(
"plugin1.py",
"""\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
self.this_is = "me"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""",
)
self.make_file(
"plugin2.py",
"""\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""",
)
config = FakeConfig("plugin1", {"a": "hello"})
plugins = list(make_plugins(["plugin1", "plugin2"], config))
assert len(plugins) == 2
assert plugins[0].this_is == "me" # type: ignore
assert plugins[0].options == {"a": "hello"} # type: ignore
assert plugins[1].options == {} # type: ignore
assert config.asked_for == ["plugin1", "plugin2"]
# The order matters...
config = FakeConfig("plugin1", {"a": "second"})
plugins = list(make_plugins(["plugin2", "plugin1"], config))
assert len(plugins) == 2
assert plugins[0].options == {} # type: ignore
assert plugins[1].this_is == "me" # type: ignore
assert plugins[1].options == {"a": "second"} # type: ignore
def test_cant_import(self) -> None:
with pytest.raises(ImportError, match="No module named '?plugin_not_there'?"):
_ = make_plugins(["plugin_not_there"], NullConfig())
def test_plugin_must_define_coverage_init(self) -> None:
self.make_file(
"no_plugin.py",
"""\
from coverage import CoveragePlugin
Nothing = 0
""",
)
msg_pat = "Plugin module 'no_plugin' didn't define a coverage_init function"
with pytest.raises(PluginError, match=msg_pat):
list(make_plugins(["no_plugin"], NullConfig()))
|
LoadPluginsTest
|
python
|
huggingface__transformers
|
tests/models/parakeet/test_modeling_parakeet.py
|
{
"start": 5977,
"end": 6759
}
|
class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (ParakeetEncoder,) if is_torch_available() else ()
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = ParakeetEncoderModelTester(self)
self.config_tester = ConfigTester(self, config_class=ParakeetEncoderConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="ParakeetEncoder does not use inputs_embeds")
def test_model_get_set_embeddings(self):
pass
|
ParakeetEncoderModelTest
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/packager.py
|
{
"start": 1191,
"end": 30490
}
|
class ____:
"""
A class for writing the Excel XLSX Packager file.
This module is used in conjunction with XlsxWriter to create an
Excel XLSX container file.
From Wikipedia: The Open Packaging Conventions (OPC) is a
container-file technology initially created by Microsoft to store
a combination of XML and non-XML files that together form a single
entity such as an Open XML Paper Specification (OpenXPS)
document. http://en.wikipedia.org/wiki/Open_Packaging_Conventions.
At its simplest an Excel XLSX file contains the following elements::
____ [Content_Types].xml
|
|____ docProps
| |____ app.xml
| |____ core.xml
|
|____ xl
| |____ workbook.xml
| |____ worksheets
| | |____ sheet1.xml
| |
| |____ styles.xml
| |
| |____ theme
| | |____ theme1.xml
| |
| |_____rels
| |____ workbook.xml.rels
|
|_____rels
|____ .rels
The Packager class coordinates the classes that represent the
elements of the package and writes them into the XLSX file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
self.tmpdir = ""
self.in_memory = False
self.workbook = None
self.worksheet_count = 0
self.chartsheet_count = 0
self.chart_count = 0
self.drawing_count = 0
self.table_count = 0
self.num_vml_files = 0
self.num_comment_files = 0
self.named_ranges = []
self.filenames = []
###########################################################################
#
# Private API.
#
###########################################################################
def _set_tmpdir(self, tmpdir) -> None:
# Set an optional user defined temp directory.
self.tmpdir = tmpdir
def _set_in_memory(self, in_memory) -> None:
# Set the optional 'in_memory' mode.
self.in_memory = in_memory
def _add_workbook(self, workbook) -> None:
# Add the Excel::Writer::XLSX::Workbook object to the package.
self.workbook = workbook
self.chart_count = len(workbook.charts)
self.drawing_count = len(workbook.drawings)
self.num_vml_files = workbook.num_vml_files
self.num_comment_files = workbook.num_comment_files
self.named_ranges = workbook.named_ranges
for worksheet in self.workbook.worksheets():
if worksheet.is_chartsheet:
self.chartsheet_count += 1
else:
self.worksheet_count += 1
def _create_package(self):
# Write the xml files that make up the XLSX OPC package.
self._write_content_types_file()
self._write_root_rels_file()
self._write_workbook_rels_file()
self._write_worksheet_files()
self._write_chartsheet_files()
self._write_workbook_file()
self._write_chart_files()
self._write_drawing_files()
self._write_vml_files()
self._write_comment_files()
self._write_table_files()
self._write_shared_strings_file()
self._write_styles_file()
self._write_custom_file()
self._write_theme_file()
self._write_worksheet_rels_files()
self._write_chartsheet_rels_files()
self._write_drawing_rels_files()
self._write_rich_value_rels_files()
self._add_image_files()
self._add_vba_project()
self._add_vba_project_signature()
self._write_vba_project_rels_file()
self._write_core_file()
self._write_app_file()
self._write_metadata_file()
self._write_feature_bag_property()
self._write_rich_value_files()
return self.filenames
def _filename(self, xml_filename):
# Create a temp filename to write the XML data to and store the Excel
# filename to use as the name in the Zip container.
if self.in_memory:
os_filename = StringIO()
else:
(fd, os_filename) = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
self.filenames.append((os_filename, xml_filename, False))
return os_filename
def _write_workbook_file(self) -> None:
# Write the workbook.xml file.
workbook = self.workbook
workbook._set_xml_writer(self._filename("xl/workbook.xml"))
workbook._assemble_xml_file()
def _write_worksheet_files(self) -> None:
# Write the worksheet files.
index = 1
for worksheet in self.workbook.worksheets():
if worksheet.is_chartsheet:
continue
if worksheet.constant_memory:
worksheet._opt_reopen()
worksheet._write_single_row()
worksheet._set_xml_writer(
self._filename("xl/worksheets/sheet" + str(index) + ".xml")
)
worksheet._assemble_xml_file()
index += 1
def _write_chartsheet_files(self) -> None:
# Write the chartsheet files.
index = 1
for worksheet in self.workbook.worksheets():
if not worksheet.is_chartsheet:
continue
worksheet._set_xml_writer(
self._filename("xl/chartsheets/sheet" + str(index) + ".xml")
)
worksheet._assemble_xml_file()
index += 1
def _write_chart_files(self) -> None:
# Write the chart files.
if not self.workbook.charts:
return
index = 1
for chart in self.workbook.charts:
# Check that the chart has at least one data series.
if not chart.series:
raise EmptyChartSeries(
f"Chart{index} must contain at least one "
f"data series. See chart.add_series()."
)
chart._set_xml_writer(
self._filename("xl/charts/chart" + str(index) + ".xml")
)
chart._assemble_xml_file()
index += 1
def _write_drawing_files(self) -> None:
# Write the drawing files.
if not self.drawing_count:
return
index = 1
for drawing in self.workbook.drawings:
drawing._set_xml_writer(
self._filename("xl/drawings/drawing" + str(index) + ".xml")
)
drawing._assemble_xml_file()
index += 1
def _write_vml_files(self) -> None:
# Write the comment VML files.
index = 1
for worksheet in self.workbook.worksheets():
if not worksheet.has_vml and not worksheet.has_header_vml:
continue
if worksheet.has_vml:
vml = Vml()
vml._set_xml_writer(
self._filename("xl/drawings/vmlDrawing" + str(index) + ".vml")
)
vml._assemble_xml_file(
worksheet.vml_data_id,
worksheet.vml_shape_id,
worksheet.comments_list,
worksheet.buttons_list,
)
index += 1
if worksheet.has_header_vml:
vml = Vml()
vml._set_xml_writer(
self._filename("xl/drawings/vmlDrawing" + str(index) + ".vml")
)
vml._assemble_xml_file(
worksheet.vml_header_id,
worksheet.vml_header_id * 1024,
None,
None,
worksheet.header_images_list,
)
self._write_vml_drawing_rels_file(worksheet, index)
index += 1
def _write_comment_files(self) -> None:
# Write the comment files.
index = 1
for worksheet in self.workbook.worksheets():
if not worksheet.has_comments:
continue
comment = Comments()
comment._set_xml_writer(self._filename("xl/comments" + str(index) + ".xml"))
comment._assemble_xml_file(worksheet.comments_list)
index += 1
def _write_shared_strings_file(self) -> None:
# Write the sharedStrings.xml file.
sst = SharedStrings()
sst.string_table = self.workbook.str_table
if not self.workbook.str_table.count:
return
sst._set_xml_writer(self._filename("xl/sharedStrings.xml"))
sst._assemble_xml_file()
def _write_app_file(self) -> None:
# Write the app.xml file.
properties = self.workbook.doc_properties
app = App()
# Add the Worksheet parts.
worksheet_count = 0
for worksheet in self.workbook.worksheets():
if worksheet.is_chartsheet:
continue
# Don't write/count veryHidden sheets.
if worksheet.hidden != 2:
app._add_part_name(worksheet.name)
worksheet_count += 1
# Add the Worksheet heading pairs.
app._add_heading_pair(["Worksheets", worksheet_count])
# Add the Chartsheet parts.
for worksheet in self.workbook.worksheets():
if not worksheet.is_chartsheet:
continue
app._add_part_name(worksheet.name)
# Add the Chartsheet heading pairs.
app._add_heading_pair(["Charts", self.chartsheet_count])
# Add the Named Range heading pairs.
if self.named_ranges:
app._add_heading_pair(["Named Ranges", len(self.named_ranges)])
# Add the Named Ranges parts.
for named_range in self.named_ranges:
app._add_part_name(named_range)
app._set_properties(properties)
app.doc_security = self.workbook.read_only
app._set_xml_writer(self._filename("docProps/app.xml"))
app._assemble_xml_file()
def _write_core_file(self) -> None:
# Write the core.xml file.
properties = self.workbook.doc_properties
core = Core()
core._set_properties(properties)
core._set_xml_writer(self._filename("docProps/core.xml"))
core._assemble_xml_file()
def _write_metadata_file(self) -> None:
# Write the metadata.xml file.
if not self.workbook.has_metadata:
return
metadata = Metadata()
metadata.has_dynamic_functions = self.workbook.has_dynamic_functions
metadata.num_embedded_images = len(self.workbook.embedded_images.images)
metadata._set_xml_writer(self._filename("xl/metadata.xml"))
metadata._assemble_xml_file()
def _write_feature_bag_property(self) -> None:
# Write the featurePropertyBag.xml file.
feature_property_bags = self.workbook._has_feature_property_bags()
if not feature_property_bags:
return
property_bag = FeaturePropertyBag()
property_bag.feature_property_bags = feature_property_bags
property_bag._set_xml_writer(
self._filename("xl/featurePropertyBag/featurePropertyBag.xml")
)
property_bag._assemble_xml_file()
def _write_rich_value_files(self) -> None:
if not self.workbook.embedded_images.has_images():
return
self._write_rich_value()
self._write_rich_value_types()
self._write_rich_value_structure()
self._write_rich_value_rel()
def _write_rich_value(self) -> None:
# Write the rdrichvalue.xml file.
filename = self._filename("xl/richData/rdrichvalue.xml")
xml_file = RichValue()
xml_file.embedded_images = self.workbook.embedded_images.images
xml_file._set_xml_writer(filename)
xml_file._assemble_xml_file()
def _write_rich_value_types(self) -> None:
# Write the rdRichValueTypes.xml file.
filename = self._filename("xl/richData/rdRichValueTypes.xml")
xml_file = RichValueTypes()
xml_file._set_xml_writer(filename)
xml_file._assemble_xml_file()
def _write_rich_value_structure(self) -> None:
# Write the rdrichvaluestructure.xml file.
filename = self._filename("xl/richData/rdrichvaluestructure.xml")
xml_file = RichValueStructure()
xml_file.has_embedded_descriptions = self.workbook.has_embedded_descriptions
xml_file._set_xml_writer(filename)
xml_file._assemble_xml_file()
def _write_rich_value_rel(self) -> None:
# Write the richValueRel.xml file.
filename = self._filename("xl/richData/richValueRel.xml")
xml_file = RichValueRel()
xml_file.num_embedded_images = len(self.workbook.embedded_images.images)
xml_file._set_xml_writer(filename)
xml_file._assemble_xml_file()
def _write_custom_file(self) -> None:
# Write the custom.xml file.
properties = self.workbook.custom_properties
custom = Custom()
if not properties:
return
custom._set_properties(properties)
custom._set_xml_writer(self._filename("docProps/custom.xml"))
custom._assemble_xml_file()
def _write_content_types_file(self) -> None:
# Write the ContentTypes.xml file.
content = ContentTypes()
content._add_image_types(self.workbook.image_types)
self._get_table_count()
worksheet_index = 1
chartsheet_index = 1
for worksheet in self.workbook.worksheets():
if worksheet.is_chartsheet:
content._add_chartsheet_name("sheet" + str(chartsheet_index))
chartsheet_index += 1
else:
content._add_worksheet_name("sheet" + str(worksheet_index))
worksheet_index += 1
for i in range(1, self.chart_count + 1):
content._add_chart_name("chart" + str(i))
for i in range(1, self.drawing_count + 1):
content._add_drawing_name("drawing" + str(i))
if self.num_vml_files:
content._add_vml_name()
for i in range(1, self.table_count + 1):
content._add_table_name("table" + str(i))
for i in range(1, self.num_comment_files + 1):
content._add_comment_name("comments" + str(i))
# Add the sharedString rel if there is string data in the workbook.
if self.workbook.str_table.count:
content._add_shared_strings()
# Add vbaProject (and optionally vbaProjectSignature) if present.
if self.workbook.vba_project:
content._add_vba_project()
if self.workbook.vba_project_signature:
content._add_vba_project_signature()
# Add the custom properties if present.
if self.workbook.custom_properties:
content._add_custom_properties()
# Add the metadata file if present.
if self.workbook.has_metadata:
content._add_metadata()
# Add the metadata file if present.
if self.workbook._has_feature_property_bags():
content._add_feature_bag_property()
# Add the RichValue file if present.
if self.workbook.embedded_images.has_images():
content._add_rich_value()
content._set_xml_writer(self._filename("[Content_Types].xml"))
content._assemble_xml_file()
def _write_styles_file(self) -> None:
# Write the style xml file.
xf_formats = self.workbook.xf_formats
palette = self.workbook.palette
font_count = self.workbook.font_count
num_formats = self.workbook.num_formats
border_count = self.workbook.border_count
fill_count = self.workbook.fill_count
custom_colors = self.workbook.custom_colors
dxf_formats = self.workbook.dxf_formats
has_comments = self.workbook.has_comments
styles = Styles()
styles._set_style_properties(
[
xf_formats,
palette,
font_count,
num_formats,
border_count,
fill_count,
custom_colors,
dxf_formats,
has_comments,
]
)
styles._set_xml_writer(self._filename("xl/styles.xml"))
styles._assemble_xml_file()
def _write_theme_file(self) -> None:
# Write the theme xml file.
theme = Theme()
theme._set_xml_writer(self._filename("xl/theme/theme1.xml"))
theme._assemble_xml_file(self.workbook.theme_xml)
def _write_table_files(self) -> None:
# Write the table files.
index = 1
for worksheet in self.workbook.worksheets():
table_props = worksheet.tables
if not table_props:
continue
for table_props in table_props:
table = Table()
table._set_xml_writer(
self._filename("xl/tables/table" + str(index) + ".xml")
)
table._set_properties(table_props)
table._assemble_xml_file()
index += 1
def _get_table_count(self) -> None:
# Count the table files. Required for the [Content_Types] file.
for worksheet in self.workbook.worksheets():
for _ in worksheet.tables:
self.table_count += 1
def _write_root_rels_file(self) -> None:
# Write the _rels/.rels xml file.
rels = Relationships()
rels._add_document_relationship("/officeDocument", "xl/workbook.xml")
rels._add_package_relationship("/metadata/core-properties", "docProps/core.xml")
rels._add_document_relationship("/extended-properties", "docProps/app.xml")
if self.workbook.custom_properties:
rels._add_document_relationship("/custom-properties", "docProps/custom.xml")
rels._set_xml_writer(self._filename("_rels/.rels"))
rels._assemble_xml_file()
def _write_workbook_rels_file(self) -> None:
# Write the xl/_rels/workbook.xml.rels file.
rels = Relationships()
worksheet_index = 1
chartsheet_index = 1
for worksheet in self.workbook.worksheets():
if worksheet.is_chartsheet:
rels._add_document_relationship(
"/chartsheet", "chartsheets/sheet" + str(chartsheet_index) + ".xml"
)
chartsheet_index += 1
else:
rels._add_document_relationship(
"/worksheet", "worksheets/sheet" + str(worksheet_index) + ".xml"
)
worksheet_index += 1
rels._add_document_relationship("/theme", "theme/theme1.xml")
rels._add_document_relationship("/styles", "styles.xml")
# Add the sharedString rel if there is string data in the workbook.
if self.workbook.str_table.count:
rels._add_document_relationship("/sharedStrings", "sharedStrings.xml")
# Add vbaProject if present.
if self.workbook.vba_project:
rels._add_ms_package_relationship("/vbaProject", "vbaProject.bin")
# Add the metadata file if required.
if self.workbook.has_metadata:
rels._add_document_relationship("/sheetMetadata", "metadata.xml")
# Add the RichValue files if present.
if self.workbook.embedded_images.has_images():
rels._add_rich_value_relationship()
# Add the checkbox/FeaturePropertyBag file if present.
if self.workbook._has_feature_property_bags():
rels._add_feature_bag_relationship()
rels._set_xml_writer(self._filename("xl/_rels/workbook.xml.rels"))
rels._assemble_xml_file()
def _write_worksheet_rels_files(self) -> None:
# Write the worksheet .rels files for worksheets that contain links to
# external data such as hyperlinks or drawings.
index = 0
for worksheet in self.workbook.worksheets():
if worksheet.is_chartsheet:
continue
index += 1
external_links = (
worksheet.external_hyper_links
+ worksheet.external_drawing_links
+ worksheet.external_vml_links
+ worksheet.external_background_links
+ worksheet.external_table_links
+ worksheet.external_comment_links
)
if not external_links:
continue
# Create the worksheet .rels dirs.
rels = Relationships()
for link_data in external_links:
rels._add_document_relationship(*link_data)
# Create .rels file such as /xl/worksheets/_rels/sheet1.xml.rels.
rels._set_xml_writer(
self._filename("xl/worksheets/_rels/sheet" + str(index) + ".xml.rels")
)
rels._assemble_xml_file()
def _write_chartsheet_rels_files(self) -> None:
# Write the chartsheet .rels files for links to drawing files.
index = 0
for worksheet in self.workbook.worksheets():
if not worksheet.is_chartsheet:
continue
index += 1
external_links = (
worksheet.external_drawing_links + worksheet.external_vml_links
)
if not external_links:
continue
# Create the chartsheet .rels xlsx_dir.
rels = Relationships()
for link_data in external_links:
rels._add_document_relationship(*link_data)
# Create .rels file such as /xl/chartsheets/_rels/sheet1.xml.rels.
rels._set_xml_writer(
self._filename("xl/chartsheets/_rels/sheet" + str(index) + ".xml.rels")
)
rels._assemble_xml_file()
def _write_drawing_rels_files(self) -> None:
# Write the drawing .rels files for worksheets with charts or drawings.
index = 0
for worksheet in self.workbook.worksheets():
if worksheet.drawing:
index += 1
if not worksheet.drawing_links:
continue
# Create the drawing .rels xlsx_dir.
rels = Relationships()
for drawing_data in worksheet.drawing_links:
rels._add_document_relationship(*drawing_data)
# Create .rels file such as /xl/drawings/_rels/drawing1.xml.rels.
rels._set_xml_writer(
self._filename("xl/drawings/_rels/drawing" + str(index) + ".xml.rels")
)
rels._assemble_xml_file()
def _write_vml_drawing_rels_file(self, worksheet, index) -> None:
# Write the vmlDrawing .rels files for worksheets with images in
# headers or footers.
# Create the drawing .rels dir.
rels = Relationships()
for drawing_data in worksheet.vml_drawing_links:
rels._add_document_relationship(*drawing_data)
# Create .rels file such as /xl/drawings/_rels/vmlDrawing1.vml.rels.
rels._set_xml_writer(
self._filename("xl/drawings/_rels/vmlDrawing" + str(index) + ".vml.rels")
)
rels._assemble_xml_file()
def _write_vba_project_rels_file(self) -> None:
# Write the vbaProject.rels xml file if signed macros exist.
vba_project_signature = self.workbook.vba_project_signature
if not vba_project_signature:
return
# Create the vbaProject .rels dir.
rels = Relationships()
rels._add_ms_package_relationship(
"/vbaProjectSignature", "vbaProjectSignature.bin"
)
rels._set_xml_writer(self._filename("xl/_rels/vbaProject.bin.rels"))
rels._assemble_xml_file()
def _write_rich_value_rels_files(self) -> None:
# Write the richValueRel.xml.rels for embedded images.
if not self.workbook.embedded_images.has_images():
return
# Create the richValueRel .rels file.
rels = Relationships()
index = 1
for image in self.workbook.embedded_images.images:
image_extension = image.image_type.lower()
image_file = f"../media/image{index}.{image_extension}"
rels._add_document_relationship("/image", image_file)
index += 1
# Create the /xl/richData/_rels/richValueRel.xml.rels file.
rels._set_xml_writer(self._filename("/xl/richData/_rels/richValueRel.xml.rels"))
rels._assemble_xml_file()
def _add_image_files(self) -> None:
# pylint: disable=consider-using-with
# Write the /xl/media/image?.* image files.
workbook = self.workbook
index = 1
images = workbook.embedded_images.images + workbook.images
for image in images:
xml_image_name = (
"xl/media/image" + str(index) + "." + image._image_extension
)
if not self.in_memory:
# In file mode we just write or copy the image file.
os_filename = self._filename(xml_image_name)
if image.image_data:
# The data is in a byte stream. Write it to the target.
os_file = open(os_filename, mode="wb")
os_file.write(image.image_data.getvalue())
os_file.close()
else:
copy(image.filename, os_filename)
# Allow copies of Windows read-only images to be deleted.
try:
os.chmod(
os_filename, os.stat(os_filename).st_mode | stat.S_IWRITE
)
except OSError:
pass
else:
# For in-memory mode we read the image into a stream.
if image.image_data:
# The data is already in a byte stream.
os_filename = image.image_data
else:
image_file = open(image.filename, mode="rb")
image_data = image_file.read()
os_filename = BytesIO(image_data)
image_file.close()
self.filenames.append((os_filename, xml_image_name, True))
index += 1
def _add_vba_project_signature(self) -> None:
# pylint: disable=consider-using-with
# Copy in a vbaProjectSignature.bin file.
vba_project_signature = self.workbook.vba_project_signature
vba_project_signature_is_stream = self.workbook.vba_project_signature_is_stream
if not vba_project_signature:
return
xml_vba_signature_name = "xl/vbaProjectSignature.bin"
if not self.in_memory:
# In file mode we just write or copy the VBA project signature file.
os_filename = self._filename(xml_vba_signature_name)
if vba_project_signature_is_stream:
# The data is in a byte stream. Write it to the target.
os_file = open(os_filename, mode="wb")
os_file.write(vba_project_signature.getvalue())
os_file.close()
else:
copy(vba_project_signature, os_filename)
else:
# For in-memory mode we read the vba into a stream.
if vba_project_signature_is_stream:
# The data is already in a byte stream.
os_filename = vba_project_signature
else:
vba_file = open(vba_project_signature, mode="rb")
vba_data = vba_file.read()
os_filename = BytesIO(vba_data)
vba_file.close()
self.filenames.append((os_filename, xml_vba_signature_name, True))
def _add_vba_project(self) -> None:
# pylint: disable=consider-using-with
# Copy in a vbaProject.bin file.
vba_project = self.workbook.vba_project
vba_project_is_stream = self.workbook.vba_project_is_stream
if not vba_project:
return
xml_vba_name = "xl/vbaProject.bin"
if not self.in_memory:
# In file mode we just write or copy the VBA file.
os_filename = self._filename(xml_vba_name)
if vba_project_is_stream:
# The data is in a byte stream. Write it to the target.
os_file = open(os_filename, mode="wb")
os_file.write(vba_project.getvalue())
os_file.close()
else:
copy(vba_project, os_filename)
else:
# For in-memory mode we read the vba into a stream.
if vba_project_is_stream:
# The data is already in a byte stream.
os_filename = vba_project
else:
vba_file = open(vba_project, mode="rb")
vba_data = vba_file.read()
os_filename = BytesIO(vba_data)
vba_file.close()
self.filenames.append((os_filename, xml_vba_name, True))
|
Packager
|
python
|
google__jax
|
jax/_src/pallas/mosaic/pipeline.py
|
{
"start": 44219,
"end": 81973
}
|
class ____:
"""Sequences input and output copies and waits for a pipeline."""
def __init__(
self,
step: jax.Array,
indices: tuple[int | jax.Array, ...],
grid: tuple[int | jax.Array, ...],
grid_offsets: tuple[int | jax.Array, ...],
num_stages: int,
first_cycle=None,
last_cycle=None,
init_accumulators=None,
trace_scopes=True,
):
"""Initializes scheduler.
Args:
step: inner step number.
indices: current grid indices.
grid: pallas grid for BufferedRefs.
grid_offsets: offsets for grid indices (used for megacore).
num_stages: number of stages in the pipeline.
first_cycle: whether this is the first invocation of the pipeline.
last_cycle: whether this is the last invocation of the pipeline.
init_accumulators: whether to zero-initialize accumulator state for this
invocation of the pipeline.
trace_scopes: whether to use named_scope to trace blocks in the pipeline.
"""
self.step = step
self.grid = grid
self.grid_offsets = grid_offsets
self.num_stages = num_stages
self.first_cycle = first_cycle
self.last_cycle = last_cycle
self.init_accumulators = init_accumulators
self.trace_scopes = trace_scopes
# Total number of linear steps.
self.num_steps = _grid_size(grid)
# First and last inner step conditionals.
self.first_step = step == 0
self.last_step = step == self.num_steps - 1
# First and last total step conditionals.
self.first_step_ever = first_cycle & self.first_step
self.last_step_ever = last_cycle & self.last_step
# Derived grid indices for present, previous, and next steps.
self.indices = tuple(
i + j for i, j in zip(indices, grid_offsets, strict=True)
)
self.prev_indices = tuple(
i + j
for i, j in zip(_prev_index(indices, grid), grid_offsets, strict=True)
)
next_indices = _next_index(indices, grid)
self.next_indices = tuple(
i + j
for i, j in zip(next_indices, grid_offsets, strict=True)
)
self.add_offset = lambda x: tuple(i + j for i, j in zip(x, grid_offsets,
strict=True))
# TODO(justinfu): Don't recompute these on each iteration.
# fetch_indices stores the grid indices indexed by the amount of lookahead.
# i.e. fetch_indices[2] contains the grid indices 2 iterations
# ahead.
self.fetch_indices = [self.indices, self.next_indices]
fetch_indices = next_indices
for _ in range(self.num_stages-1):
fetch_indices = _next_index(fetch_indices, grid)
self.fetch_indices.append(tuple(
i + j
for i, j in zip(fetch_indices, grid_offsets, strict=True)
))
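# Added worked example (assumes _grid_size is the product of the grid
# dimensions and _next_index advances the last grid axis fastest, wrapping
# like an odometer): with grid=(2, 3), grid_offsets=(0, 0), num_stages=3 and
# current indices (0, 2), the list built above is
#   fetch_indices[0] == (0, 2)   # current step
#   fetch_indices[1] == (1, 0)   # one step ahead
#   fetch_indices[2] == (1, 1)   # two steps ahead
#   fetch_indices[3] == (1, 2)   # three steps ahead
# i.e. fetch_indices[k] is the grid position whose copy_in has to be issued
# k iterations early to keep k+1 buffers full.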
@contextmanager
def _named_scope(self, name):
if self.trace_scopes:
with jax.named_scope(name):
yield
else:
yield
def grid_env(self):
return pallas_core.grid_env(
list(map(pallas_core.GridAxis, self.indices, self.grid)))
def out_of_fetch(self, buffered_ref):
"""Returns whether there are no more blocks to fetch."""
# Currently this is based on the iteration, but if we want to support
# lookahead this will depend on whether the lookahead reached the end.
if not buffered_ref.is_buffered:
return False
return self.step >= (self.num_steps - buffered_ref.buffer_count + 1)
def has_changed(self, buffered_ref):
if not buffered_ref.is_buffered:
return False
indices = buffered_ref.compute_index(*self.indices)
prev_indices = buffered_ref.compute_index(*self.prev_indices)
return _tuples_differ(indices, prev_indices)
def will_change_current(self, buffered_ref):
if not buffered_ref.is_buffered:
return False
indices = buffered_ref.compute_index(*self.indices)
next_indices = buffered_ref.compute_index(*self.next_indices)
return _tuples_differ(indices, next_indices)
def will_change_fetch(self, buffered_ref):
if not buffered_ref.is_buffered:
return False
if buffered_ref.buffer_count < 2:
raise NotImplementedError()
indices = buffered_ref.compute_index(
*self.fetch_indices[buffered_ref.buffer_count-2])
next_indices = buffered_ref.compute_index(
*self.fetch_indices[buffered_ref.buffer_count-1])
return _tuples_differ(indices, next_indices)
def alias_local_refs(self, buffered_ref, ref):
return buffered_ref.bind_existing_ref(ref, self.indices)
def unalias_local_refs(self, buffered_ref):
return buffered_ref.unbind_refs()
# SCHEDULE ----------------------------------------------------------------
# Below is the sequence of conditional waits and copies used for inputs,
# outputs, and in-out accumulators.
def initialize_step(self, buffered_ref, src_ref, schedule=None, step=0):
if schedule is None:
schedule = _default_schedule
# TODO(justinfu): Should cache this, but it doesn't actually do computation
# in both default & fixed schedules right now so it doesn't increase
# the Jaxpr size.
do_copy = schedule["prologue_copy_in"](self, buffered_ref, src_ref)
with self._named_scope(f"ep_initialize_{step}"):
if step == 0:
@pl.when(self.first_step_ever)
def _init_slots():
buffered_ref.init_slots()
buffered_ref = buffered_ref.load_slots()
if not buffered_ref.is_input or not buffered_ref.is_buffered:
return buffered_ref
if (step + 1) >= buffered_ref.buffer_count:
return buffered_ref
if buffered_ref.use_lookahead:
if step == 0:
# We always fetch the first block.
@pl.when(do_copy)
def _start():
buffered_ref.copy_in(src_ref,
self.add_offset(buffered_ref.next_fetch_indices)) # pylint: disable=cell-var-from-loop
buffered_ref = buffered_ref.advance_copy_in_slot(do_copy)
else:
buffered_ref, _ = fetch_with_lookahead(
buffered_ref,
src_ref,
self.grid,
self.grid_offsets,
predicate=self.first_step_ever & do_copy,
max_num_fetches=1,
)
else:
if step == 0:
predicate = do_copy
fetch_indices = self.fetch_indices[step]
else:
fetch_indices = self.fetch_indices[step]
prev_grid_indices = self.fetch_indices[step - 1]
block_indices = buffered_ref.compute_index(*fetch_indices)
prev_block_indices = buffered_ref.compute_index(*prev_grid_indices)
block_changed = _tuples_differ(block_indices, prev_block_indices)
predicate = do_copy & block_changed
@pl.when(predicate) # pylint: disable=cell-var-from-loop
def _start():
buffered_ref.copy_in(src_ref, fetch_indices) # pylint: disable=cell-var-from-loop
buffered_ref = buffered_ref.advance_copy_in_slot(predicate)
return buffered_ref
def wait_in(self, buffered_ref, src_ref, schedule=None) -> "BufferedRef":
if schedule is None:
schedule = _default_schedule
pred = schedule["wait_in"](self, buffered_ref, src_ref)
@self._named_scope("ep_wait_in")
def _wait():
if buffered_ref.is_input:
buffered_ref.wait_in(src_ref, self.indices)
if buffered_ref.is_accumulator:
# In most cases we won't be waiting when init_accumulators is True,
# so this is usually just setting what we just copied.
buffered_ref.set_accumulator(self.init_accumulators)
@self._named_scope("ep_set_accum")
def _no_wait():
if buffered_ref.is_accumulator:
@pl.when(self.first_step | self.has_changed(buffered_ref))
def _set_accumulator():
# In most cases we will skip waiting when init_accumulators is True,
# so this is usually just setting the accumulator to 0.
buffered_ref.set_accumulator(self.init_accumulators)
lax.cond(pred, _wait, _no_wait)
return buffered_ref
def copy_in(self, buffered_ref, src_ref, schedule=None) -> "BufferedRef":
if schedule is None:
schedule = _default_schedule
pred = schedule['copy_in'](self, buffered_ref, src_ref)
if not buffered_ref.is_input:
return buffered_ref
if buffered_ref.use_lookahead:
buffered_ref, _ = fetch_with_lookahead(
buffered_ref, src_ref, self.grid, self.grid_offsets, predicate=True
)
else:
@pl.when(pred)
@self._named_scope("ep_copy_in")
def _send():
if buffered_ref.is_input and buffered_ref.is_buffered:
buffered_ref.copy_in(src_ref,
self.fetch_indices[buffered_ref.buffer_count-1])
buffered_ref = buffered_ref.advance_copy_in_slot(
pred & buffered_ref.is_input)
return buffered_ref
# --> Call prefetch here to grab the first inputs of next cycle.
# convenience method for prefetch callbacks.
def prefetch(self, buffered_ref, src_ref, schedule=None):
if schedule is None:
schedule = _default_schedule
pred = schedule['prefetch'](self, buffered_ref, src_ref)
if not buffered_ref.is_input or not buffered_ref.is_buffered:
return
if buffered_ref.use_lookahead:
buffered_ref = buffered_ref.with_next_fetch(
jax.tree.map(jnp.zeros_like, buffered_ref.next_fetch_sreg))
@pl.when(pred)
def _start():
buffered_ref.copy_in(
src_ref, self.add_offset(buffered_ref.next_fetch_sreg)) # pylint: disable=cell-var-from-loop
buffered_ref = buffered_ref.advance_copy_in_slot(pred)
buffered_ref, final_copy_in_slot = fetch_with_lookahead(
buffered_ref,
src_ref,
self.grid,
self.grid_offsets,
predicate=pred,
update_slots=False,
)
@pl.when(pred)
def _():
bref = buffered_ref.with_slot_index(copy_in_slot=final_copy_in_slot)
bref.save_slots()
else:
pred = pred & self.last_step
grid_indices = self.indices
for i in range(buffered_ref.buffer_count - 1):
next_grid_indices = self.fetch_indices[i+1]
block_indices = buffered_ref.compute_index(*grid_indices)
next_block_indices = buffered_ref.compute_index(*next_grid_indices)
if i == 0:
# If the prefetch predicate triggers, we already know that the
# first block needs to be copied.
should_prefetch = True
else:
should_prefetch = _tuples_differ(block_indices, next_block_indices)
@pl.when(pred & should_prefetch)
def _():
buffered_ref.copy_in(src_ref, next_grid_indices) # pylint: disable=cell-var-from-loop
buffered_ref = buffered_ref.advance_copy_in_slot(pred & should_prefetch)
grid_indices = next_grid_indices
buffered_ref.save_slots()
return
def wait_out(self, buffered_ref, dst_ref, schedule=None) -> "BufferedRef":
if schedule is None:
schedule = _default_schedule
pred = schedule['wait_out'](self, buffered_ref, dst_ref)
@pl.when(pred)
@self._named_scope("ep_wait_out")
def _wait():
if buffered_ref.is_output:
# Note: As implemented, the current scheduler cannot support multiple
# buffering on outputs. In order to do so properly, we need to save
# the indices for which the copy_out was issued, and wait on them
# here. In the current schedule we always immediately wait_out
# on the iteration after the copy_out, so the prev_indices is always
# the correct grid index to wait on.
buffered_ref.wait_out(dst_ref, self.prev_indices)
return buffered_ref.advance_wait_out_slot(pred & buffered_ref.is_output)
# --> Call "postyeet" here, after last output copy is finished from previous
# cycle
def copy_out(self, buffered_ref, dst_ref, schedule=None) -> "BufferedRef":
if schedule is None:
schedule = _default_schedule
pred = schedule['copy_out'](self, buffered_ref, dst_ref)
@self._named_scope("ep_copy_out")
def _copy_out_and_accumulate():
if buffered_ref.is_accumulator:
buffered_ref.accumulate()
if buffered_ref.is_output:
buffered_ref.copy_out(dst_ref, self.indices)
@self._named_scope("ep_accum")
def _just_accumulate():
if buffered_ref.is_accumulator:
# We accumulate on the last step because we will set the accumulator
# on the next first step. We can optimize this away if it becomes
# a problem, but it is probably not worth the complexity to support
# chains of different pipelines that want to reuse the accumulator with
# slightly different schedules.
@pl.when(self.last_step)
def _accumulate():
buffered_ref.accumulate()
lax.cond(pred, _copy_out_and_accumulate, _just_accumulate)
return buffered_ref.advance_copy_out_slot(pred & buffered_ref.is_output)
def finalize(self, buffered_ref, dst_ref, schedule=None):
if schedule is None:
schedule = _default_schedule
pred = schedule['epilogue_wait_out'](self, buffered_ref, dst_ref)
@pl.when(pred)
@self._named_scope("ep_finalize")
def _end():
if buffered_ref.is_output:
buffered_ref.wait_out(dst_ref, self.indices)
buffered_ref.save_slots()
def advance_slots(self, buffered_ref, schedule=None):
if schedule is None:
schedule = _default_schedule
if buffered_ref.is_input:
pred = schedule['advance_wait_in'](self, buffered_ref, schedule)
buffered_ref = buffered_ref.advance_wait_in_slot(pred)
# Currently we advance copy_in and output slots after their respective
# operation.
return buffered_ref
# END SCHEDULE --------------------------------------------------------------
# Scheduling overrides.
# When trying to fuse across pipelines that use accumulator arguments, we
# sometimes need to mess with the default scheduling above to avoid data-races
# or to maximize performance. A schedule is simply a set of functions that
# calculate predicates for whether or not the pipeline input and output
# BufferedRefs should do copies and waits.
# Copy of the default pipeline schedule. The default schedule tacitly assumes
# that the source and target HBM Refs change with each cycle.
_default_schedule = dict(
prologue_copy_in=lambda s, bref, _: s.first_step_ever,
# We assume that the source ref changed for prefetch.
wait_in=lambda s, bref, _: s.has_changed(bref) | s.first_step,
advance_wait_in=lambda s, bref, _: (
s.will_change_current(bref) | s.last_step),
copy_in=lambda s, bref, _: s.will_change_fetch(bref) & ~s.out_of_fetch(
bref),
# We assume that the source ref changed. E.g. because of a CM DMA.
prefetch=lambda s, bref, _: (
(s.will_change_fetch(bref) | s.last_step) & ~s.last_step_ever
),
# We assume that the target ref changed. E.g. because of a CM DMA.
wait_out=lambda s, bref, _: (s.has_changed(bref) | s.first_step) & ~s.first_step_ever,
# We assume that the target ref is changing. E.g. because of a CM DMA.
copy_out=lambda s, bref, _: s.will_change_current(bref) | s.last_step,
epilogue_wait_out=lambda s, bref, _: s.last_step_ever,
)
# Alternative schedule needed for accumulators reading and writing to a fixed
# HBM reference to avoid HBM data races for trivially small grids: only
# read/write when tiles change or at the very beginning or end of a fused
# pipeline schedule.
_fixed_schedule = dict(
prologue_copy_in=lambda s, bref, _: s.first_step_ever,
# We don't assume that the source ref changed for prefetch.
wait_in=lambda s, bref, _: s.has_changed(bref) | s.first_step_ever,
advance_wait_in=lambda s, bref, _: s.will_change_current(bref),
copy_in=lambda s, bref, _: s.will_change_fetch(bref) & ~s.out_of_fetch(
bref),
# We don't assume that the source ref changed.
prefetch=lambda s, bref, _: s.will_change_fetch(bref) & ~s.last_step_ever,
# We don't assume that the target ref changed.
wait_out=lambda s, bref, _: (s.has_changed(bref) & ~s.first_step_ever),
# We don't assume that the target ref is changing.
copy_out=lambda s, bref, _: s.will_change_current(bref) | s.last_step_ever,
epilogue_wait_out=lambda s, bref, _: s.last_step_ever,
)
def skip_input_copies_when_init_accumulators(schedule) -> Any:
"""Skip input copies in schedule when init_accumulators is True."""
new_schedule = {**schedule}
for k in ["prologue_copy_in", "wait_in", "copy_in"]:
def new_pred(original_pred_fn, *a):
pred = original_pred_fn(*a)
if a[1].is_accumulator or a[1].is_input_output:
pred &= jnp.logical_not(a[0].init_accumulators)
return pred
new_schedule[k] = functools.partial(
new_pred,
schedule[k],
)
return new_schedule
_default_schedule = skip_input_copies_when_init_accumulators(_default_schedule)
_fixed_schedule = skip_input_copies_when_init_accumulators(_fixed_schedule)
def get_pipeline_schedule(schedule) -> Any:
"""Retrieve a named pipeline schedule or pass through fully specified one."""
predefined_schedules = {
'default': _default_schedule,
'fixed': _fixed_schedule
}
if isinstance(schedule, str):
return predefined_schedules[schedule].copy()
return schedule
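# Added usage sketch (not part of the original module): callers can start from
# a named schedule and override individual predicates. Each predicate has the
# signature (scheduler, buffered_ref, hbm_ref) -> bool-like, matching the
# lambdas in _default_schedule above; the particular override below is purely
# illustrative.
_example_schedule = get_pipeline_schedule('fixed')
_example_schedule['wait_in'] = lambda s, bref, _: s.has_changed(bref) | s.first_step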
# Main pipeline methods
def make_pipeline_allocations(
*refs,
in_specs=(),
out_specs=(),
should_accumulate_out=False,
needs_swap_ref=True,
grid=None,
):
"""Create BufferedRefs for the pipeline.
This function creates buffered refs for an inner pipeline that can be
created at the top-level of a pallas call such that they may be reused across
multiple invocations of the inner pipeline.
Args:
*refs: the pipeline's source/destination refs, inputs first and outputs
second, matching in_specs and out_specs.
in_specs: input pallas block specs
out_specs: output pallas block specs
should_accumulate_out: booleans to indicate which outputs should be treated
as accumulators.
needs_swap_ref: whether a swap slots tracker needs to be allocated.
grid: grid to use for the pipeline.
Returns:
A tuple of BufferedRefs, one corresponding to each ref specified in the
in_specs and out_specs.
"""
# TODO(levskaya): generalize argument tree handling here and in emit_pipeline.
num_in_specs = len(in_specs)
if not isinstance(in_specs, (list, tuple)):
in_specs = (in_specs,)
if not isinstance(out_specs, (list, tuple)):
out_specs = (out_specs,)
if isinstance(in_specs, list):
in_specs = tuple(in_specs)
if isinstance(out_specs, list):
out_specs = tuple(out_specs)
in_refs = refs[:num_in_specs]
out_refs = refs[num_in_specs:]
def make_input_bref(in_spec, in_ref):
buffer_count = 2
use_lookahead = False
if in_spec.pipeline_mode is not None:
buffer_count = in_spec.pipeline_mode.buffer_count
use_lookahead = in_spec.pipeline_mode.use_lookahead
if use_lookahead and grid is None:
raise ValueError("Grid must be specified when using lookahead.")
return BufferedRef.input(in_spec, in_ref.dtype, buffer_count,
needs_swap_ref=needs_swap_ref,
grid_rank=len(grid),
use_lookahead=use_lookahead,
source_memory_space=in_ref.memory_space)
in_brefs = jax.tree.map(make_input_bref, in_specs, in_refs)
def make_output_bref(out_spec, out_ref, accumulate):
buffer_count = 2
if out_spec.pipeline_mode is not None:
buffer_count = out_spec.pipeline_mode.buffer_count
if out_spec.pipeline_mode.use_lookahead:
raise ValueError("Output buffering does not support lookahead.")
if accumulate:
return BufferedRef.accumulator(out_spec, out_ref.dtype, buffer_count,
needs_swap_ref=needs_swap_ref,
source_memory_space=out_ref.memory_space)
return BufferedRef.output(out_spec, out_ref.dtype, buffer_count,
needs_swap_ref=needs_swap_ref,
source_memory_space=out_ref.memory_space)
out_brefs = jax.tree.map(
make_output_bref, out_specs, out_refs, should_accumulate_out)
return (*in_brefs, *out_brefs)
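# Hedged example (added): for one input and one output, the call below would
# return a 2-tuple (input_bref, output_bref). `x_hbm` and `o_hbm` stand in for
# HBM-resident refs available inside the outer kernel, and the block and grid
# sizes are illustrative.
#
#   brefs = make_pipeline_allocations(
#       x_hbm, o_hbm,
#       in_specs=[pl.BlockSpec((128, 128), lambda i, j: (i, j))],
#       out_specs=[pl.BlockSpec((128, 128), lambda i, j: (i, j))],
#       should_accumulate_out=(False,),
#       grid=(8, 8),
#   )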
def _partition_grid(
grid: tuple[int | jax.Array, ...],
core_axis: int | str | None,
dimension_semantics: tuple[GridDimensionSemantics, ...] | None,
) -> tuple[tuple[int | jax.Array, ...], tuple[int | jax.Array, ...]]:
if core_axis is None:
# We aren't partitioning the grid
return grid, (0,) * len(grid)
if isinstance(core_axis, int):
num_cores = pl.num_programs(core_axis)
core_id = pl.program_id(core_axis)
else:
num_cores = jax.lax.axis_size(core_axis)
core_id = jax.lax.axis_index(core_axis)
# Check that num_cores is statically known
if not isinstance(num_cores, int):
raise NotImplementedError(
f"Cannot partition grid over dynamic number of cores: {core_axis=}"
)
if num_cores == 1:
# We aren't partitioning the grid
return grid, (0,) * len(grid)
# If dimension_semantics aren't provided, we assume it is all arbitrary.
if dimension_semantics is None:
dimension_semantics = (ARBITRARY,) * len(grid)
if len(dimension_semantics) != len(grid):
raise ValueError("dimension_semantics must be the same length as grid.")
parallel_dimensions = {i for i, d in enumerate(dimension_semantics)
if d == PARALLEL}
# If there are no parallel dimensions, we can't partition the grid
if not parallel_dimensions:
# TODO(sharadmv): enable running kernel on just one core
raise NotImplementedError(
"Cannot partition over cores without parallel grid dimensions:"
f" {dimension_semantics=}"
)
if all(not isinstance(grid[i], int) for i in parallel_dimensions):
raise NotImplementedError(
f"Cannot partition cores over only dynamic grid dimensions: {grid=}"
)
# Try to find a divisible dimension to partition the grid on
divisible_dimensions = {
i for i in parallel_dimensions
if isinstance(grid[i], int) and grid[i] % num_cores == 0
}
if divisible_dimensions:
first_divisible_dimension, *_ = (
i for i in range(len(dimension_semantics)) if i in divisible_dimensions
)
partitioned_dim_size = grid[first_divisible_dimension] // num_cores
partitioned_dim_offset = core_id * partitioned_dim_size
new_grid = jax_util.tuple_update(
grid, first_divisible_dimension, partitioned_dim_size
)
offsets = jax_util.tuple_update(
(0,) * len(grid), first_divisible_dimension, partitioned_dim_offset
)
else:
# No divisible dimensions, so we can't evenly partition the grid. Let's pick
# the largest dimension and try to divide it as evenly as possible.
# TODO(sharadmv): take the product of many nondivisible dimensions to
# potentially divide it more evenly
largest_parallel_dimension = max(grid[i] for i in parallel_dimensions
if isinstance(grid[i], int)) # type: ignore
partition_dimension, *_ = (
i
for i, d in enumerate(grid)
if isinstance(d, int) and d == largest_parallel_dimension
)
base_num_iters, rem = divmod(grid[partition_dimension], num_cores)
assert rem > 0, rem
# We have some remainder iterations that we need to assign somewhere. We
# know that rem < num_cores, so we can assign one extra iteration to each
# core except for the last (num_cores - rem).
num_iters = jnp.where(core_id < rem, base_num_iters + 1,
base_num_iters)
new_grid = jax_util.tuple_update(grid, partition_dimension, num_iters)
# Ordinarily, we would compute the offset as:
# grid_offset = pl.program_id(core_axis) * num_iters
# However, since we have some cores that don't have an extra iteration, we
# need to adjust the offset by `rem`.
grid_offset = jnp.where(
core_id < rem,
core_id * num_iters,
core_id * base_num_iters + rem,
)
offsets = jax_util.tuple_update(
(0,) * len(grid), partition_dimension, grid_offset
)
return new_grid, offsets # type: ignore[return-value]
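# Added worked example: partitioning grid=(10, 512) over num_cores=4 with
# dimension_semantics=(PARALLEL, ARBITRARY). Since 10 % 4 != 0 the
# non-divisible branch runs: base_num_iters=2, rem=2, and
#   core 0: num_iters=3, offset=0      core 1: num_iters=3, offset=3
#   core 2: num_iters=2, offset=6      core 3: num_iters=2, offset=8
# which covers 3 + 3 + 2 + 2 = 10 iterations with no overlap; the second
# (ARBITRARY) dimension is left untouched.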
def sync_copy(src: REF | BufferedRef, dst: REF | BufferedRef, indices):
"""Perform a synchronous copy from src to dst."""
bref: BufferedRef
hbm_ref: REF
if isinstance(src, BufferedRef):
bref = src
if isinstance(dst, BufferedRef):
raise ValueError("Only one of src or dst can be a BufferedRef.")
hbm_ref = dst
copy_in = False
else:
if not isinstance(dst, BufferedRef):
raise ValueError("One of src or dst must be a BufferedRef.")
bref = dst
hbm_ref = src
copy_in = True
hbm_slice = bref.get_dma_slice(hbm_ref.shape, hbm_ref.dtype, indices)
bref_slice = tuple(
pl.ds(0, s.size)
for s, bd in zip(hbm_slice, bref.block_shape)
if not (bd is None or isinstance(bd, pl.Squeezed))
)
if copy_in:
tpu_helpers.sync_copy(hbm_ref.at[hbm_slice],
bref.current_ref.at[bref_slice]) # type: ignore[union-attr]
else:
tpu_helpers.sync_copy(bref.current_ref.at[bref_slice], # type: ignore[union-attr]
hbm_ref.at[hbm_slice])
def emit_pipeline(
body,
*,
grid: tuple[int | jax.Array, ...],
in_specs=(),
out_specs=(),
should_accumulate_out: bool = False,
core_axis: int | None = None,
core_axis_name: str | None = None,
dimension_semantics: tuple[GridDimensionSemantics, ...] | None = None,
trace_scopes: bool = True,
no_pipelining: bool = False,
):
"""Creates a function to emit a manual pallas pipeline.
This has the same semantics as pallas_call but is meant to be called inside
pallas_call for nesting grids. This is useful when you need to have separate
windowing strategies for communication and computation.
The new argument `should_accumulate_out` can be used to specify which outputs
we should accumulate into automatically within and across pipeline
invocations.
Args:
body: pallas kernel to set up pipeline for.
grid: a pallas grid definition.
in_specs: input pallas block specs
out_specs: output pallas block specs
should_accumulate_out: booleans to indicate which outputs should be treated
as accumulators.
core_axis: optional int, the pallas grid axis index to partition the pipeline
grid over the cores (read via pl.num_programs/pl.program_id). Mutually
exclusive with core_axis_name.
core_axis_name: optional str, the named mesh axis to partition the pipeline
grid over (read via jax.lax.axis_size/jax.lax.axis_index). Mutually exclusive
with core_axis.
dimension_semantics: optional tuple of GridDimensionSemantics (e.g. PARALLEL
or ARBITRARY).
trace_scopes: optional bool, indicates whether to annotate each region in
the pipeline using named_scope.
no_pipelining: If True, turns off pipelining and all copies will be made
synchronous. This is useful for debugging multiple-buffering related bugs.
"""
if any(not isinstance(d, (int, jax.Array)) for d in grid):
grid_types = tuple(type(d) for d in grid)
raise ValueError(
f"Grid must consist of Python integers and JAX Arrays: {grid_types}"
)
if not (core_axis is None or core_axis_name is None):
raise ValueError("core_axis and core_axis_name cannot both be provided.")
core_axis_ = core_axis_name if core_axis is None else core_axis
grid, grid_offsets = _partition_grid(grid, core_axis_, dimension_semantics)
num_steps = _grid_size(grid)
if not isinstance(in_specs, (list, tuple)):
in_specs = (in_specs,)
if not isinstance(out_specs, (list, tuple)):
out_specs = (out_specs,)
if isinstance(in_specs, list):
in_specs = tuple(in_specs)
if isinstance(out_specs, list):
out_specs = tuple(out_specs)
should_accumulate_out = _broadcast_pytree_to(should_accumulate_out, out_specs)
get_buffer_count = lambda spec: (spec.pipeline_mode.buffer_count if
(spec is not None and spec.pipeline_mode is not None) else 2)
flattened_specs = jax.tree.leaves((in_specs, out_specs))
max_buffer_count = max((2, *map(get_buffer_count, flattened_specs)))
def pipeline(
*refs: Any,
scratches=None,
allocations=None,
first_cycle: CondVal = True,
last_cycle: CondVal = True,
init_accumulators: CondVal = False,
prefetch=None,
postyeet=None,
schedule=None,
body_prologue=None,
):
"""
Run the pipeline.
Args:
*refs: a list of pallas refs (or more generally a list of pytrees of
pallas refs)
scratches: scratch buffers for the inner kernel
allocations: a list of BufferedRefs, one corresponding to each ref
first_cycle: boolean indicating if this is the first invocation of the
inner pipeline cycle.
last_cycle: boolean indicating if this is the last invocation of the
inner pipeline cycle.
init_accumulators: whether to zero-init accumulators during this cycle.
prefetch: callback called as fn(*brefs, scheduler) that is used to fetch
the next cycle invocation's first inputs. Called during the inputs phase
in the final inner step.
postyeet: callback called as fn(*brefs, scheduler) that is used to finish
any writes or transfers from the last output of the previous cycle.
Called during the outputs phase in the first inner step.
schedule: manually specified pipeline schedules for brefs, None indicates
default schedule.
body_prologue: For running code within the grid environment before the
body is run. Useful for updating manual refs.
"""
if scratches is None:
scratches = ()
if allocations is None:
# run with inline scoped allocations
# Prefetch and postyeet are arbitrary functions that can copy
# into or out of any of the BufferedRefs. Thus, we need a ref
# for the scheduler to mark when the prefetch or postyeet
# functions perform a copy and the slots need to be
# swapped. Without prefetch and postyeet, the swapping logic can
# be performed without the need for state.
needs_swap_ref = prefetch is not None or postyeet is not None
return primitives.run_scoped(
lambda allocations: pipeline(
*refs,
scratches=scratches,
allocations=allocations,
first_cycle=first_cycle,
last_cycle=last_cycle,
init_accumulators=init_accumulators,
prefetch=prefetch,
postyeet=postyeet,
schedule=schedule,
),
make_pipeline_allocations(
*refs,
in_specs=in_specs,
out_specs=out_specs,
should_accumulate_out=should_accumulate_out,
needs_swap_ref=needs_swap_ref,
grid=grid,
),
)
if isinstance(allocations, list):
allocations = tuple(allocations)
# Normalize custom schedule arguments.
if schedule is None:
schedule = map_brefs(lambda x: None, allocations)
if not isinstance(schedule, (list, tuple)):
schedule = map_brefs(lambda x: schedule, allocations)
if isinstance(schedule, list):
schedule = tuple(schedule)
schedule = map_brefs(
lambda _, x: get_pipeline_schedule(x), allocations, schedule)
def make_scheduler(step, indices):
return Scheduler(
step,
indices,
grid,
grid_offsets=grid_offsets,
num_stages=max_buffer_count,
first_cycle=first_cycle,
last_cycle=last_cycle,
init_accumulators=init_accumulators,
trace_scopes=trace_scopes,
)
def loop_body(step, carry):
unaliased_brefs, indices = carry
scheduler = make_scheduler(step, indices)
with scheduler.grid_env():
# prepare any local VMEM aliases
brefs = map_brefs(scheduler.alias_local_refs, unaliased_brefs, refs)
# loop input handling phase
brefs = map_brefs(scheduler.copy_in, brefs, refs, schedule)
brefs = map_brefs(scheduler.wait_in, brefs, refs, schedule)
# prefetch inputs for the *next* invocation of this pipeline
with scheduler._named_scope("ep_prefetch"):
if prefetch is not None:
do_prefetch = step == num_steps - 1
map_brefs(lambda x: x.save_slots(do_prefetch), brefs)
lax.cond(do_prefetch,
lambda: prefetch(*brefs, scheduler),
lambda: None)
brefs = map_brefs(lambda x: x.load_slots(do_prefetch), brefs)
# run the kernel!
if body_prologue is not None:
body_prologue()
current_refs = map_brefs(lambda x: x.current_ref, brefs)
with scheduler._named_scope("ep_run_kernel"):
body(*current_refs, *scratches)
# loop output handling phase
brefs = map_brefs(scheduler.copy_out, brefs, refs, schedule)
brefs = map_brefs(scheduler.wait_out, brefs, refs, schedule)
# handle writes for the *last* invocation of this pipeline's outputs
with scheduler._named_scope("ep_postyeet"):
if postyeet is not None:
do_postyeet = step == 0
map_brefs(lambda x: x.save_slots(do_postyeet), brefs)
lax.cond(do_postyeet,
lambda: postyeet(*brefs, scheduler),
lambda: None)
brefs = map_brefs(lambda x: x.load_slots(do_postyeet), brefs)
brefs = map_brefs(scheduler.advance_slots, brefs, schedule)
# Unbind window_refs for VMEM-backed buffers. Without this
# we will be returning TransformedRefs which are not valid
# JAX types.
brefs = map_brefs(scheduler.unalias_local_refs, brefs)
return brefs, _next_index(indices, grid)
if no_pipelining:
# Debugging mode where all copies are synchronous.
initial_indices = (0,) * len(grid)
scheduler = make_scheduler(0, initial_indices)
brefs = map_brefs(scheduler.alias_local_refs, allocations, refs)
map_brefs(lambda bref: bref.init_slots(), brefs)
if postyeet is not None or prefetch is not None:
raise NotImplementedError("Prefetch/Postyeet not supported")
if any(bref.is_accumulator for bref in brefs):
raise NotImplementedError("Accumulators not supported")
@functools.partial(jax.lax.fori_loop, 0, num_steps,
init_val=(brefs, initial_indices))
def _loop_body(step, carry):
brefs, indices = carry
scheduler = make_scheduler(step, indices)
with scheduler.grid_env():
# prepare any local VMEM aliases
brefs = map_brefs(scheduler.alias_local_refs, brefs, refs)
# loop input handling phase
copy_in = lambda bref, ref: sync_copy(ref, bref, indices)
map_inputs(copy_in, brefs, refs)
# run the kernel!
if body_prologue is not None:
body_prologue()
current_refs = map_brefs(lambda x: x.current_ref, brefs)
with scheduler._named_scope("ep_run_kernel"):
body(*current_refs, *scratches)
# loop output handling phase
copy_out = lambda bref, ref: sync_copy(bref, ref, indices)
map_outputs(copy_out, brefs, refs)
brefs = map_brefs(scheduler.unalias_local_refs, brefs)
return brefs, _next_index(indices, grid)
else:
@pl.when(num_steps > 0)
def _():
# pipeline prologue
initial_indices = (0,) * len(grid)
scheduler = make_scheduler(0, initial_indices)
brefs = allocations
with scheduler.grid_env():
# We issue num_stages-1 prefetch copies per buffer.
# We iterate over steps in the outer loop because we want to
# queue all iteration 0 prefetches before iteration 1, and so on.
for step in range(scheduler.num_stages - 1):
brefs = map_brefs(functools.partial(
scheduler.initialize_step, step=step),
brefs, refs, schedule)
# pipeline loop
brefs, next_indices = lax.fori_loop(
0, num_steps, loop_body, (brefs, initial_indices)
)
# pipeline epilogue
final_indices = _prev_index(next_indices, grid)
scheduler = make_scheduler(num_steps - 1, final_indices)
with scheduler.grid_env():
map_brefs(scheduler.finalize, brefs, refs, schedule)
return pipeline
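# Hedged usage sketch (added; not part of the original module). The outer
# pallas_call is expected to hand this pipeline HBM/ANY-memory-space refs; the
# emitted `inner` callable then streams (128, 128) blocks through VMEM and
# runs `inner_kernel` on each block. Block and grid sizes are illustrative.
#
#   def outer_kernel(x_hbm_ref, o_hbm_ref):
#       def inner_kernel(x_vmem_ref, o_vmem_ref):
#           o_vmem_ref[...] = x_vmem_ref[...] * 2
#
#       inner = emit_pipeline(
#           inner_kernel,
#           grid=(8, 8),
#           in_specs=[pl.BlockSpec((128, 128), lambda i, j: (i, j))],
#           out_specs=[pl.BlockSpec((128, 128), lambda i, j: (i, j))],
#       )
#       inner(x_hbm_ref, o_hbm_ref)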
def emit_pipeline_with_allocations(
body,
*,
grid,
in_specs=(),
out_specs=(),
should_accumulate_out=False,
):
"""Creates pallas pipeline and top-level allocation preparation functions.
Args:
body: pallas kernel to set up pipeline for.
grid: a pallas grid definition.
in_specs: input pallas block specs
out_specs: output pallas block specs
should_accumulate_out: booleans to indicate which outputs should be treated
as accumulators.
Returns:
(emit_pipeline, make_allocations) function pair, where:
emit_pipeline is the pallas pipeline function.
make_allocations is a function to create buffered refs for the inner
pipeline that can be created at the top-level of a pallas call to be
reused across multiple invocations of the inner pipeline.
"""
make_allocations = functools.partial(make_pipeline_allocations,
in_specs=in_specs,
out_specs=out_specs,
should_accumulate_out=should_accumulate_out,
grid=grid)
pipeline = emit_pipeline(
body,
grid=grid,
in_specs=in_specs,
out_specs=out_specs,
should_accumulate_out=should_accumulate_out)
return pipeline, make_allocations
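# Hedged usage sketch (added): the returned pair lets the caller build the
# BufferedRefs once, at the top level of the outer kernel, and reuse them
# across repeated invocations of the inner pipeline, mirroring the run_scoped
# pattern used inside emit_pipeline above.
#
#   pipeline_fn, make_allocs = emit_pipeline_with_allocations(
#       inner_kernel, grid=grid, in_specs=in_specs, out_specs=out_specs)
#
#   def outer_kernel(x_hbm_ref, o_hbm_ref):
#       primitives.run_scoped(
#           lambda allocs: pipeline_fn(
#               x_hbm_ref, o_hbm_ref, allocations=allocs),
#           make_allocs(x_hbm_ref, o_hbm_ref),
#       )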
|
Scheduler
|
python
|
getsentry__sentry
|
src/sentry/notifications/api/endpoints/user_notification_email.py
|
{
"start": 623,
"end": 2799
}
|
class ____(UserEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ALERTS_NOTIFICATIONS
def get(self, request: Request, user) -> Response:
"""
Fetches the user's email notification settings.
Returns a dictionary where the keys are the IDs of the projects
and the values are the email addresses to be used for notifications for that project.
"""
email_options = UserOption.objects.filter(
key="mail:email", user=user, project_id__isnull=False
).select_related("user")
return self.respond({str(option.project_id): option.value for option in email_options})
def put(self, request: Request, user) -> Response:
"""
Updates the user's email notification settings.
The request data should be a dictionary where the keys are the IDs of the projects
and the values are the email addresses to be used for notifications for that project.
All email addresses must be verified and belong to the user.
If any email address is not verified or does not belong to the user, a 400 error is returned.
If the update is successful, a 204 status code is returned.
"""
data = request.data
# Make sure target emails exist and are verified
emails_to_check = set(data.values())
emails = UserEmail.objects.filter(user=user, email__in=emails_to_check, is_verified=True)
# TODO(mgaeta): Is there a better way to check this?
if len(emails) != len(emails_to_check):
return Response({"detail": INVALID_EMAIL_MSG}, status=status.HTTP_400_BAD_REQUEST)
with transaction.atomic(using=router.db_for_write(UserOption)):
for id, value in data.items():
user_option, _created = UserOption.objects.get_or_create(
user=user,
key="mail:email",
project_id=int(id),
)
user_option.update(value=str(value))
return Response(status=status.HTTP_204_NO_CONTENT)
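# Illustrative request/response sketch (added; the project IDs and addresses
# are made up):
#
#   PUT {"1": "primary@example.com", "2": "work@example.com"}
#     -> 204 No Content if both addresses are verified UserEmails owned by
#        the requesting user, otherwise 400 {"detail": INVALID_EMAIL_MSG}.
#   GET -> {"1": "primary@example.com", "2": "work@example.com"}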
|
UserNotificationEmailEndpoint
|
python
|
huggingface__transformers
|
src/transformers/models/glm4/modeling_glm4.py
|
{
"start": 15847,
"end": 16383
}
|
class ____(PreTrainedModel):
config: Glm4Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Glm4DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Glm4DecoderLayer,
"attentions": Glm4Attention,
}
@auto_docstring
|
Glm4PreTrainedModel
|
python
|
doocs__leetcode
|
solution/2300-2399/2389.Longest Subsequence With Limited Sum/Solution.py
|
{
"start": 0,
"end": 203
}
|
from bisect import bisect_right
from itertools import accumulate
from typing import List


class ____:
def answerQueries(self, nums: List[int], queries: List[int]) -> List[int]:
nums.sort()
s = list(accumulate(nums))
return [bisect_right(s, q) for q in queries]
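# Added worked example: nums = [4, 5, 2, 1], queries = [3, 10, 21].
# After sorting, nums = [1, 2, 4, 5] and the prefix sums are s = [1, 3, 7, 12].
# bisect_right(s, 3) == 2, bisect_right(s, 10) == 3, bisect_right(s, 21) == 4,
# so the method returns [2, 3, 4]: the longest subsequence whose sum stays
# within each limit takes the 2, 3 and 4 smallest elements respectively.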
|
Solution
|