| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python
|
getsentry__sentry-python
|
sentry_sdk/integrations/redis/__init__.py
|
{
"start": 553,
"end": 1659
}
|
class ____(Integration):
identifier = "redis"
def __init__(self, max_data_size=_DEFAULT_MAX_DATA_SIZE, cache_prefixes=None):
# type: (Optional[int], Optional[list[str]]) -> None
self.max_data_size = max_data_size
self.cache_prefixes = cache_prefixes if cache_prefixes is not None else []
if max_data_size is not None:
warnings.warn(
"The `max_data_size` parameter of `RedisIntegration` is "
"deprecated and will be removed in version 3.0 of sentry-sdk.",
DeprecationWarning,
stacklevel=2,
)
@staticmethod
def setup_once():
# type: () -> None
try:
from redis import StrictRedis, client
except ImportError:
raise DidNotEnable("Redis client not installed")
_patch_redis(StrictRedis, client)
_patch_redis_cluster()
_patch_rb()
try:
_patch_rediscluster()
except Exception:
logger.exception("Error occurred while patching `rediscluster` library")
|
RedisIntegration
|
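A minimal usage sketch for the row above, using the standard `sentry_sdk.init` entry point; the DSN and cache prefix are placeholders, and `cache_prefixes` follows the constructor shown in the source cell:

import sentry_sdk
from sentry_sdk.integrations.redis import RedisIntegration

sentry_sdk.init(
    dsn="https://<key>@<org>.ingest.sentry.io/<project>",  # placeholder DSN
    integrations=[RedisIntegration(cache_prefixes=["mycache:"])],
)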
python
|
huggingface__transformers
|
src/transformers/models/biogpt/tokenization_biogpt.py
|
{
"start": 1274,
"end": 12158
}
|
class ____(PreTrainedTokenizer):
"""
Construct a FAIRSEQ Transformer tokenizer. Moses tokenization followed by Byte-Pair Encoding.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Merges file.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
unk_token="<unk>",
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
pad_token="<pad>",
**kwargs,
):
try:
import sacremoses
except ImportError:
raise ImportError(
"You need to install sacremoses to use BioGptTokenizer. "
"See https://pypi.org/project/sacremoses/ for installation."
)
self.lang = "en"
self.sm = sacremoses
# cache of sm.MosesTokenizer instance
self.cache_moses_tokenizer = {}
self.cache_moses_detokenizer = {}
""" Initialisation"""
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
merges = merges_handle.read().split("\n")[:-1]
merges = [tuple(merge.split()[:2]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
unk_token=unk_token,
pad_token=pad_token,
**kwargs,
)
@property
def vocab_size(self):
"""Returns vocab size"""
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def moses_tokenize(self, text, lang):
if lang not in self.cache_moses_tokenizer:
moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
self.cache_moses_tokenizer[lang] = moses_tokenizer
return self.cache_moses_tokenizer[lang].tokenize(
text, aggressive_dash_splits=True, return_str=False, escape=True
)
def moses_detokenize(self, tokens, lang):
if lang not in self.cache_moses_detokenizer:
moses_detokenizer = self.sm.MosesDetokenizer(lang=lang)
self.cache_moses_detokenizer[lang] = moses_detokenizer
return self.cache_moses_detokenizer[lang].detokenize(tokens)
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + "</w>",)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
if word == "\n </w>":
word = "\n</w>"
self.cache[token] = word
return word
def _tokenize(self, text, bypass_tokenizer=False):
"""Returns a tokenized string."""
if bypass_tokenizer:
text = text.split()
else:
text = self.moses_tokenize(text, self.lang)
split_tokens = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(token).split(" ")))
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
# remove BPE
tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
tokens = "".join(tokens).split()
# detokenize
text = self.moses_detokenize(tokens, self.lang)
return text
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BioGPT sequence has the following format:
- single sequence: `</s> X `
- pair of sequences: `</s> A </s> B `
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.sep_token_id] + token_ids_0
sep = [self.sep_token_id]
return sep + token_ids_0 + sep + token_ids_1
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
# no bos used in fairseq
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
return [1] + ([0] * len(token_ids_0))
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
def __getstate__(self):
state = self.__dict__.copy()
state["sm"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
try:
import sacremoses
except ImportError:
raise ImportError(
"You need to install sacremoses to use XLMTokenizer. "
"See https://pypi.org/project/sacremoses/ for installation."
)
self.sm = sacremoses
__all__ = ["BioGptTokenizer"]
|
BioGptTokenizer
|
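A minimal round-trip sketch for the tokenizer row above, assuming the `microsoft/biogpt` checkpoint is available on the Hugging Face Hub:

from transformers import BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")  # assumed checkpoint
ids = tokenizer("BioGPT applies Moses tokenization, then BPE.").input_ids
print(tokenizer.decode(ids))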
python
|
pallets__werkzeug
|
src/werkzeug/datastructures/structures.py
|
{
"start": 33677,
"end": 34895
}
|
class ____( # type: ignore[misc]
ImmutableMultiDictMixin[K, V], _OrderedMultiDict[K, V]
):
"""An immutable :class:`OrderedMultiDict`.
.. deprecated:: 3.1
Will be removed in Werkzeug 3.2. Use ``ImmutableMultiDict`` instead.
.. versionadded:: 0.6
"""
def __init__(
self,
mapping: (
MultiDict[K, V]
| cabc.Mapping[K, V | list[V] | tuple[V, ...] | set[V]]
| cabc.Iterable[tuple[K, V]]
| None
) = None,
) -> None:
super().__init__()
if mapping is not None:
for k, v in iter_multi_items(mapping):
_OrderedMultiDict.add(self, k, v)
def _iter_hashitems(self) -> cabc.Iterable[t.Any]:
return enumerate(self.items(multi=True))
def copy(self) -> _OrderedMultiDict[K, V]: # type: ignore[override]
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other Python immutable type (e.g. :class:`tuple`).
"""
return _OrderedMultiDict(self)
def __copy__(self) -> te.Self:
return self
|
_ImmutableOrderedMultiDict
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_relationships.py
|
{
"start": 113410,
"end": 117179
}
|
class ____(_RelationshipErrors, fixtures.MappedTest):
"""'viewonly' mappings with a complex join condition."""
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
)
Table(
"t2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
Column("t1id", Integer, ForeignKey("t1.id")),
)
Table(
"t3",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
)
Table(
"t2tot3",
metadata,
Column("t2id", Integer, ForeignKey("t2.id")),
Column("t3id", Integer, ForeignKey("t3.id")),
)
@classmethod
def setup_classes(cls):
class T1(cls.Comparable):
pass
class T2(cls.Comparable):
pass
class T3(cls.Comparable):
pass
def test_basic(self):
T1, t2, T2, T3, t3, t2tot3, t1 = (
self.classes.T1,
self.tables.t2,
self.classes.T2,
self.classes.T3,
self.tables.t3,
self.tables.t2tot3,
self.tables.t1,
)
self.mapper_registry.map_imperatively(
T1,
t1,
properties={
"t3s": relationship(
T3,
primaryjoin=sa.and_(
t1.c.id == t2.c.t1id,
t2.c.id == t2tot3.c.t2id,
t3.c.id == t2tot3.c.t3id,
),
viewonly=True,
foreign_keys=t3.c.id,
remote_side=t2.c.t1id,
)
},
)
self.mapper_registry.map_imperatively(
T2,
t2,
properties={
"t1": relationship(T1),
"t3s": relationship(T3, secondary=t2tot3),
},
)
self.mapper_registry.map_imperatively(T3, t3)
sess = fixture_session()
sess.add(T2(data="t2", t1=T1(data="t1"), t3s=[T3(data="t3")]))
sess.flush()
sess.expunge_all()
a = sess.query(T1).first()
eq_(a.t3s, [T3(data="t3")])
def test_remote_side_escalation(self):
T1, t2, T2, T3, t3, t2tot3, t1 = (
self.classes.T1,
self.tables.t2,
self.classes.T2,
self.classes.T3,
self.tables.t3,
self.tables.t2tot3,
self.tables.t1,
)
self.mapper_registry.map_imperatively(
T1,
t1,
properties={
"t3s": relationship(
T3,
primaryjoin=sa.and_(
t1.c.id == t2.c.t1id,
t2.c.id == t2tot3.c.t2id,
t3.c.id == t2tot3.c.t3id,
),
viewonly=True,
foreign_keys=t3.c.id,
)
},
)
self.mapper_registry.map_imperatively(
T2,
t2,
properties={
"t1": relationship(T1),
"t3s": relationship(T3, secondary=t2tot3),
},
)
self.mapper_registry.map_imperatively(T3, t3)
self._assert_raises_no_local_remote(configure_mappers, "T1.t3s")
|
ViewOnlyComplexJoin
|
python
|
eriklindernoren__ML-From-Scratch
|
mlfromscratch/supervised_learning/regression.py
|
{
"start": 143,
"end": 418
}
|
class ____():
""" Regularization for Lasso Regression """
def __init__(self, alpha):
self.alpha = alpha
def __call__(self, w):
return self.alpha * np.linalg.norm(w)
def grad(self, w):
return self.alpha * np.sign(w)
|
l1_regularization
|
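A short NumPy-only sketch of how the penalty class above could be used; note that `np.linalg.norm` defaults to the L2 norm, so in the original code the penalty term and the L1-style `grad` follow different conventions:

import numpy as np

l1 = l1_regularization(alpha=0.01)
w = np.array([0.5, -0.2, 0.0, 1.3])
penalty = l1(w)        # 0.01 * np.linalg.norm(w) (2-norm by default)
gradient = l1.grad(w)  # 0.01 * np.sign(w), elementwise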
python
|
bottlepy__bottle
|
test/test_securecookies.py
|
{
"start": 101,
"end": 1304
}
|
class ____(unittest.TestCase):
def setUp(self):
self.data = touni('υηι¢σ∂є')
self.secret = tob('secret')
bottle.app.push()
bottle.response.bind()
def tearDown(self):
bottle.app.pop()
def get_pairs(self):
for k, v in bottle.response.headerlist:
if k == 'Set-Cookie':
key, value = v.split(';')[0].split('=', 1)
yield key.lower().strip(), value.strip()
def set_pairs(self, pairs):
header = ','.join(['%s=%s' % (k, v) for k, v in pairs])
bottle.request.bind({'HTTP_COOKIE': header})
def testValid(self):
bottle.response.set_cookie('key', self.data, secret=self.secret)
pairs = self.get_pairs()
self.set_pairs(pairs)
result = bottle.request.get_cookie('key', secret=self.secret)
self.assertEqual(self.data, result)
def testWrongKey(self):
bottle.response.set_cookie('key', self.data, secret=self.secret)
pairs = self.get_pairs()
self.set_pairs([(k + 'xxx', v) for (k, v) in pairs])
result = bottle.request.get_cookie('key', secret=self.secret)
self.assertEqual(None, result)
|
TestSignedCookies
|
python
|
scipy__scipy
|
scipy/fftpack/tests/test_real_transforms.py
|
{
"start": 12586,
"end": 12717
}
|
class ____(_TestIDCTBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 4
|
TestIDCTIVInt
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/channels.py
|
{
"start": 1174933,
"end": 1192808
}
|
class ____:
def encode(
self,
*args: Any,
angle: Optional[str | AnyAngle | IntoCondition | Map] = Undefined,
color: Optional[str | AnyColor | IntoCondition | Map] = Undefined,
column: Optional[str | Column | IntoCondition | Map] = Undefined,
description: Optional[str | AnyDescription | IntoCondition | Map] = Undefined,
detail: Optional[OneOrSeq[str | Detail | IntoCondition | Map]] = Undefined,
facet: Optional[str | Facet | IntoCondition | Map] = Undefined,
fill: Optional[str | AnyFill | IntoCondition | Map] = Undefined,
fillOpacity: Optional[str | AnyFillOpacity | IntoCondition | Map] = Undefined,
href: Optional[str | AnyHref | IntoCondition | Map] = Undefined,
key: Optional[str | Key | IntoCondition | Map] = Undefined,
latitude: Optional[str | AnyLatitude | IntoCondition | Map] = Undefined,
latitude2: Optional[str | AnyLatitude2 | IntoCondition | Map] = Undefined,
longitude: Optional[str | AnyLongitude | IntoCondition | Map] = Undefined,
longitude2: Optional[str | AnyLongitude2 | IntoCondition | Map] = Undefined,
opacity: Optional[str | AnyOpacity | IntoCondition | Map] = Undefined,
order: Optional[OneOrSeq[str | AnyOrder | IntoCondition | Map]] = Undefined,
radius: Optional[str | AnyRadius | IntoCondition | Map] = Undefined,
radius2: Optional[str | AnyRadius2 | IntoCondition | Map] = Undefined,
row: Optional[str | Row | IntoCondition | Map] = Undefined,
shape: Optional[str | AnyShape | IntoCondition | Map] = Undefined,
size: Optional[str | AnySize | IntoCondition | Map] = Undefined,
stroke: Optional[str | AnyStroke | IntoCondition | Map] = Undefined,
strokeDash: Optional[str | AnyStrokeDash | IntoCondition | Map] = Undefined,
strokeOpacity: Optional[
str | AnyStrokeOpacity | IntoCondition | Map
] = Undefined,
strokeWidth: Optional[str | AnyStrokeWidth | IntoCondition | Map] = Undefined,
text: Optional[str | AnyText | IntoCondition | Map] = Undefined,
theta: Optional[str | AnyTheta | IntoCondition | Map] = Undefined,
theta2: Optional[str | AnyTheta2 | IntoCondition | Map] = Undefined,
time: Optional[str | Time | IntoCondition | Map] = Undefined,
tooltip: Optional[OneOrSeq[str | AnyTooltip | IntoCondition | Map]] = Undefined,
url: Optional[str | AnyUrl | IntoCondition | Map] = Undefined,
x: Optional[str | AnyX | IntoCondition | Map] = Undefined,
x2: Optional[str | AnyX2 | IntoCondition | Map] = Undefined,
xError: Optional[str | AnyXError | IntoCondition | Map] = Undefined,
xError2: Optional[str | AnyXError2 | IntoCondition | Map] = Undefined,
xOffset: Optional[str | AnyXOffset | IntoCondition | Map] = Undefined,
y: Optional[str | AnyY | IntoCondition | Map] = Undefined,
y2: Optional[str | AnyY2 | IntoCondition | Map] = Undefined,
yError: Optional[str | AnyYError | IntoCondition | Map] = Undefined,
yError2: Optional[str | AnyYError2 | IntoCondition | Map] = Undefined,
yOffset: Optional[str | AnyYOffset | IntoCondition | Map] = Undefined,
) -> Self:
"""
Map properties of the data to visual properties of the chart (see :class:`FacetedEncoding`).
Parameters
----------
angle : str, :class:`Angle`, Dict, :class:`AngleDatum`, :class:`AngleValue`
Rotation angle of point and text marks.
color : str, :class:`Color`, Dict, :class:`ColorDatum`, :class:`ColorValue`
Color of the marks - either fill or stroke color based on the ``filled``
property of mark definition. By default, ``color`` represents fill color for
``"area"``, ``"bar"``, ``"tick"``, ``"text"``, ``"trail"``, ``"circle"``,
and ``"square"`` / stroke color for ``"line"`` and ``"point"``.
**Default value:** If undefined, the default color depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s
``color`` property.
*Note:* 1) For fine-grained control over both fill and stroke colors of the
marks, please use the ``fill`` and ``stroke`` channels. The ``fill`` or
``stroke`` encodings have higher precedence than ``color``, thus may
override the ``color`` encoding if conflicting encodings are specified. 2)
See the scale documentation for more information about customizing `color
scheme <https://vega.github.io/vega-lite/docs/scale.html#scheme>`__.
column : str, :class:`Column`, Dict
A field definition for the horizontal facet of trellis plots.
description : str, :class:`Description`, Dict, :class:`DescriptionValue`
A text description of this mark for ARIA accessibility (SVG output only).
For SVG output the ``"aria-label"`` attribute will be set to this
description.
detail : str, :class:`Detail`, Dict, List
Additional levels of detail for grouping data in aggregate views and in
line, trail, and area marks without mapping data to a specific visual
channel.
facet : str, :class:`Facet`, Dict
A field definition for the (flexible) facet of trellis plots.
If either ``row`` or ``column`` is specified, this channel will be ignored.
fill : str, :class:`Fill`, Dict, :class:`FillDatum`, :class:`FillValue`
Fill color of the marks. **Default value:** If undefined, the default color
depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s
``color`` property.
*Note:* The ``fill`` encoding has higher precedence than ``color``, thus may
override the ``color`` encoding if conflicting encodings are specified.
fillOpacity : str, :class:`FillOpacity`, Dict, :class:`FillOpacityDatum`, :class:`FillOpacityValue`
Fill opacity of the marks.
**Default value:** If undefined, the default opacity depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s
``fillOpacity`` property.
href : str, :class:`Href`, Dict, :class:`HrefValue`
A URL to load upon mouse click.
key : str, :class:`Key`, Dict
A data field to use as a unique key for data binding. When a visualization's
data is updated, the key value will be used to match data elements to
existing mark instances. Use a key channel to enable object constancy for
transitions over dynamic data.
latitude : str, :class:`Latitude`, Dict, :class:`LatitudeDatum`
Latitude position of geographically projected marks.
latitude2 : str, :class:`Latitude2`, Dict, :class:`Latitude2Datum`, :class:`Latitude2Value`
Latitude-2 position for geographically projected ranged ``"area"``,
``"bar"``, ``"rect"``, and ``"rule"``.
longitude : str, :class:`Longitude`, Dict, :class:`LongitudeDatum`
Longitude position of geographically projected marks.
longitude2 : str, :class:`Longitude2`, Dict, :class:`Longitude2Datum`, :class:`Longitude2Value`
Longitude-2 position for geographically projected ranged ``"area"``,
``"bar"``, ``"rect"``, and ``"rule"``.
opacity : str, :class:`Opacity`, Dict, :class:`OpacityDatum`, :class:`OpacityValue`
Opacity of the marks.
**Default value:** If undefined, the default opacity depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s
``opacity`` property.
order : str, :class:`Order`, Dict, List, :class:`OrderValue`
Order of the marks.
* For stacked marks, this ``order`` channel encodes `stack order
<https://vega.github.io/vega-lite/docs/stack.html#order>`__.
* For line and trail marks, this ``order`` channel encodes order of data
points in the lines. This can be useful for creating `a connected
scatterplot
<https://vega.github.io/vega-lite/examples/connected_scatterplot.html>`__.
Setting ``order`` to ``{"value": null}`` makes the line marks use the
original order in the data sources.
* Otherwise, this ``order`` channel encodes layer order of the marks.
**Note**: In aggregate plots, ``order`` field should be aggregated to avoid
creating additional aggregation grouping.
radius : str, :class:`Radius`, Dict, :class:`RadiusDatum`, :class:`RadiusValue`
The outer radius in pixels of arc marks.
radius2 : str, :class:`Radius2`, Dict, :class:`Radius2Datum`, :class:`Radius2Value`
The inner radius in pixels of arc marks.
row : str, :class:`Row`, Dict
A field definition for the vertical facet of trellis plots.
shape : str, :class:`Shape`, Dict, :class:`ShapeDatum`, :class:`ShapeValue`
Shape of the mark.
1. For ``point`` marks the supported values include: - plotting shapes:
``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``, ``"triangle-up"``,
``"triangle-down"``, ``"triangle-right"``, or ``"triangle-left"``. - the
line symbol ``"stroke"`` - centered directional shapes ``"arrow"``,
``"wedge"``, or ``"triangle"`` - a custom `SVG path string
<https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For
correct sizing, custom shape paths should be defined within a square
bounding box with coordinates ranging from -1 to 1 along both the x and y
dimensions.)
2. For ``geoshape`` marks it should be a field definition of the geojson
data
**Default value:** If undefined, the default shape depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#point-config>`__'s
``shape`` property. (``"circle"`` if unset.)
size : str, :class:`Size`, Dict, :class:`SizeDatum`, :class:`SizeValue`
Size of the mark.
* For ``"point"``, ``"square"`` and ``"circle"``, - the symbol size, or
pixel area of the mark.
* For ``"bar"`` and ``"tick"`` - the bar and tick's size.
* For ``"text"`` - the text's font size.
* Size is unsupported for ``"line"``, ``"area"``, and ``"rect"``. (Use
``"trail"`` instead of line with varying size)
stroke : str, :class:`Stroke`, Dict, :class:`StrokeDatum`, :class:`StrokeValue`
Stroke color of the marks. **Default value:** If undefined, the default
color depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s
``color`` property.
*Note:* The ``stroke`` encoding has higher precedence than ``color``, thus
may override the ``color`` encoding if conflicting encodings are specified.
strokeDash : str, :class:`StrokeDash`, Dict, :class:`StrokeDashDatum`, :class:`StrokeDashValue`
Stroke dash of the marks.
**Default value:** ``[1,0]`` (No dash).
strokeOpacity : str, :class:`StrokeOpacity`, Dict, :class:`StrokeOpacityDatum`, :class:`StrokeOpacityValue`
Stroke opacity of the marks.
**Default value:** If undefined, the default opacity depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s
``strokeOpacity`` property.
strokeWidth : str, :class:`StrokeWidth`, Dict, :class:`StrokeWidthDatum`, :class:`StrokeWidthValue`
Stroke width of the marks.
**Default value:** If undefined, the default stroke width depends on `mark
config <https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s
``strokeWidth`` property.
text : str, :class:`Text`, Dict, :class:`TextDatum`, :class:`TextValue`
Text of the ``text`` mark.
theta : str, :class:`Theta`, Dict, :class:`ThetaDatum`, :class:`ThetaValue`
* For arc marks, the arc length in radians if theta2 is not specified,
otherwise the start arc angle. (A value of 0 indicates up or “north”,
increasing values proceed clockwise.)
* For text marks, polar coordinate angle in radians.
theta2 : str, :class:`Theta2`, Dict, :class:`Theta2Datum`, :class:`Theta2Value`
The end angle of arc marks in radians. A value of 0 indicates up or “north”,
increasing values proceed clockwise.
time : str, :class:`Time`, Dict
tooltip : str, :class:`Tooltip`, Dict, List, :class:`TooltipValue`
The tooltip text to show upon mouse hover. Specifying ``tooltip`` encoding
overrides `the tooltip property in the mark definition
<https://vega.github.io/vega-lite/docs/mark.html#mark-def>`__.
See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__
documentation for a detailed discussion about tooltip in Vega-Lite.
url : str, :class:`Url`, Dict, :class:`UrlValue`
The URL of an image mark.
x : str, :class:`X`, Dict, :class:`XDatum`, :class:`XValue`
X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"``
without specified ``x2`` or ``width``.
The ``value`` of this channel can be a number or a string ``"width"`` for
the width of the plot.
x2 : str, :class:`X2`, Dict, :class:`X2Datum`, :class:`X2Value`
X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and
``"rule"``.
The ``value`` of this channel can be a number or a string ``"width"`` for
the width of the plot.
xError : str, :class:`XError`, Dict, :class:`XErrorValue`
Error value of x coordinates for error specified ``"errorbar"`` and
``"errorband"``.
xError2 : str, :class:`XError2`, Dict, :class:`XError2Value`
Secondary error value of x coordinates for error specified ``"errorbar"``
and ``"errorband"``.
xOffset : str, :class:`XOffset`, Dict, :class:`XOffsetDatum`, :class:`XOffsetValue`
Offset of x-position of the marks
y : str, :class:`Y`, Dict, :class:`YDatum`, :class:`YValue`
Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"``
without specified ``y2`` or ``height``.
The ``value`` of this channel can be a number or a string ``"height"`` for
the height of the plot.
y2 : str, :class:`Y2`, Dict, :class:`Y2Datum`, :class:`Y2Value`
Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and
``"rule"``.
The ``value`` of this channel can be a number or a string ``"height"`` for
the height of the plot.
yError : str, :class:`YError`, Dict, :class:`YErrorValue`
Error value of y coordinates for error specified ``"errorbar"`` and
``"errorband"``.
yError2 : str, :class:`YError2`, Dict, :class:`YError2Value`
Secondary error value of y coordinates for error specified ``"errorbar"``
and ``"errorband"``.
yOffset : str, :class:`YOffset`, Dict, :class:`YOffsetDatum`, :class:`YOffsetValue`
Offset of y-position of the marks
"""
kwargs = {
"angle": angle,
"color": color,
"column": column,
"description": description,
"detail": detail,
"facet": facet,
"fill": fill,
"fillOpacity": fillOpacity,
"href": href,
"key": key,
"latitude": latitude,
"latitude2": latitude2,
"longitude": longitude,
"longitude2": longitude2,
"opacity": opacity,
"order": order,
"radius": radius,
"radius2": radius2,
"row": row,
"shape": shape,
"size": size,
"stroke": stroke,
"strokeDash": strokeDash,
"strokeOpacity": strokeOpacity,
"strokeWidth": strokeWidth,
"text": text,
"theta": theta,
"theta2": theta2,
"time": time,
"tooltip": tooltip,
"url": url,
"x": x,
"x2": x2,
"xError": xError,
"xError2": xError2,
"xOffset": xOffset,
"y": y,
"y2": y2,
"yError": yError,
"yError2": yError2,
"yOffset": yOffset,
}
if args:
kwargs = {k: v for k, v in kwargs.items() if v is not Undefined}
# Convert args to kwargs based on their types.
kwargs = _infer_encoding_types(args, kwargs)
# get a copy of the dict representation of the previous encoding
# ignore type as copy method comes from SchemaBase
copy = self.copy(deep=["encoding"]) # type: ignore[attr-defined]
encoding = copy._get("encoding", {})
if isinstance(encoding, core.VegaLiteSchema):
encoding = {k: v for k, v in encoding._kwds.items() if v is not Undefined}
# update with the new encodings, and apply them to the copy
encoding.update(kwargs)
copy.encoding = core.FacetedEncoding(**encoding)
return copy
|
_EncodingMixin
|
python
|
ipython__ipython
|
tests/test_io.py
|
{
"start": 1066,
"end": 1409
}
|
class ____(unittest.TestCase):
def test_capture_output(self):
"""capture_output() context works"""
with capture_output() as io:
print("hi, stdout")
print("hi, stderr", file=sys.stderr)
self.assertEqual(io.stdout, "hi, stdout\n")
self.assertEqual(io.stderr, "hi, stderr\n")
|
TestIOStream
|
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/management/commands/createapplication.py
|
{
"start": 203,
"end": 4372
}
|
class ____(BaseCommand):
help = "Shortcut to create a new application in a programmatic way"
def add_arguments(self, parser):
parser.add_argument(
"client_type",
type=str,
help="The client type, one of: %s" % ", ".join([ctype[0] for ctype in Application.CLIENT_TYPES]),
)
parser.add_argument(
"authorization_grant_type",
type=str,
help="The type of authorization grant to be used, one of: %s"
% ", ".join([gtype[0] for gtype in Application.GRANT_TYPES]),
)
parser.add_argument(
"--client-id",
type=str,
help="The ID of the new application",
)
parser.add_argument(
"--user",
type=str,
help="The user the application belongs to",
)
parser.add_argument(
"--redirect-uris",
type=str,
help="The redirect URIs, this must be a space separated string e.g 'URI1 URI2'",
)
parser.add_argument(
"--post-logout-redirect-uris",
type=str,
help="The post logout redirect URIs, this must be a space separated string e.g 'URI1 URI2'",
default="",
)
parser.add_argument(
"--client-secret",
type=str,
help="The secret for this application",
)
parser.add_argument(
"--no-hash-client-secret",
dest="hash_client_secret",
action="store_false",
help="Don't hash the client secret",
)
parser.set_defaults(hash_client_secret=True)
parser.add_argument(
"--name",
type=str,
help="The name this application",
)
parser.add_argument(
"--skip-authorization",
action="store_true",
help="If set, completely bypass the authorization form, even on the first use of the application",
)
parser.add_argument(
"--algorithm",
type=str,
help="The OIDC token signing algorithm for this application, one of: %s"
% ", ".join([atype[0] for atype in Application.ALGORITHM_TYPES if atype[0]]),
)
def handle(self, *args, **options):
# Extract all fields related to the application, this will work now and in the future
# and also with custom application models.
application_fields = [field.name for field in Application._meta.fields]
application_data = {}
for key, value in options.items():
# Data in options must be cleaned because there are unneeded key-value like
# verbosity and others. Also do not pass any None to the Application
# instance so default values will be generated for those fields
if key in application_fields and (isinstance(value, bool) or value):
if key == "user":
application_data.update({"user_id": value})
else:
application_data.update({key: value})
new_application = Application(**application_data)
try:
new_application.full_clean()
except ValidationError as exc:
errors = "\n ".join(
["- " + err_key + ": " + str(err_value) for err_key, err_value in exc.message_dict.items()]
)
self.stdout.write(self.style.ERROR("Please correct the following errors:\n %s" % errors))
else:
cleartext_secret = new_application.client_secret
new_application.save()
# Display the newly-created client_name or id.
client_name_or_id = application_data.get("name", new_application.client_id)
self.stdout.write(
self.style.SUCCESS("New application %s created successfully." % client_name_or_id)
)
# Print out the cleartext client_secret if it was autogenerated.
if "client_secret" not in application_data:
self.stdout.write(self.style.SUCCESS("client_secret: %s" % cleartext_secret))
|
Command
|
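A hypothetical programmatic invocation of the management command above via Django's `call_command`, assuming a configured project with `oauth2_provider` installed; all values are placeholders:

from django.core.management import call_command

call_command(
    "createapplication",
    "confidential",         # client_type (placeholder choice)
    "authorization-code",   # authorization_grant_type (placeholder choice)
    name="Example App",
    redirect_uris="https://example.com/callback",
)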
python
|
PrefectHQ__prefect
|
src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py
|
{
"start": 4028,
"end": 5084
}
|
class ____(DbtConfigs, abc.ABC):
type: str = Field(default=..., description="The name of the database warehouse.")
schema_: str = Field(
alias="schema",
description=(
"The schema that dbt will build objects into; "
"in BigQuery, a schema is actually a dataset."
),
)
threads: int = Field(
default=4,
description=(
"The number of threads representing the max number "
"of paths through the graph dbt may work on at once."
),
)
@model_validator(mode="before")
@classmethod
def handle_target_configs(cls, v: Any) -> Any:
"""Handle target configs field aliasing during validation"""
if isinstance(v, dict):
if "schema_" in v:
v["schema"] = v.pop("schema_")
# Handle nested blocks
for value in v.values():
if isinstance(value, dict) and "schema_" in value:
value["schema"] = value.pop("schema_")
return v
|
BaseTargetConfigs
|
python
|
has2k1__plotnine
|
plotnine/themes/themeable.py
|
{
"start": 31008,
"end": 31728
}
|
class ____(themeable):
"""
y-axis line
Parameters
----------
theme_element : element_line
"""
position = "left"
_omit = ["solid_capstyle"]
def apply_ax(self, ax: Axes):
super().apply_ax(ax)
properties = self.properties
# MPL has a default zorder of 2.5 for spines
# so layers 3+ would be drawn on top of the spines
if "zorder" not in properties:
properties["zorder"] = 10000
ax.spines["right"].set_visible(False)
ax.spines["left"].set(**properties)
def blank_ax(self, ax: Axes):
super().blank_ax(ax)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
|
axis_line_y
|
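A brief sketch of driving the themeable above from user code, assuming plotnine's public `theme`/`element_line` API and a pandas DataFrame `df` with columns `x` and `y`:

from plotnine import ggplot, aes, geom_point, theme, element_line

p = (
    ggplot(df, aes("x", "y"))
    + geom_point()
    + theme(axis_line_y=element_line(color="blue"))  # applies line properties to the left spine
)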
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 343837,
"end": 361910
}
|
class ____(VegaLiteSchema):
r"""
FacetEncodingFieldDef schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
align : dict, :class:`LayoutAlign`, :class:`RowColLayoutAlign`, Literal['all', 'each', 'none']
The alignment to apply to grid rows and columns. The supported string values are
``"all"``, ``"each"``, and ``"none"``.
* For ``"none"``, a flow layout will be used, in which adjacent subviews are simply
placed one after the other.
* For ``"each"``, subviews will be aligned into a clean grid structure, but each row
or column may be of variable size.
* For ``"all"``, subviews will be aligned and each row or column will be sized
identically based on the maximum observed size. String values for this property
will be applied to both grid rows and columns.
Alternatively, an object value of the form ``{"row": string, "column": string}`` can
be used to supply different alignments for rows and columns.
**Default value:** ``"all"``.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
bounds : Literal['full', 'flush']
The bounds calculation method to use for determining the extent of a sub-plot. One
of ``full`` (the default) or ``flush``.
* If set to ``full``, the entire calculated bounds (including axes, title, and
legend) will be used.
* If set to ``flush``, only the specified width and height values for the sub-view
will be used. The ``flush`` setting can be useful when attempting to place
sub-plots without axes or legends into a uniform grid structure.
**Default value:** ``"full"``
center : bool, dict, :class:`RowColboolean`
Boolean flag indicating if subviews should be centered relative to their respective
rows or columns.
An object value of the form ``{"row": boolean, "column": boolean}`` can be used to
supply different centering values for rows and columns.
**Default value:** ``false``
columns : float
The number of columns to include in the view composition layout.
**Default value**: ``undefined`` -- An infinite number of columns (a single row)
will be assumed. This is equivalent to ``hconcat`` (for ``concat``) and to using the
``column`` channel (for ``facet`` and ``repeat``).
**Note**:
1) This property is only for:
* the general (wrappable) ``concat`` operator (not ``hconcat``/``vconcat``)
* the ``facet`` and ``repeat`` operator with one field/repetition definition
(without row/column nesting)
2) Setting the ``columns`` to ``1`` is equivalent to ``vconcat`` (for ``concat``)
and to using the ``row`` channel (for ``facet`` and ``repeat``).
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
header : dict, :class:`Header`, None
An object defining properties of a facet's header.
sort : dict, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`EncodingSortField`, Sequence[dict, :class:`DateTime`], Literal['ascending', 'descending'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
spacing : dict, float, :class:`RowColnumber`
The spacing in pixels between sub-views of the composition operator. An object of
the form ``{"row": number, "column": number}`` can be used to set different spacing
values for rows and columns.
**Default value**: Depends on ``"spacing"`` property of `the view composition
configuration <https://vega.github.io/vega-lite/docs/config.html#view-config>`__
(``20`` by default)
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field, or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_schema = {"$ref": "#/definitions/FacetEncodingFieldDef"}
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
align: Optional[SchemaBase | Map | LayoutAlign_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
bounds: Optional[Literal["full", "flush"]] = Undefined,
center: Optional[bool | SchemaBase | Map] = Undefined,
columns: Optional[float] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
header: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| SortOrder_T
| None
] = Undefined,
spacing: Optional[float | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
align=align,
bandPosition=bandPosition,
bin=bin,
bounds=bounds,
center=center,
columns=columns,
field=field,
header=header,
sort=sort,
spacing=spacing,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
|
FacetEncodingFieldDef
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-vertex/tests/test_tool_required.py
|
{
"start": 681,
"end": 9272
}
|
class ____:
"""Test suite for Vertex AI tool_required functionality."""
@patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
def test_to_function_calling_config_tool_required_true(self, mock_create_client):
"""Test that _to_function_calling_config correctly sets mode to ANY when tool_required=True."""
mock_client = Mock()
mock_create_client.return_value = mock_client
llm = Vertex(model="gemini-pro", project="test-project")
config = llm._to_function_calling_config(tool_required=True)
# Check config mode through string representation since direct attribute access is problematic
config_str = str(config)
assert isinstance(config, ToolConfig)
assert "mode: ANY" in config_str
@patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
def test_to_function_calling_config_tool_required_false(self, mock_create_client):
"""Test that _to_function_calling_config correctly sets mode to AUTO when tool_required=False."""
mock_client = Mock()
mock_create_client.return_value = mock_client
llm = Vertex(model="gemini-pro", project="test-project")
config = llm._to_function_calling_config(tool_required=False)
# Check config mode through string representation
config_str = str(config)
assert isinstance(config, ToolConfig)
assert "mode: AUTO" in config_str
@patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
def test_prepare_chat_with_tools_tool_required_gemini(self, mock_create_client):
"""Test that tool_required is correctly passed to tool_config for Gemini models."""
mock_client = Mock()
mock_create_client.return_value = mock_client
llm = Vertex(model="gemini-pro", project="test-project")
# Test with tool_required=True
result = llm._prepare_chat_with_tools(tools=[search_tool], tool_required=True)
# Verify tool_config mode using string representation
tool_config_str = str(result["tool_config"])
assert "tool_config" in result
assert isinstance(result["tool_config"], ToolConfig)
assert "mode: ANY" in tool_config_str
assert len(result["tools"]) == 1
assert result["tools"][0]["name"] == "search_tool"
@patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
def test_prepare_chat_with_tools_tool_not_required_gemini(self, mock_create_client):
"""Test that tool_required=False correctly sets mode to AUTO for Gemini models."""
mock_client = Mock()
mock_create_client.return_value = mock_client
llm = Vertex(model="gemini-pro", project="test-project")
# Test with tool_required=False
result = llm._prepare_chat_with_tools(tools=[search_tool], tool_required=False)
# Verify tool_config mode using string representation
tool_config_str = str(result["tool_config"])
assert "tool_config" in result
assert isinstance(result["tool_config"], ToolConfig)
assert "mode: AUTO" in tool_config_str
assert len(result["tools"]) == 1
assert result["tools"][0]["name"] == "search_tool"
@patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
def test_prepare_chat_with_tools_default_behavior_gemini(self, mock_create_client):
"""Test default behavior when tool_required is not specified for Gemini models."""
mock_client = Mock()
mock_create_client.return_value = mock_client
llm = Vertex(model="gemini-pro", project="test-project")
# Test without specifying tool_required (should default to False)
result = llm._prepare_chat_with_tools(tools=[search_tool])
# Verify tool_config mode using string representation
tool_config_str = str(result["tool_config"])
assert "tool_config" in result
assert isinstance(result["tool_config"], ToolConfig)
# Should default to AUTO when tool_required=False (default)
assert "mode: AUTO" in tool_config_str
assert len(result["tools"]) == 1
assert result["tools"][0]["name"] == "search_tool"
@patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
def test_prepare_chat_with_tools_multiple_tools_gemini(self, mock_create_client):
"""Test tool_required with multiple tools for Gemini models."""
mock_client = Mock()
mock_create_client.return_value = mock_client
llm = Vertex(model="gemini-pro", project="test-project")
# Test with tool_required=True and multiple tools
result = llm._prepare_chat_with_tools(
tools=[search_tool, calculator_tool], tool_required=True
)
# Verify tool_config mode using string representation
tool_config_str = str(result["tool_config"])
assert "tool_config" in result
assert isinstance(result["tool_config"], ToolConfig)
assert "mode: ANY" in tool_config_str
assert len(result["tools"]) == 2
tool_names = [tool["name"] for tool in result["tools"]]
assert "search_tool" in tool_names
assert "calculator" in tool_names
@patch("vertexai.language_models.TextGenerationModel.from_pretrained")
@patch("vertexai.language_models.ChatModel.from_pretrained")
def test_prepare_chat_with_tools_non_gemini_no_tool_config(
self, mock_chat_from_pretrained, mock_text_from_pretrained
):
"""Test that non-Gemini models don't include tool_config regardless of tool_required."""
mock_chat_client = Mock()
mock_text_client = Mock()
mock_chat_from_pretrained.return_value = mock_chat_client
mock_text_from_pretrained.return_value = mock_text_client
# Use a non-Gemini model name
llm = Vertex(model="text-bison", project="test-project")
# Test with tool_required=True for non-Gemini model
result = llm._prepare_chat_with_tools(tools=[search_tool], tool_required=True)
# Non-Gemini models should not have tool_config
assert "tool_config" not in result
assert len(result["tools"]) == 1
assert result["tools"][0]["name"] == "search_tool"
# Test with tool_required=False for non-Gemini model
result = llm._prepare_chat_with_tools(tools=[search_tool], tool_required=False)
# Non-Gemini models should not have tool_config
assert "tool_config" not in result
assert len(result["tools"]) == 1
assert result["tools"][0]["name"] == "search_tool"
@patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
def test_prepare_chat_with_tools_no_tools_gemini(self, mock_create_client):
"""Test tool behavior when no tools are provided for Gemini models."""
mock_client = Mock()
mock_create_client.return_value = mock_client
llm = Vertex(model="gemini-pro", project="test-project")
# Test with tool_required=True but no tools
result = llm._prepare_chat_with_tools(tools=[], tool_required=True)
# Verify tool_config mode using string representation
tool_config_str = str(result["tool_config"])
# The current implementation still includes tool_config even with no tools if tool_required=True
assert "tool_config" in result
assert isinstance(result["tool_config"], ToolConfig)
assert "mode: ANY" in tool_config_str
assert result["tools"] is None
@patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
def test_prepare_chat_with_tools_with_kwargs_gemini(self, mock_create_client):
"""Test that additional kwargs are preserved when using tool_required for Gemini models."""
mock_client = Mock()
mock_create_client.return_value = mock_client
llm = Vertex(model="gemini-pro", project="test-project")
# Test with tool_required=True and additional kwargs
result = llm._prepare_chat_with_tools(
tools=[search_tool], tool_required=True, temperature=0.7, max_tokens=1000
)
# Verify tool_config mode using string representation
tool_config_str = str(result["tool_config"])
assert "tool_config" in result
assert isinstance(result["tool_config"], ToolConfig)
assert "mode: ANY" in tool_config_str
assert len(result["tools"]) == 1
assert result["tools"][0]["name"] == "search_tool"
assert result["temperature"] == 0.7
assert result["max_tokens"] == 1000
|
TestVertexToolRequired
|
python
|
ethereum__web3.py
|
web3/types.py
|
{
"start": 7007,
"end": 7518
}
|
class ____(TypedDict, total=False):
error: RPCError
id: RPCId
jsonrpc: Literal["2.0"]
result: Any
# eth_subscribe
method: Literal["eth_subscription"]
params: EthSubscriptionParams
EthSubscriptionResult = Union[
BlockData, # newHeads
TxData, # newPendingTransactions, full_transactions=True
HexBytes, # newPendingTransactions, full_transactions=False
LogReceipt, # logs
SyncProgress, # syncing
GethSyncingSubscriptionResult, # geth syncing
]
|
RPCResponse
|
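For orientation only, a minimal sketch of what payloads shaped like the RPCResponse TypedDict above could look like as plain dicts; the field names come from the snippet, while the literal values (ids, hex strings, error codes) are made up for illustration.

# Plain dicts shaped like the RPCResponse TypedDict above (total=False, so partial dicts are fine).
success_response = {
    "jsonrpc": "2.0",
    "id": 1,
    "result": "0x10d4f",  # e.g. a hex block number
}
error_response = {
    "jsonrpc": "2.0",
    "id": 2,
    "error": {"code": -32601, "message": "method not found"},
}
subscription_push = {
    "jsonrpc": "2.0",
    "method": "eth_subscription",
    "params": {"subscription": "0xabc123", "result": {}},  # result shape depends on the subscription type
}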
python
|
huggingface__transformers
|
src/transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py
|
{
"start": 18114,
"end": 18236
}
|
class ____(Qwen3VLCausalLMOutputWithPast):
aux_loss: Optional[torch.FloatTensor] = None
|
Qwen3VLMoeCausalLMOutputWithPast
|
python
|
etianen__django-reversion
|
tests/test_app/tests/test_api.py
|
{
"start": 7964,
"end": 8397
}
|
class ____(TestModelMixin, TestBase):
def testSetComment(self):
with reversion.create_revision():
reversion.set_comment("comment v1")
obj = TestModel.objects.create()
self.assertSingleRevision((obj,), comment="comment v1")
def testSetCommentNoBlock(self):
with self.assertRaises(reversion.RevisionManagementError):
reversion.set_comment("comment v1")
|
SetCommentTest
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_dlp.py
|
{
"start": 4559,
"end": 5443
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_create_dlp_job(self, mock_hook):
mock_hook.return_value.create_dlp_job.return_value = DlpJob(
name=DLP_JOB_PATH, state=DlpJob.JobState.PENDING
)
operator = CloudDLPCreateDLPJobOperator(project_id=PROJECT_ID, task_id="id")
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.create_dlp_job.assert_called_once_with(
project_id=PROJECT_ID,
inspect_job=None,
risk_job=None,
job_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
wait_until_finished=True,
)
|
TestCloudDLPCreateDLPJobOperator
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/sorting.py
|
{
"start": 4443,
"end": 5166
}
|
class ____(AutoEnum):
"""Defines deployment sorting options."""
CREATED_DESC = AutoEnum.auto()
UPDATED_DESC = AutoEnum.auto()
NAME_ASC = AutoEnum.auto()
NAME_DESC = AutoEnum.auto()
@db_injector
def as_sql_sort(self, db: "PrefectDBInterface") -> Iterable[sa.ColumnElement[Any]]:
"""Return an expression used to sort task runs"""
sort_mapping: dict[str, Iterable[sa.ColumnElement[Any]]] = {
"CREATED_DESC": [db.Deployment.created.desc()],
"UPDATED_DESC": [db.Deployment.updated.desc()],
"NAME_ASC": [db.Deployment.name.asc()],
"NAME_DESC": [db.Deployment.name.desc()],
}
return sort_mapping[self.value]
|
DeploymentSort
|
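A self-contained sketch of the same pattern outside Prefect: an enum of sort options mapped to SQLAlchemy column expressions and applied with order_by. The table and enum below are hypothetical stand-ins, not Prefect's models or its db_injector plumbing.

# Stand-alone illustration of the enum -> order_by pattern above (hypothetical table and enum).
import enum
import sqlalchemy as sa

metadata = sa.MetaData()
deployment = sa.Table(
    "deployment",
    metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("name", sa.String),
    sa.Column("created", sa.DateTime),
)

class DeploymentSortDemo(str, enum.Enum):
    CREATED_DESC = "CREATED_DESC"
    NAME_ASC = "NAME_ASC"

    def as_sql_sort(self):
        # Same idea as the Prefect method: map the enum value to column expressions.
        mapping = {
            "CREATED_DESC": [deployment.c.created.desc()],
            "NAME_ASC": [deployment.c.name.asc()],
        }
        return mapping[self.value]

query = sa.select(deployment).order_by(*DeploymentSortDemo.NAME_ASC.as_sql_sort())
print(query)  # SELECT ... FROM deployment ORDER BY deployment.name ASC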
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/utils/external_token_supplier.py
|
{
"start": 4006,
"end": 7129
}
|
class ____(CacheTokenSupplier):
"""
Class that retrieves an OIDC token from an external IdP using OAuth2.0 Client Credentials Grant flow.
This class implements the ``SubjectTokenSupplier`` interface class used by ``google.auth.identity_pool.Credentials``
:params oidc_issuer_url: URL of the IdP that performs OAuth2.0 Client Credentials Grant flow and returns an OIDC token.
:params client_id: Client ID of the application requesting the token
:params client_secret: Client secret of the application requesting the token
:params extra_params_kwargs: Extra parameters to be passed in the payload of the POST request to the `oidc_issuer_url`
See also:
https://googleapis.dev/python/google-auth/latest/reference/google.auth.identity_pool.html#google.auth.identity_pool.SubjectTokenSupplier
"""
def __init__(
self,
oidc_issuer_url: str,
client_id: str,
client_secret: str,
**extra_params_kwargs: Any,
) -> None:
super().__init__()
self.oidc_issuer_url = oidc_issuer_url
self.client_id = client_id
self.client_secret = client_secret
self.extra_params_kwargs = extra_params_kwargs
@cache_token_decorator
def get_subject_token(self, context: SupplierContext, request: Request) -> tuple[str, int]:
"""Perform Client Credentials Grant flow with IdP and retrieves an OIDC token and expiration time."""
self.log.info("Requesting new OIDC token from external IdP.")
try:
response = requests.post(
self.oidc_issuer_url,
data={
"grant_type": "client_credentials",
"client_id": self.client_id,
"client_secret": self.client_secret,
**self.extra_params_kwargs,
},
)
response.raise_for_status()
except requests.HTTPError as e:
raise RefreshError(str(e))
except requests.ConnectionError as e:
raise RefreshError(str(e))
try:
response_dict = response.json()
except requests.JSONDecodeError:
raise RefreshError(f"Didn't get a json response from {self.oidc_issuer_url}")
# These fields are required
if {"access_token", "expires_in"} - set(response_dict.keys()):
# TODO more information about the error can be provided in the exception by inspecting the response
raise RefreshError(f"No access token returned from {self.oidc_issuer_url}")
return response_dict["access_token"], response_dict["expires_in"]
def get_subject_key(self) -> str:
"""
Create a cache key using the OIDC issuer URL, client ID, client secret and additional parameters.
Instances with the same credentials will share tokens.
"""
cache_key = (
self.oidc_issuer_url
+ self.client_id
+ self.client_secret
+ ",".join(sorted(self.extra_params_kwargs))
)
return cache_key
|
ClientCredentialsGrantFlowTokenSupplier
|
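For orientation, a stripped-down sketch of the same Client Credentials Grant exchange the class performs, using only requests; the issuer URL and credentials are placeholders, and the google-auth plumbing (SupplierContext, caching decorator) is deliberately left out.

# Minimal sketch of the OAuth2.0 Client Credentials Grant exchange performed above.
# URL and credentials are placeholders; error handling is reduced to the essentials.
import requests

def fetch_oidc_token(issuer_url: str, client_id: str, client_secret: str, **extra) -> tuple:
    response = requests.post(
        issuer_url,
        data={
            "grant_type": "client_credentials",
            "client_id": client_id,
            "client_secret": client_secret,
            **extra,
        },
        timeout=30,
    )
    response.raise_for_status()
    payload = response.json()
    # The supplier above requires both of these fields before trusting the response.
    return payload["access_token"], payload["expires_in"]

# token, expires_in = fetch_oidc_token("https://idp.example.com/oauth/token", "my-client", "my-secret")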
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 345424,
"end": 345775
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("platform", "url")
platform = sgqlc.types.Field(
sgqlc.types.non_null(FundingPlatform), graphql_name="platform"
)
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
|
FundingLink
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_hourly_reports.py
|
{
"start": 15350,
"end": 22663
}
|
class ____(HourlyReportsTestWithStateChangesAfterMigration):
stream_name = "keyword_performance_report_hourly"
report_file = "keyword_performance_report_hourly"
records_number = 24
state_file = "hourly_reports_state"
incremental_report_file = "keyword_performance_report_hourly_incremental"
report_file_with_records_further_start_date = "keyword_performance_report_hourly_with_record_further_config_start_date"
state_file_legacy = "hourly_reports_state_legacy"
state_file_after_migration = "hourly_reports_state_after_migration"
state_file_after_migration_with_cursor_further_config_start_date = (
"hourly_reports_state_after_migration_with_cursor_further_config_start_date"
)
incremental_report_file_with_records_further_cursor = "keyword_performance_report_hourly_incremental_with_records_further_cursor"
def mock_report_apis(self):
self.mock_user_query_api(response_template="user_query")
self.mock_accounts_search_api(
response_template="accounts_search_for_report",
body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "KeywordPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "KeywordPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountId", "CampaignId", "AdGroupId", "KeywordId", "Keyword", "AdId", "TimePeriod", "CurrencyCode", "DeliveredMatchType", "AdDistribution", "DeviceType", "Language", "Network", "DeviceOS", "TopVsOther", "BidMatchType", "AccountName", "CampaignName", "AdGroupName", "KeywordStatus", "Impressions", "Clicks", "Ctr", "CurrentMaxCpc", "Spend", "CostPerConversion", "QualityScore", "ExpectedCtr", "AdRelevance", "LandingPageExperience", "QualityImpact", "Assists", "ReturnOnAdSpend", "CostPerAssist", "CustomParameters", "FinalAppUrl", "Mainline1Bid", "MainlineBid", "FirstPageBid", "FinalUrlSuffix", "ViewThroughConversions", "ViewThroughConversionsQualified", "AllCostPerConversion", "AllReturnOnAdSpend", "Conversions", "ConversionRate", "ConversionsQualified", "AverageCpc", "AveragePosition", "AverageCpm", "AllConversions", "AllConversionRate", "AllRevenue", "AllRevenuePerConversion", "Revenue", "RevenuePerConversion", "RevenuePerAssist", "CampaignStatus", "TopImpressionRatePercent", "AdGroupStatus", "TrackingTemplate", "BidStrategyType", "AccountStatus", "FinalUrl", "AdType", "KeywordLabels", "FinalMobileUrl", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "BaseCampaignId", "AccountNumber", "DestinationUrl"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for second read
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "KeywordPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "KeywordPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountId", "CampaignId", "AdGroupId", "KeywordId", "Keyword", "AdId", "TimePeriod", "CurrencyCode", "DeliveredMatchType", "AdDistribution", "DeviceType", "Language", "Network", "DeviceOS", "TopVsOther", "BidMatchType", "AccountName", "CampaignName", "AdGroupName", "KeywordStatus", "Impressions", "Clicks", "Ctr", "CurrentMaxCpc", "Spend", "CostPerConversion", "QualityScore", "ExpectedCtr", "AdRelevance", "LandingPageExperience", "QualityImpact", "Assists", "ReturnOnAdSpend", "CostPerAssist", "CustomParameters", "FinalAppUrl", "Mainline1Bid", "MainlineBid", "FirstPageBid", "FinalUrlSuffix", "ViewThroughConversions", "ViewThroughConversionsQualified", "AllCostPerConversion", "AllReturnOnAdSpend", "Conversions", "ConversionRate", "ConversionsQualified", "AverageCpc", "AveragePosition", "AverageCpm", "AllConversions", "AllConversionRate", "AllRevenue", "AllRevenuePerConversion", "Revenue", "RevenuePerConversion", "RevenuePerAssist", "CampaignStatus", "TopImpressionRatePercent", "AdGroupStatus", "TrackingTemplate", "BidStrategyType", "AccountStatus", "FinalUrl", "AdType", "KeywordLabels", "FinalMobileUrl", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "BaseCampaignId", "AccountNumber", "DestinationUrl"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for no config start date test
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "KeywordPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "KeywordPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountId", "CampaignId", "AdGroupId", "KeywordId", "Keyword", "AdId", "TimePeriod", "CurrencyCode", "DeliveredMatchType", "AdDistribution", "DeviceType", "Language", "Network", "DeviceOS", "TopVsOther", "BidMatchType", "AccountName", "CampaignName", "AdGroupName", "KeywordStatus", "Impressions", "Clicks", "Ctr", "CurrentMaxCpc", "Spend", "CostPerConversion", "QualityScore", "ExpectedCtr", "AdRelevance", "LandingPageExperience", "QualityImpact", "Assists", "ReturnOnAdSpend", "CostPerAssist", "CustomParameters", "FinalAppUrl", "Mainline1Bid", "MainlineBid", "FirstPageBid", "FinalUrlSuffix", "ViewThroughConversions", "ViewThroughConversionsQualified", "AllCostPerConversion", "AllReturnOnAdSpend", "Conversions", "ConversionRate", "ConversionsQualified", "AverageCpc", "AveragePosition", "AverageCpm", "AllConversions", "AllConversionRate", "AllRevenue", "AllRevenuePerConversion", "Revenue", "RevenuePerConversion", "RevenuePerAssist", "CampaignStatus", "TopImpressionRatePercent", "AdGroupStatus", "TrackingTemplate", "BidStrategyType", "AccountStatus", "FinalUrl", "AdType", "KeywordLabels", "FinalMobileUrl", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "BaseCampaignId", "AccountNumber", "DestinationUrl"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}'
)
|
TestKeywordPerformanceReportHourlyStream
|
python
|
django__django
|
django/db/models/functions/datetime.py
|
{
"start": 7367,
"end": 12021
}
|
class ____(TimezoneMixin, Transform):
kind = None
tzinfo = None
def __init__(
self,
expression,
output_field=None,
tzinfo=None,
**extra,
):
self.tzinfo = tzinfo
super().__init__(expression, output_field=output_field, **extra)
def as_sql(self, compiler, connection):
sql, params = compiler.compile(self.lhs)
tzname = None
if isinstance(self.lhs.output_field, DateTimeField):
tzname = self.get_tzname()
elif self.tzinfo is not None:
raise ValueError("tzinfo can only be used with DateTimeField.")
if isinstance(self.output_field, DateTimeField):
sql, params = connection.ops.datetime_trunc_sql(
self.kind, sql, tuple(params), tzname
)
elif isinstance(self.output_field, DateField):
sql, params = connection.ops.date_trunc_sql(
self.kind, sql, tuple(params), tzname
)
elif isinstance(self.output_field, TimeField):
sql, params = connection.ops.time_trunc_sql(
self.kind, sql, tuple(params), tzname
)
else:
raise ValueError(
"Trunc only valid on DateField, TimeField, or DateTimeField."
)
return sql, params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
copy = super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
field = copy.lhs.output_field
# DateTimeField is a subclass of DateField so this works for both.
if not isinstance(field, (DateField, TimeField)):
raise TypeError(
"%r isn't a DateField, TimeField, or DateTimeField." % field.name
)
# If self.output_field was None, then accessing the field will trigger
# the resolver to assign it to self.lhs.output_field.
if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)):
raise ValueError(
"output_field must be either DateField, TimeField, or DateTimeField"
)
# Passing dates or times to functions expecting datetimes is most
# likely a mistake.
class_output_field = (
self.__class__.output_field
if isinstance(self.__class__.output_field, Field)
else None
)
output_field = class_output_field or copy.output_field
has_explicit_output_field = (
class_output_field or field.__class__ is not copy.output_field.__class__
)
if type(field) is DateField and (
isinstance(output_field, DateTimeField)
or copy.kind in ("hour", "minute", "second", "time")
):
raise ValueError(
"Cannot truncate DateField '%s' to %s."
% (
field.name,
(
output_field.__class__.__name__
if has_explicit_output_field
else "DateTimeField"
),
)
)
elif isinstance(field, TimeField) and (
isinstance(output_field, DateTimeField)
or copy.kind in ("year", "quarter", "month", "week", "day", "date")
):
raise ValueError(
"Cannot truncate TimeField '%s' to %s."
% (
field.name,
(
output_field.__class__.__name__
if has_explicit_output_field
else "DateTimeField"
),
)
)
return copy
def convert_value(self, value, expression, connection):
if isinstance(self.output_field, DateTimeField):
if not settings.USE_TZ:
pass
elif value is not None:
value = value.replace(tzinfo=None)
value = timezone.make_aware(value, self.tzinfo)
elif not connection.features.has_zoneinfo_database:
raise ValueError(
"Database returned an invalid datetime value. Are time "
"zone definitions for your database installed?"
)
elif isinstance(value, datetime):
if isinstance(self.output_field, DateField):
value = value.date()
elif isinstance(self.output_field, TimeField):
value = value.time()
return value
|
TruncBase
|
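As a plain-Python illustration of what the SQL-level truncation above computes, here is a hedged sketch that truncates a datetime to a given kind. It mirrors date_trunc-style semantics for a few kinds only and is not Django code.

# Conceptual sketch of SQL date_trunc semantics for a few kinds; not Django code.
from datetime import datetime

def trunc(value: datetime, kind: str) -> datetime:
    if kind == "year":
        return value.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
    if kind == "month":
        return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    if kind == "day":
        return value.replace(hour=0, minute=0, second=0, microsecond=0)
    if kind == "hour":
        return value.replace(minute=0, second=0, microsecond=0)
    raise ValueError("This sketch only handles year/month/day/hour.")

print(trunc(datetime(2024, 5, 6, 13, 45, 7), "month"))  # 2024-05-01 00:00:00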
python
|
getsentry__sentry
|
src/sentry/workflow_engine/endpoints/serializers/incident_groupopenperiod_serializer.py
|
{
"start": 418,
"end": 909
}
|
class ____(Serializer):
def serialize(
self, obj: IncidentGroupOpenPeriod, attrs: Mapping[str, Any], user, **kwargs
) -> IncidentGroupOpenPeriodSerializerResponse:
return {
"incidentId": str(obj.incident_id) if obj.incident_id else None,
"incidentIdentifier": obj.incident_identifier,
"groupId": str(obj.group_open_period.group_id),
"openPeriodId": str(obj.group_open_period.id),
}
|
IncidentGroupOpenPeriodSerializer
|
python
|
weaviate__weaviate-python-client
|
profiling/test_refs.py
|
{
"start": 1310,
"end": 1416
}
|
class ____:
properties: Dict[str, Any]
class_name: str
uuid: uuid_lib.UUID
@dataclass
|
DataObject
|
python
|
aio-libs__aiohttp
|
tests/test_tracing.py
|
{
"start": 2973,
"end": 5313
}
|
class ____:
@pytest.mark.parametrize(
"signal,params,param_obj",
[
("request_start", (Mock(), Mock(), Mock()), TraceRequestStartParams),
(
"request_chunk_sent",
(Mock(), Mock(), Mock()),
TraceRequestChunkSentParams,
),
(
"response_chunk_received",
(Mock(), Mock(), Mock()),
TraceResponseChunkReceivedParams,
),
("request_end", (Mock(), Mock(), Mock(), Mock()), TraceRequestEndParams),
(
"request_exception",
(Mock(), Mock(), Mock(), Mock()),
TraceRequestExceptionParams,
),
(
"request_redirect",
(Mock(), Mock(), Mock(), Mock()),
TraceRequestRedirectParams,
),
("connection_queued_start", (), TraceConnectionQueuedStartParams),
("connection_queued_end", (), TraceConnectionQueuedEndParams),
("connection_create_start", (), TraceConnectionCreateStartParams),
("connection_create_end", (), TraceConnectionCreateEndParams),
("connection_reuseconn", (), TraceConnectionReuseconnParams),
("dns_resolvehost_start", (Mock(),), TraceDnsResolveHostStartParams),
("dns_resolvehost_end", (Mock(),), TraceDnsResolveHostEndParams),
("dns_cache_hit", (Mock(),), TraceDnsCacheHitParams),
("dns_cache_miss", (Mock(),), TraceDnsCacheMissParams),
],
)
async def test_send( # type: ignore[misc]
self, signal: str, params: tuple[Mock, ...], param_obj: Any
) -> None:
session = Mock()
trace_request_ctx = Mock()
callback = mock.AsyncMock()
trace_config = TraceConfig()
getattr(trace_config, "on_%s" % signal).append(callback)
trace_config.freeze()
trace = Trace(
session,
trace_config,
trace_config.trace_config_ctx(trace_request_ctx=trace_request_ctx),
)
await getattr(trace, "send_%s" % signal)(*params)
callback.assert_called_once_with(
session,
SimpleNamespace(trace_request_ctx=trace_request_ctx),
param_obj(*params),
)
|
TestTrace
|
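For context on what the test exercises, a short sketch of how client tracing signals are normally wired up in aiohttp; this follows the documented TraceConfig usage rather than the test's mocks, and the URL is a placeholder.

# Sketch of wiring a tracing callback into an aiohttp ClientSession.
import asyncio
import aiohttp

async def on_request_start(session, trace_config_ctx, params):
    # params is a TraceRequestStartParams carrying method, url and headers.
    print("starting request:", params.url)

async def main():
    trace_config = aiohttp.TraceConfig()
    trace_config.on_request_start.append(on_request_start)
    async with aiohttp.ClientSession(trace_configs=[trace_config]) as session:
        async with session.get("https://example.com") as resp:
            await resp.read()

# asyncio.run(main())  # commented out: performs a real HTTP request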
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_compiler.py
|
{
"start": 269839,
"end": 271878
}
|
class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
def _fixture(self):
m = MetaData()
return Table("foo", m, Column("id", Integer))
bool_table = table("t", column("x", Boolean))
def test_coerce_bool_where(self):
self.assert_compile(
select(self.bool_table).where(self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x",
)
def test_coerce_bool_where_non_native(self):
self.assert_compile(
select(self.bool_table).where(self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x = 1",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
self.assert_compile(
select(self.bool_table).where(~self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x = 0",
dialect=default.DefaultDialect(supports_native_boolean=False),
)
def test_val_and_false(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, False), "false")
def test_val_and_true_coerced(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, True), "foo.id = :id_1")
def test_val_is_null_coerced(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == None), "foo.id IS NULL") # noqa
def test_val_and_None(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, None), "foo.id = :id_1 AND NULL")
def test_None_and_val(self):
t = self._fixture()
self.assert_compile(and_(None, t.c.id == 1), "NULL AND foo.id = :id_1")
def test_None_and_nothing(self):
# current convention is None in and_()
# returns None. May want
# to revise this at some point.
self.assert_compile(and_(None), "NULL")
def test_val_and_null(self):
t = self._fixture()
self.assert_compile(
and_(t.c.id == 1, null()), "foo.id = :id_1 AND NULL"
)
|
CoercionTest
|
python
|
getsentry__sentry
|
tests/sentry/middleware/integrations/test_classifications.py
|
{
"start": 2447,
"end": 5057
}
|
class ____(BaseClassificationTestCase):
get_response = MagicMock()
integration_cls = IntegrationClassification(response_handler=get_response)
prefix = IntegrationClassification.integration_prefix
@override_settings(SILO_MODE=SiloMode.CONTROL)
@patch.object(
IntegrationClassification,
"should_operate",
wraps=integration_cls.should_operate,
)
def test_inactive_on_non_prefix(self, mock_should_operate) -> None:
request = self.factory.get("/settings/")
assert mock_should_operate(request) is False
self.validate_mock_ran_with_noop(request, mock_should_operate)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@patch.object(
IntegrationClassification,
"_identify_provider",
wraps=integration_cls._identify_provider,
)
def test_invalid_provider(self, mock_identify_provider) -> None:
request = self.factory.post(f"{self.prefix}🔥🔥🔥/webhook/")
assert mock_identify_provider(request) == "🔥🔥🔥"
self.validate_mock_ran_with_noop(request, mock_identify_provider)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@patch.object(
IntegrationClassification,
"_identify_provider",
wraps=integration_cls._identify_provider,
)
def test_empty_provider(self, mock_identify_provider) -> None:
request = self.factory.post(f"{self.prefix}/webhook/")
assert mock_identify_provider(request) is None
self.validate_mock_ran_with_noop(request, mock_identify_provider)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@patch.object(
IntegrationClassification,
"_identify_provider",
wraps=integration_cls._identify_provider,
)
def test_unknown_provider(self, mock_identify_provider) -> None:
provider = "acme"
request = self.factory.post(f"{self.prefix}{provider}/webhook/")
assert mock_identify_provider(request) == provider
assert self.integration_cls.integration_parsers.get(provider) is None
self.validate_mock_ran_with_noop(request, mock_identify_provider)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@patch.object(SlackRequestParser, "get_response")
def test_returns_parser_get_response(self, mock_parser_get_response) -> None:
result = HttpResponse(status=204)
mock_parser_get_response.return_value = result
response = self.integration_cls.get_response(
self.factory.post(f"{self.prefix}{SlackRequestParser.provider}/webhook/")
)
assert result == response
|
IntegrationClassificationTest
|
python
|
getsentry__sentry
|
src/sentry/snuba/query_subscriptions/run.py
|
{
"start": 780,
"end": 4132
}
|
class ____(ProcessingStrategyFactory[KafkaPayload]):
def __init__(
self,
dataset: str,
max_batch_size: int,
max_batch_time: int,
num_processes: int,
input_block_size: int | None,
output_block_size: int | None,
multi_proc: bool = True,
topic_override: str | None = None,
):
self.dataset = Dataset(dataset)
self.logical_topic = dataset_to_logical_topic[self.dataset]
if topic_override:
self.logical_topic = topic_override
self.topic = get_topic_definition(Topic(self.logical_topic))["real_topic_name"]
self.max_batch_size = max_batch_size
self.max_batch_time = max_batch_time
self.input_block_size = input_block_size
self.output_block_size = output_block_size
self.multi_proc = multi_proc
self.pool = MultiprocessingPool(num_processes)
def create_with_partitions(
self,
commit: Commit,
partitions: Mapping[Partition, int],
) -> ProcessingStrategy[KafkaPayload]:
callable = partial(process_message, self.dataset, self.topic, self.logical_topic)
if self.multi_proc:
return run_task_with_multiprocessing(
function=callable,
next_step=CommitOffsets(commit),
max_batch_size=self.max_batch_size,
max_batch_time=self.max_batch_time,
pool=self.pool,
input_block_size=self.input_block_size,
output_block_size=self.output_block_size,
)
else:
return RunTask(callable, CommitOffsets(commit))
def shutdown(self) -> None:
self.pool.close()
def process_message(
dataset: Dataset, topic: str, logical_topic: str, message: Message[KafkaPayload]
) -> None:
from sentry.snuba.query_subscriptions.consumer import handle_message
from sentry.utils import metrics
with (
sentry_sdk.start_transaction(
op="handle_message",
name="query_subscription_consumer_process_message",
custom_sampling_context={"sample_rate": options.get("subscriptions-query.sample-rate")},
),
metrics.timer("snuba_query_subscriber.handle_message", tags={"dataset": dataset.value}),
):
value = message.value
assert isinstance(value, BrokerValue)
offset = value.offset
partition = value.partition.index
message_value = value.payload.value
try:
handle_message(
message_value,
offset,
partition,
topic,
dataset.value,
get_codec(logical_topic),
)
except Exception:
# This is a failsafe to make sure that no individual message will block this
# consumer. If we see errors occurring here they need to be investigated to
# make sure that we're not dropping legitimate messages.
logger.exception(
"Unexpected error while handling message in QuerySubscriptionStrategy. Skipping message.",
extra={
"offset": offset,
"partition": partition,
"value": message_value,
},
)
|
QuerySubscriptionStrategyFactory
|
python
|
pikepdf__pikepdf
|
src/pikepdf/models/image.py
|
{
"start": 1255,
"end": 1359
}
|
class ____(Exception):
"""Indicates that an image cannot be directly extracted."""
|
NotExtractableError
|
python
|
boto__boto3
|
tests/integration/test_s3.py
|
{
"start": 3598,
"end": 5469
}
|
class ____:
def __init__(self):
self.rootdir = tempfile.mkdtemp()
def remove_all(self):
shutil.rmtree(self.rootdir)
def create_file(self, filename, contents, mode='w'):
"""Creates a file in a tmpdir
``filename`` should be a relative path, e.g. "foo/bar/baz.txt"
It will be translated into a full path in a tmp dir.
``mode`` is the mode the file should be opened in, either ``w`` or
``wb``.
Returns the full path to the file.
"""
full_path = os.path.join(self.rootdir, filename)
if not os.path.isdir(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
with open(full_path, mode) as f:
f.write(contents)
return full_path
def create_file_with_size(self, filename, filesize):
filename = self.create_file(filename, contents='')
chunksize = 8192
with open(filename, 'wb') as f:
for i in range(int(math.ceil(filesize / float(chunksize)))):
f.write(b'a' * chunksize)
return filename
def append_file(self, filename, contents):
"""Append contents to a file
``filename`` should be a relative path, e.g. "foo/bar/baz.txt"
It will be translated into a full path in a tmp dir.
Returns the full path to the file.
"""
full_path = os.path.join(self.rootdir, filename)
if not os.path.isdir(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
with open(full_path, 'a') as f:
f.write(contents)
return full_path
def full_path(self, filename):
"""Translate relative path to full path in temp dir.
f.full_path('foo/bar.txt') -> /tmp/asdfasd/foo/bar.txt
"""
return os.path.join(self.rootdir, filename)
|
FileCreator
|
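A short usage sketch, assuming the FileCreator class above is available in the same scope; the relative paths are illustrative.

# Usage sketch for the FileCreator helper above (assumes the class is in scope).
import os

files = FileCreator()
try:
    path = files.create_file("foo/bar.txt", "hello\n")
    files.append_file("foo/bar.txt", "world\n")
    big = files.create_file_with_size("big.bin", filesize=1024 * 1024)
    assert os.path.exists(big)
    assert files.full_path("foo/bar.txt") == path
finally:
    files.remove_all()  # clean up the temporary directory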
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-number-of-events-that-can-be-attended.py
|
{
"start": 84,
"end": 704
}
|
class ____(object):
def maxEvents(self, events):
"""
:type events: List[List[int]]
:rtype: int
"""
events.sort(reverse=True)
min_heap = []
result = 0
for d in xrange(1, max(events, key=lambda x:x[1])[1]+1):
while events and events[-1][0] == d:
heapq.heappush(min_heap, events.pop()[1])
while min_heap and min_heap[0] == d-1:
heapq.heappop(min_heap)
if not min_heap:
continue
heapq.heappop(min_heap)
result += 1
return result
|
Solution
|
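Since the snippet above is Python 2 (xrange), here is a hedged Python 3 rendering of the same greedy idea plus a tiny check; it is a sketch of the technique, not the repository's code.

# Python 3 sketch of the same greedy algorithm: sweep the days, push events that
# start today into a min-heap keyed by end day, drop expired ones, and attend
# the event that ends soonest.
import heapq

def max_events(events):
    if not events:
        return 0
    events.sort(reverse=True)          # pop() then yields events ordered by start day
    min_heap, result = [], 0
    last_day = max(e[1] for e in events)
    for day in range(1, last_day + 1):
        while events and events[-1][0] == day:
            heapq.heappush(min_heap, events.pop()[1])
        while min_heap and min_heap[0] < day:
            heapq.heappop(min_heap)    # already over, cannot be attended
        if min_heap:
            heapq.heappop(min_heap)    # attend the event ending soonest
            result += 1
    return result

assert max_events([[1, 2], [2, 3], [3, 4]]) == 3
assert max_events([[1, 2], [2, 3], [3, 4], [1, 2]]) == 4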
python
|
huggingface__transformers
|
src/transformers/models/speecht5/modeling_speecht5.py
|
{
"start": 16983,
"end": 17891
}
|
class ____(nn.Module):
"""
Scaled positional encoding, see §3.2 in https://huggingface.co/papers/1809.08895
"""
def __init__(self, dropout, dim, max_len=5000):
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, dim, 2, dtype=torch.int64).float() * -(math.log(10000.0) / dim))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe = pe.unsqueeze(0)
super().__init__()
self.register_buffer("pe", pe, persistent=False)
self.dropout = nn.Dropout(p=dropout)
self.dim = dim
self.alpha = nn.Parameter(torch.tensor(1.0))
def forward(self, emb):
emb = emb + self.alpha * self.pe[:, : emb.size(1)]
emb = self.dropout(emb)
return emb
|
SpeechT5ScaledPositionalEncoding
|
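A small shape-check sketch, assuming the module class above is in scope (it lives inside transformers' SpeechT5 modeling file); dropout is set to 0 so the output is deterministic.

# Shape-check sketch for the scaled positional encoding above (class assumed in scope).
import torch

enc = SpeechT5ScaledPositionalEncoding(dropout=0.0, dim=16, max_len=100)
emb = torch.zeros(2, 10, 16)          # (batch, seq_len, dim)
out = enc(emb)
assert out.shape == (2, 10, 16)
# With zero input and alpha initialised to 1.0, the output is just the sinusoidal table.
assert torch.allclose(out, enc.pe[:, :10].expand(2, -1, -1))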
python
|
PrefectHQ__prefect
|
src/integrations/prefect-databricks/prefect_databricks/models/jobs.py
|
{
"start": 20576,
"end": 20952
}
|
class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
basic_auth: Optional[DockerBasicAuth] = Field(
None, description="Basic authentication information for Docker repository."
)
url: Optional[str] = Field(None, description="URL for the Docker image.")
|
DockerImage
|
python
|
apache__airflow
|
providers/fab/src/airflow/providers/fab/auth_manager/views/roles_list.py
|
{
"start": 941,
"end": 1512
}
|
class ____(RoleModelView):
"""Customize permission names for FAB's builtin RoleModelView."""
class_permission_name = permissions.RESOURCE_ROLE
method_permission_name = {
"delete": "delete",
"download": "read",
"show": "read",
"list": "read",
"edit": "edit",
"add": "create",
"copy_role": "create",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
|
CustomRoleModelView
|
python
|
fluentpython__example-code-2e
|
17-it-generator/sentence_iter2.py
|
{
"start": 501,
"end": 749
}
|
class ____:
def __init__(self, word_iter):
self.word_iter = word_iter # <3>
def __next__(self):
match = next(self.word_iter) # <4>
return match.group() # <5>
def __iter__(self):
return self
|
SentenceIter
|
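A usage sketch, assuming SentenceIter is in scope; in the book's example the word iterator comes from re.finditer, which is what is used here.

# Usage sketch: feed SentenceIter a re.finditer match iterator (class assumed in scope).
import re

RE_WORD = re.compile(r"\w+")
it = SentenceIter(RE_WORD.finditer("the quick brown fox"))
assert next(it) == "the"
assert list(it) == ["quick", "brown", "fox"]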
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/trainer/connectors/test_signal_connector.py
|
{
"start": 5712,
"end": 6867
}
|
class ____:
def signal_handler(self):
pass
@pytest.mark.parametrize(
("handler", "expected_return"),
[
(None, False),
(signal.Handlers.SIG_IGN, True),
(signal.Handlers.SIG_DFL, False),
(signal_handler, True),
(SignalHandlers().signal_handler, True),
],
)
def test_has_already_handler(handler, expected_return):
"""Test that the SignalConnector detects whether a signal handler is already attached."""
with mock.patch("lightning.pytorch.trainer.connectors.signal_connector.signal.getsignal", return_value=handler):
assert _SignalConnector._has_already_handler(signal.SIGTERM) is expected_return
def test_sigterm_notifier_fn():
trainer = Mock()
launcher = Mock()
trainer.strategy.launcher = launcher
connector = _SignalConnector(trainer)
assert not connector.received_sigterm
connector._sigterm_notifier_fn(signal.SIGTERM, Mock())
launcher.kill.assert_called_once_with(15)
assert connector.received_sigterm
launcher.reset_mock()
connector._sigterm_notifier_fn(signal.SIGTERM, Mock())
launcher.kill.assert_not_called()
|
SignalHandlers
|
python
|
apache__airflow
|
providers/apache/hive/tests/unit/apache/hive/transfers/test_mssql_to_hive.py
|
{
"start": 980,
"end": 5475
}
|
class ____:
def setup_method(self):
self.kwargs = dict(sql="sql", hive_table="table", task_id="test_mssql_to_hive", dag=None)
def test_type_map_binary(self):
mapped_type = MsSqlToHiveOperator(**self.kwargs).type_map(pymssql.BINARY.value)
assert mapped_type == "INT"
def test_type_map_decimal(self):
mapped_type = MsSqlToHiveOperator(**self.kwargs).type_map(pymssql.DECIMAL.value)
assert mapped_type == "FLOAT"
def test_type_map_number(self):
mapped_type = MsSqlToHiveOperator(**self.kwargs).type_map(pymssql.NUMBER.value)
assert mapped_type == "INT"
def test_type_map_string(self):
mapped_type = MsSqlToHiveOperator(**self.kwargs).type_map(None)
assert mapped_type == "STRING"
@patch("airflow.providers.apache.hive.transfers.mssql_to_hive.csv")
@patch("airflow.providers.apache.hive.transfers.mssql_to_hive.NamedTemporaryFile")
@patch("airflow.providers.apache.hive.transfers.mssql_to_hive.MsSqlHook")
@patch("airflow.providers.apache.hive.transfers.mssql_to_hive.HiveCliHook")
def test_execute(self, mock_hive_hook, mock_mssql_hook, mock_tmp_file, mock_csv):
type(mock_tmp_file).name = PropertyMock(return_value="tmp_file")
mock_tmp_file.return_value.__enter__ = Mock(return_value=mock_tmp_file)
mock_mssql_hook_get_conn = mock_mssql_hook.return_value.get_conn.return_value.__enter__
mock_mssql_hook_cursor = mock_mssql_hook_get_conn.return_value.cursor.return_value.__enter__
mock_mssql_hook_cursor.return_value.description = [("anything", "some-other-thing")]
mssql_to_hive_transfer = MsSqlToHiveOperator(**self.kwargs)
mssql_to_hive_transfer.execute(context={})
mock_mssql_hook_cursor.return_value.execute.assert_called_once_with(mssql_to_hive_transfer.sql)
mock_tmp_file.assert_called_with(mode="w", encoding="utf-8")
mock_csv.writer.assert_called_once_with(mock_tmp_file, delimiter=mssql_to_hive_transfer.delimiter)
field_dict = {}
for field in mock_mssql_hook_cursor.return_value.description:
field_dict[field[0]] = mssql_to_hive_transfer.type_map(field[1])
mock_csv.writer.return_value.writerows.assert_called_once_with(mock_mssql_hook_cursor.return_value)
mock_hive_hook.return_value.load_file.assert_called_once_with(
mock_tmp_file.name,
mssql_to_hive_transfer.hive_table,
field_dict=field_dict,
create=mssql_to_hive_transfer.create,
partition=mssql_to_hive_transfer.partition,
delimiter=mssql_to_hive_transfer.delimiter,
recreate=mssql_to_hive_transfer.recreate,
tblproperties=mssql_to_hive_transfer.tblproperties,
)
@patch("airflow.providers.apache.hive.transfers.mssql_to_hive.csv")
@patch("airflow.providers.apache.hive.transfers.mssql_to_hive.NamedTemporaryFile")
@patch("airflow.providers.apache.hive.transfers.mssql_to_hive.MsSqlHook")
@patch("airflow.providers.apache.hive.transfers.mssql_to_hive.HiveCliHook")
def test_execute_empty_description_field(self, mock_hive_hook, mock_mssql_hook, mock_tmp_file, mock_csv):
type(mock_tmp_file).name = PropertyMock(return_value="tmp_file")
mock_tmp_file.return_value.__enter__ = Mock(return_value=mock_tmp_file)
mock_mssql_hook_get_conn = mock_mssql_hook.return_value.get_conn.return_value.__enter__
mock_mssql_hook_cursor = mock_mssql_hook_get_conn.return_value.cursor.return_value.__enter__
mock_mssql_hook_cursor.return_value.description = [("", "")]
mssql_to_hive_transfer = MsSqlToHiveOperator(**self.kwargs)
mssql_to_hive_transfer.execute(context={})
field_dict = {}
for col_count, field in enumerate(mock_mssql_hook_cursor.return_value.description, start=1):
col_position = f"Column{col_count}"
field_dict[col_position] = mssql_to_hive_transfer.type_map(field[1])
mock_hive_hook.return_value.load_file.assert_called_once_with(
mock_tmp_file.name,
mssql_to_hive_transfer.hive_table,
field_dict=field_dict,
create=mssql_to_hive_transfer.create,
partition=mssql_to_hive_transfer.partition,
delimiter=mssql_to_hive_transfer.delimiter,
recreate=mssql_to_hive_transfer.recreate,
tblproperties=mssql_to_hive_transfer.tblproperties,
)
|
TestMsSqlToHiveTransfer
|
python
|
ansible__ansible
|
test/integration/targets/ansible-doc/broken-docs/collections/ansible_collections/testns/testcol/plugins/cache/notjsonfile.py
|
{
"start": 1895,
"end": 2003
}
|
class ____(BaseFileCacheModule):
"""
A caching module backed by json files.
"""
pass
|
CacheModule
|
python
|
simonw__datasette
|
tests/test_base_view.py
|
{
"start": 142,
"end": 394
}
|
class ____(View):
async def get(self, request, datasette):
return Response.json(
{
"absolute_url": datasette.absolute_url(request, "/"),
"request_path": request.path,
}
)
|
GetView
|
python
|
pytorch__pytorch
|
test/dynamo/test_functions.py
|
{
"start": 87218,
"end": 117542
}
|
class ____(torch.nn.Module):
def forward(self, s77: "Sym(s77)", L_x_: "f32[s77, s77]"):
l_x_ = L_x_
mul: "f32[s77, s77]" = l_x_ * 4
mul_1: "f32[s77, s77]" = mul * l_x_; mul = None
mul_2: "f32[s77, s77]" = 20 * l_x_; l_x_ = None
mul_3: "f32[s77, s77]" = torch.mul(mul_1, mul_2); mul_1 = mul_2 = None
return (mul_3,)
""",
)
def test_partials_recompilation(self):
def fn(f0, f1, x):
return f0(x) * f1(x)
lambda0 = functools.partial(udf_mul, y=torch.randn(2, 2))
lambda1 = functools.partial(udf_mul, y=torch.randn(2, 2))
cnts = torch._dynamo.testing.CompileCounter()
x = torch.randn(2, 2)
fn = torch.compile(fn, backend=cnts, fullgraph=True)
fn(lambda0, lambda1, x)
self.assertEqual(cnts.frame_count, 1)
fn(lambda1, lambda0, x)
self.assertEqual(
cnts.frame_count, 1
) # No recompile! Tensor and udf_mul guarded
lambda2 = functools.partial(udf_mul, y=torch.randn(3, 3))
x = torch.randn(3, 3)
fn(lambda2, lambda2, x)
self.assertEqual(cnts.frame_count, 2) # Recompile! Tensor size changed
multiply = lambda x, y: x * y
lambda3 = functools.partial(multiply, y=torch.randn(3, 3))
x = torch.randn(3, 3)
fn(lambda3, lambda3, x)
self.assertEqual(cnts.frame_count, 3) # Recompile! func id changed
def fn2(f0, f1, args):
return f0(*args) * f1(*args)
cnts = torch._dynamo.testing.CompileCounter()
x = torch.randn(2, 2)
fn2 = torch.compile(fn2, backend=cnts, fullgraph=True)
fn2(lambda0, lambda1, [x])
self.assertEqual(cnts.frame_count, 1) # start over
lambda4 = functools.partial(multiply, y=3, x=torch.randn(3, 3))
fn2(lambda4, lambda4, [])
self.assertEqual(cnts.frame_count, 2) # Recompile! Different kwarg keys
lambda5 = functools.partial(multiply, 1)
x = torch.randn(3, 3)
fn2(lambda5, lambda5, [x])
self.assertEqual(cnts.frame_count, 3) # Recompile! Different arg keys
lambda6 = lambda x: x + x
fn2(lambda6, lambda6, [x])
self.assertEqual(
cnts.frame_count, 4
) # Recompile! input is no longer a functools partial
def test_manual_seed(self):
@torch.compile
def foo():
torch.manual_seed(3)
return torch.randint(0, 5, (5,))
self.assertEqual(foo(), foo())
self.assertEqual(foo(), foo())
def test_partial_across_graph_break_uninvoked(self):
from functools import partial
def bar(x, **kwargs):
return x + x
@torch.compile(backend="eager", dynamic=True)
def foo(x, i):
def inner():
print("this is a graph_break")
return op(x)
op = partial(bar, dim=10)
x = inner()
op = partial(bar, other=10)
return inner() + x
foo(torch.rand(1), 10)
def test_no_recompile_inner_function(self):
def forward(inp):
def g(y):
return inp + y
print("graph break")
return g(torch.rand([1]))
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(forward, backend=cnts)
input = torch.rand([2])
_ = opt_fn(input)
_ = opt_fn(input)
_ = opt_fn(input)
# Should not have recompiled
self.assertEqual(cnts.frame_count, 1)
def test_no_recompile_inner_lambda(self):
def forward(inp):
g = lambda y: inp + y
print("graph break")
return g(torch.rand([1]))
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(forward, backend=cnts)
input = torch.rand([2])
_ = opt_fn(input)
_ = opt_fn(input)
_ = opt_fn(input)
# Should not have recompiled
self.assertEqual(cnts.frame_count, 1)
def test_complex_closure(self):
@torch.compile
def forward(y):
def a():
def x(z):
return y + z
return x
return a()
input1 = torch.rand([2])
input2 = torch.rand([2])
res = forward(input1)(input2)
self.assertTrue(same(res, input1 + input2))
def test_non_inlined_closure(self):
@torch.compile()
def program(x, y):
one = lambda x, y: x + y
def inner():
# Force no inlining
torch._dynamo.graph_break()
return one(x, y)
res = inner()
one = lambda x, y: x - y
res += inner()
return res
input1 = torch.randn(1)
input2 = torch.randn(1)
self.assertTrue(same(program(input1, input2), input1 + input1))
@parametrize("int_or_float", ("int", "float"))
def test_np_constant_collections_as_input(self, int_or_float):
info_func = getattr(np, f"{int_or_float[0]}info")
dt_string_arg = f"{int_or_float}16"
np_dt_attr = getattr(np, dt_string_arg)
dt_args = [dt_string_arg, np_dt_attr]
arg_variants_iter = itertools.chain(
dt_args, map(np.dtype, dt_args), map(info_func, dt_args)
)
def func(a, b, info_or_dt):
return a + info_func(info_or_dt).max
opt_fn = torch.compile(func)
a = torch.randn(2)
b = torch.randn(2)
eager_result = func(a, b, dt_args[0])
for arg in arg_variants_iter:
opt_result = opt_fn(a, b, arg)
self.assertTrue(same(opt_result, eager_result))
@parametrize(
"typ, info_func",
[
(int, np.iinfo),
(float, np.finfo),
],
name_fn=lambda t, _: t.__name__,
)
def test_np_constant_collections_guards(self, typ, info_func):
def func_info(a, info):
return a + info.max
def func_dtype(a, dt):
return a + info_func(dt).max
dt_args = [
np.dtype(typ),
np.ones((1,), dtype=typ).dtype,
np.dtype(np.dtype(typ).name),
np.dtype(typ.__name__),
]
cnts_1 = torch._dynamo.testing.CompileCounter()
opt_fn_dtype = torch.compile(func_dtype, backend=cnts_1)
a = torch.zeros(3, dtype=typ)
for arg in dt_args:
opt_fn_dtype(a, arg)
# each should produce an identical arg
self.assertEqual(cnts_1.frame_count, 1)
cnts_2 = torch._dynamo.testing.CompileCounter()
opt_fn_info = torch.compile(func_info, backend=cnts_2)
info_args = [info_func(dt) for dt in dt_args]
for arg in info_args:
opt_fn_info(a, arg)
# each should produce an identical arg
self.assertEqual(cnts_2.frame_count, 1)
if typ is float:
dt_extra = np.dtype(np.float16)
else:
dt_extra = np.dtype(np.int16)
info_extra = info_func(dt_extra)
eager_result_dtype = func_dtype(a, dt_extra)
compile_result_dtype = opt_fn_dtype(a, dt_extra)
self.assertEqual(cnts_1.frame_count, 2)
self.assertEqual(eager_result_dtype, compile_result_dtype)
eager_result_info = func_info(a, info_extra)
compile_result_info = opt_fn_info(a, info_extra)
self.assertEqual(cnts_2.frame_count, 2)
self.assertEqual(eager_result_info, compile_result_info)
def test_compare_constant_and_tensor(self):
for op in [
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
operator.eq,
operator.is_,
operator.is_not,
]:
with self.subTest(op=op):
def fn(x):
return op(-10, x)
opt_fn = torch.compile(fullgraph=True)(fn)
x = torch.randn(10)
self.assertEqual(opt_fn(x), fn(x))
def test_pos(self):
def fn(x, y):
return operator.pos(x) * +y
opt_fn = torch.compile(fullgraph=True, dynamic=True)(fn)
def test(x, y):
self.assertEqual(opt_fn(x, y), fn(x, y))
test(torch.ones(4), 1)
test(1, torch.ones(4))
test(-1, -1)
test(-1.1, 1.1)
test(True, False)
test(torch.ones(4, dtype=torch.float32), 1.1)
def test_index(self):
def fn(x, t):
v = operator.index(x)
torch.mul(t, v)
def test(a, b):
self.assertEqual(opt_fn(a, b), fn(a, b))
for dynamic in [True, False]:
torch._dynamo.reset()
opt_fn = torch.compile(fn, dynamic=dynamic)
t = torch.ones(1)
test(10, t)
test(-100, t)
test(10, t)
test(False, t)
test(True, t)
def test_truth(self):
def fn(x, y):
return operator.truth(x) and bool(y)
opt_fn = torch.compile(dynamic=False)(fn)
def test(x, y):
self.assertEqual(opt_fn(x, y), fn(x, y))
test(1, 100)
test(-1.1, True)
test(-1.1, 1.1)
test(True, False)
test(torch.ones(1), 1)
test(torch.zeros(1), 1)
test(torch.ones(1), torch.ones(1))
def test_unary_fold_op(self):
for op in (operator.abs, abs, operator.neg, operator.pos, operator.truth):
with self.subTest(op=op):
def fn():
a = range(-10, 10)
return list(map(op, a))
opt_fn = torch.compile(fn, fullgraph=True)
self.assertEqual(opt_fn(), fn())
def test_unary_fold_op_seq(self):
for op in (operator.length_hint,):
with self.subTest(op=op):
def fn():
a = [tuple(range(-10, i)) for i in range(10)]
return tuple(map(op, a))
opt_fn = torch.compile(fn, fullgraph=True)
self.assertEqual(opt_fn(), fn())
def test_attrgetter(self):
for attrs in (
("shape",),
("data.shape",),
("device", "shape"),
("device", "shape", "data.shape"),
):
with self.subTest(attrs=attrs):
def fn(x, y):
getter = operator.attrgetter(*attrs)
return getter(x), getter(y)
opt_fn = torch.compile(fullgraph=True)(fn)
x = torch.randn(3, 4)
y = torch.randn(3, 4)
self.assertEqual(opt_fn(x, y), fn(x, y))
def test_itemgetter(self):
for items in (
(0,),
(slice(1, 3),),
(0, 1),
(slice(1, 3), 0, 1),
):
with self.subTest(items=items):
def fn(x, y):
getter = operator.itemgetter(*items)
return getter(x), getter(y)
opt_fn = torch.compile(fullgraph=True)(fn)
x = torch.randn(3, 4)
y = torch.randn(3, 4)
self.assertEqual(opt_fn(x, y), fn(x, y))
def test_methodcaller(self):
for name, args, kwargs in (
("size", (), {}),
("size", (0,), {}),
("add", (torch.randn(3, 4),), {}),
("add", (torch.randn(3, 4),), {"alpha": 2.0}),
):
with self.subTest(name=name, args=args, kwargs=kwargs):
def fn(x, y):
caller = operator.methodcaller(name, *args, **kwargs)
return caller(x), caller(y)
opt_fn = torch.compile(fullgraph=True)(fn)
x = torch.randn(3, 4)
y = torch.randn(3, 4)
self.assertEqual(opt_fn(x, y), fn(x, y))
def gen_random_range_args(self):
args_count = random.randint(1, 3)
args = [random.randint(-10, 10) for _ in range(args_count)]
if args_count == 3 and args[2] == 0:
args[2] = 1
return args
def test_range_iterator_graph_break(self):
@torch.compile(backend="eager")
def fn(x):
it = range(1, 7, 2).__iter__()
y = x + next(it)
torch._dynamo.graph_break()
return y + next(it) + next(it)
x = torch.tensor([1.0])
y = fn(x)
self.assertEqual(y, x + 1 + 3 + 5)
def test_range_iterator_graph_break_2(self):
@torch.compiler.disable
def g(y, it):
return y + next(it) + next(it)
@torch.compile(backend="eager")
def fn(x):
it = range(1, 10, 2).__iter__()
y = x + next(it)
z = g(y, it)
k = next(it)
assert k == 7
return z + k
x = torch.tensor([1.0])
z = fn(x)
self.assertEqual(z, x + 1 + 3 + 5 + 7)
@make_test
def test_range_iterator(a, b):
it = range(5).__iter__()
if isinstance(it, range_iterator):
return a + b
return a - b
@make_test
def test_range_iterator_2(a, b):
# should pass once we stop having three different paths on call_iter
it = iter(range(5))
if isinstance(it, range_iterator):
return a + b
return a - b
def test_range_length(self):
def test(*args, expected=None):
r = range(*args)
range_variable = RangeVariable([ConstantVariable.create(v) for v in args])
self.assertEqual(len(r), range_variable.range_length())
if expected is not None:
self.assertEqual(len(r), expected)
test(1, 1, 1, expected=0)
test(1, 0, expected=0)
test(-10, expected=0)
test(4, expected=4)
test(10, expected=10)
# step >1
test(1, 10, 2, expected=5)
# negative step
test(10, 1, -1, expected=9)
test(10, 1, -3)
# Fuzz testing
for _ in range(100):
args = self.gen_random_range_args()
print("testing :", args)
test(*args)
def test_indexed_range(self):
def test(range, index, expected=None):
range_variable = RangeVariable(
[
ConstantVariable.create(v)
for v in [range.start, range.stop, range.step]
]
)
self.assertEqual(
range[index],
range_variable.apply_index(index).as_python_constant(),
)
if expected is not None:
self.assertEqual(range[index], expected)
test(range(10), 1, expected=1)
test(range(10, 20, 2), 1, expected=12)
# Fuzz testing
for _ in range(100):
range_args = self.gen_random_range_args()
r = range(*range_args)
if len(r) == 0:
continue
index = random.randint(0, len(r) - 1)
print("testing:", r, index)
test(r, index)
def test_sliced_range(self):
def test(range, slice, expected=None):
range_variable = RangeVariable(
[
ConstantVariable.create(v)
for v in [range.start, range.stop, range.step]
]
)
self.assertEqual(
range[slice],
range_variable.apply_slice(slice).as_python_constant(),
)
if expected is not None:
self.assertEqual(
range[slice],
expected,
)
test(range(10), slice(1, 10, 2), expected=range(1, 10, 2))
test(range(10), slice(None, 10, None), expected=range(10))
test(range(10), slice(-1, 7, None), expected=range(9, 7))
test(range(10), slice(-1, 7, 2), expected=range(9, 7, 2))
test(range(1, 10, 2), slice(3, 7, 2), expected=range(7, 11, 4))
test(range(1, 10, 2), slice(-3, 7, 2), expected=range(5, 11, 4))
test(range(-1, -5, -3), slice(5, None, -3), expected=range(-4, 2, 9))
def rand_slice():
def flip_coin():
# 1 out of 10
return random.randint(1, 10) == 5
def r_item(allow_zero=True):
i = random.randint(-10, 10)
if not allow_zero and i == 0:
i = 1
if flip_coin():
i = None
return i
arg_count = random.randint(1, 3)
if arg_count == 1:
return slice(r_item())
elif arg_count == 2:
return slice(r_item(), r_item())
else:
return slice(r_item(), r_item(), r_item(False))
# Fuzz testing
for _ in range(100):
range_args = self.gen_random_range_args()
r = range(*range_args)
# generate random slice
s = rand_slice()
print("testing:", r, s)
test(r, s)
def test_range_with_slice_index(self):
def fn(x):
acc = 1
for k in range(2)[1::2]:
acc *= acc * k
return x * acc
opt_fn = torch.compile(fullgraph=True)(fn)
x = torch.ones(1)
self.assertEqual(opt_fn(x), fn(x))
def test_range_with_index(self):
def fn(x):
acc = 1
acc *= acc * range(10, 20, 2)[2]
return x * acc
opt_fn = torch.compile(fullgraph=True)(fn)
x = torch.ones(1)
self.assertEqual(opt_fn(x), fn(x))
def test_rand_inlined(self):
@torch.compile(backend="eager", dynamic=True)
def fn():
idx_size = [10]
idx_size[random.randint(0, 0)] = random.randint(1, 8)
t = tuple(idx_size)
src_size = [random.randint(1, 5) + s for s in idx_size] # noqa: F841
idx = torch.empty(t) # noqa: F841
fn()
def test_rand_tensor_partial(self):
from collections import namedtuple
from functools import partial
SdpaShape = namedtuple(
"Sdpa_Shape", ["batch", "num_heads", "seq_len", "head_dim"]
)
@torch.compile(backend="eager")
def func():
make_tensor = partial(
torch.rand, device="cpu", dtype=torch.float16, requires_grad=True
)
bsz, num_heads, seq_len_q, seq_len_kv, head_dim = (16, 16, 128, 128, 16)
make_q_tensor = partial(
make_tensor, SdpaShape(bsz, num_heads, seq_len_q, head_dim)
)
make_kv_tensor = partial(
make_tensor, SdpaShape(bsz, num_heads, seq_len_kv, head_dim)
)
t1 = make_q_tensor()
t2 = make_kv_tensor()
t3 = t1 + t2 # noqa: F841
func()
def test_to(self):
@torch.compile(backend="eager")
def fn():
t = torch.ones(2)
y = t.to("meta") # noqa: F841
fn()
def test_elipsis(self):
@torch.compile(backend="eager", fullgraph=True)
def fn(a, ind, val):
a[ind] = val
return a
arr = np.zeros(4)
self.assertEqual(fn(arr, np.s_[...], np.ones(4)), np.ones(4))
arr = np.array([[1, 1], [2, 2]])
self.assertEqual(
fn(arr, np.s_[0, ...], np.zeros(2)), np.array([[0, 0], [2, 2]])
)
arr = np.array([[1, 1], [2, 2]])
self.assertEqual(
fn(arr, np.s_[1, ...], np.zeros(2)), np.array([[1, 1], [0, 0]])
)
arr = np.array([[1, 1], [2, 2]])
self.assertEqual(
fn(arr, np.s_[..., 0], np.array([3, 3])), np.array([[3, 1], [3, 2]])
)
arr = np.array([[1, 1], [2, 2]])
self.assertEqual(
fn(arr, np.s_[..., 1], np.array([3, 3])), np.array([[1, 3], [2, 3]])
)
def test_round(self):
def fn(t):
return t + round(1.00002000011, 7)
t = torch.randn(2)
e = fn(t)
g = torch.compile(fn, backend="eager", fullgraph=True)(t)
self.assertEqual(e, g)
def test_map_return(self):
def fn(a, b):
return map(lambda x: x + 1, [a, b])
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
m = opt_fn(torch.randn(3, 3), torch.randn(3, 3))
self.assertIsInstance(m, map)
@make_test
def test_map_max(a, b):
return max(map(lambda x: x.sum(), [a, b]))
@make_test
def test_map_max_const(a):
return max(map(lambda x: x, [1, 2, 3])), a + 1
@make_test
def test_map_list(a, b):
return list(map(lambda x: x + 1, [a, b]))
@make_test
def test_map_tuple(a, b):
return tuple(map(lambda x: x + 1, [a, b]))
@make_test
def test_map_iter(a, b):
it = iter(map(lambda x: x + 1, [a, b]))
return next(it)
@make_test
def test_map_zip_dict(a):
d = dict(
zip(
map(lambda x: x + 1, [0, 1, 2]),
[map(lambda x: x - 1, [y]) for y in [3, 4, 5]],
)
)
return list(d[3])[0], a + 1 # noqa: RUF015
@make_test
def test_map_dict_fromkeys(a):
return dict.fromkeys(map(lambda x: x + 1, [0, 1])), a + 1
@make_test
def test_map_set(a):
return set(map(lambda x: x + 1, [0, 1])), a + 1
# test_map_sum defined earlier
@make_test
def test_map_reduce(a, b):
return functools.reduce(lambda x, y: x + y, map(lambda x: x + 1, [a, b]))
@make_test
def test_map_sorted(a):
return sorted(map(lambda x: x + 1, [0, 4, 3, 1, 2])), a + 1
@make_test
def test_map_list_extend(a, b, c):
l = [a]
l.extend(map(lambda x: x + 1, [b, c]))
return l
@make_test
def test_map_list_slice_assign(a, b, c, d, e):
l = [a, b, c]
l[1:2] = map(lambda x: x + 1, [d, e])
return l
@make_test
def test_map_deque_extendleft(a, b, c):
d = collections.deque([a])
d.extendleft(map(lambda x: x + 1, [b, c]))
return d
@make_test
def test_map_str_join(a):
return "".join(map(lambda x: x, ["a", "b", "c"])), a + 1
def test_map_with_graph_break(self):
def f(a):
a += 1
def g(x):
nonlocal a
a += 1
return x + 1
m = map(g, [1, 2, 3, 4, 5])
a += next(m) # won't graph break
torch._dynamo.graph_break()
a += next(m) # will graph break
return a
cnts = torch._dynamo.testing.CompileCounter()
opt_f = torch.compile(f, backend=cnts)
self.assertEqual(f(torch.ones(3, 3)), opt_f(torch.ones(3, 3)))
self.assertEqual(cnts.frame_count, 3)
def test_map_reconstruct(self):
def fn(a):
return map(lambda x: x[0] + x[1], zip([1, 2, 3], [1, 2, 3])), a + 1
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
m = opt_fn(torch.ones(3, 3))[0]
self.assertIsInstance(m, map)
self.assertEqual(list(m), list(fn(torch.ones(3, 3))[0]))
def test_zip_reconstruct(self):
def fn(a):
return zip([1, 2, 3], map(lambda x: x + 1, [1, 2, 3])), a + 1
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
m = opt_fn(torch.ones(3, 3))[0]
self.assertIsInstance(m, zip)
self.assertEqual(list(m), list(fn(torch.ones(3, 3))[0]))
@make_test
def test_map_partial_unpack(a, b):
y = 1
def f(x):
nonlocal y
y += 1
return x
l = list(zip([a, b], map(f, [1, 2, 3, 4]))) # noqa: F841
return a + y
@make_test
def test_map_call_function_ex(a, b):
def f(x, y):
return x + y
return f(*map(lambda x: x + 1, [a, b]))
@make_test
def test_map_unpack_twice(a, b):
m = map(lambda x: x + 1, [a, b])
l1 = list(m)
l2 = list(m)
return l1, l2
@make_test
def test_enumerate(a, b):
return list(enumerate([a, b], start=1)), a + 1
@make_test
def test_map_enumerate(a, b):
return list(enumerate(map(lambda x: x + 1, [a, b]), start=1)), a + 1
@make_test
def test_map_infinite(a, b):
return list(map(lambda x, y: x + y, [a, b], itertools.count(3)))
@make_test
def test_map_unpack_vars(a, b):
x, y = map(lambda x: x + 1, [a, b])
return x + y
@make_test
def test_map_list_extend(a):
y = [1]
def inner(z):
return z + y[-1]
y.extend(map(inner, range(3)))
return a + 1, y
@make_test
def test_map_deque_extendleft(a):
y = collections.deque([1])
def inner(z):
return z + y[0]
y.extendleft(map(inner, range(3)))
return a + 1, y
def test_unsqueeze_inplace(self):
def fn(x):
return torch.Tensor.unsqueeze_(x, dim=1) + 1
def self_fn(x):
return x.unsqueeze_(dim=1) + 1
v = torch.ones([3], device="cpu")
# identical tensor since modify inplace
v2 = torch.ones([3], device="cpu")
opt_fn = torch.compile(fn)
opt_self_fn = torch.compile(self_fn)
self.assertEqual(v, v2)
self.assertEqual(opt_fn(v), opt_self_fn(v2))
def test_enumerate_custom(self):
class MyClass:
def __iter__(self):
self.a = 1
return self
def __next__(self):
if self.a > 3:
raise StopIteration
self.a += 1
return self.a
def fn(x):
for i, it in enumerate(MyClass()):
x += i + it
return x
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(torch.ones(3, 3)), opt_fn(torch.ones(3, 3)))
@unittest.skip("https://github.com/pytorch/pytorch/pull/146527 exposed a bug")
def test_enumerate_reconstruct(self):
def fn(a, b):
return enumerate([a, b], start=1)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
inps = (torch.randn(3, 3), torch.randn(3, 3))
it1 = fn(*inps)
it2 = opt_fn(*inps)
self.assertIsInstance(it2, enumerate)
self.assertEqual(list(it1), list(it2))
def test_returning_recursive_func(self):
@torch.compile(backend="eager", fullgraph=True)
def run(x):
def f():
return f
return x + 1, f
res, f = run(torch.zeros(1))
self.assertTrue(same(res, torch.ones(1)))
self.assertTrue(f is f())
def test_functools_partial_binding(self):
class Foo:
def __init__(self, x):
self.x = x
@functools.lru_cache # noqa: B019
def incr(self, val):
self.x += val
def fn(x):
f = Foo(4)
f.incr(3)
return x + f.x
x = torch.randn(2)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
def test_functools_cache_guard(self):
class Foo:
@functools.lru_cache # noqa: B019
def run(self, val, c=1.0):
return val * c * 2
f = Foo()
def fn(x):
return f.run(x)
x = torch.randn(2)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(x), opt_fn(x))
def test_torch_get_device_module(self):
def f1():
mod1 = torch.get_device_module()
mod2 = torch.get_device_module("cpu")
mod3 = torch.get_device_module(torch.device(device_type))
return mod1, mod2, mod3
self.assertEqual(f1(), torch.compile(f1, backend="eager", fullgraph=True)())
@torch.compile(backend="eager", fullgraph=True)
def f2():
torch.get_device_module(foo="cpu")
with self.assertRaises(Unsupported):
f2()
@torch.compile(backend="eager", fullgraph=True)
def f3():
torch.get_device_module("cpu", device="cpu")
with self.assertRaises(Unsupported):
f3()
@torch.compile(backend="eager", fullgraph=True)
def f4():
torch.get_device_module("asdf")
with self.assertRaises(Unsupported):
f4()
# test for changing torch.get_device_module() (super rare case due to lru_cache)
@torch.compile(backend="eager", fullgraph=True)
def f5():
return torch.get_device_module()
f5()
new_device = (
"cpu" if torch._C._get_accelerator() == torch.device("cuda") else "cuda"
)
old_get_device_module = torch.get_device_module
def new_get_device_module(device=None):
if device:
return old_get_device_module(device)
return getattr(torch, new_device)
# NOTE: torch.get_device_module.__wrapped__ is guarded on, but not
# torch.get_device_module
with patch("torch.get_device_module", new_get_device_module):
print(torch.get_device_module())
self.assertEqual(f5(), getattr(torch, new_device))
# synchronize causes a graph break, so no fullgraph=True
@torch.compile(backend="eager")
def f6():
mod = torch.get_device_module()
mod.synchronize()
return mod
f6()
def test_torch_source(self):
global torch
g = torch.get_device_module
@torch.compile(backend="eager", fullgraph=True)
def f():
return g()
try:
old_torch = torch
torch = 1
self.assertEqual(torch, 1)
self.assertIsInstance(f(), types.ModuleType)
finally:
torch = old_torch
def udf_mul(x, y):
return x * y
def udf_mul2(x, y, z):
return x * y * z
def udf_add(x, y):
return x + y
|
GraphModule
|
python
|
huggingface__transformers
|
src/transformers/models/layoutlm/modeling_layoutlm.py
|
{
"start": 16861,
"end": 17260
}
|
class ____(PreTrainedModel):
config: LayoutLMConfig
base_model_prefix = "layoutlm"
supports_gradient_checkpointing = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, LayoutLMLMPredictionHead):
init.zeros_(module.bias)
@auto_docstring
|
LayoutLMPreTrainedModel
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/session.py
|
{
"start": 27598,
"end": 28201
}
|
class ____(Enum):
"""indicates the origin of a :class:`.SessionTransaction`.
This enumeration is present on the
:attr:`.SessionTransaction.origin` attribute of any
:class:`.SessionTransaction` object.
.. versionadded:: 2.0
"""
AUTOBEGIN = 0
"""transaction were started by autobegin"""
BEGIN = 1
"""transaction were started by calling :meth:`_orm.Session.begin`"""
BEGIN_NESTED = 2
"""tranaction were started by :meth:`_orm.Session.begin_nested`"""
SUBTRANSACTION = 3
"""transaction is an internal "subtransaction" """
|
SessionTransactionOrigin
|
python
|
langchain-ai__langchain
|
libs/core/tests/unit_tests/test_messages.py
|
{
"start": 30235,
"end": 44769
}
|
class ____:
pass
def test_tool_message_ser_non_serializable() -> None:
bad_obj = BadObject()
message = ToolMessage("foo", artifact=bad_obj, tool_call_id="1")
ser_message = {
"lc": 1,
"type": "constructor",
"id": ["langchain", "schema", "messages", "ToolMessage"],
"kwargs": {
"content": "foo",
"type": "tool",
"tool_call_id": "1",
"artifact": {
"lc": 1,
"type": "not_implemented",
"id": ["tests", "unit_tests", "test_messages", "BadObject"],
"repr": repr(bad_obj),
},
"status": "success",
},
}
assert dumpd(message) == ser_message
with pytest.raises(NotImplementedError):
load(dumpd(ser_message))
def test_tool_message_to_dict() -> None:
message = ToolMessage("foo", artifact={"bar": {"baz": 123}}, tool_call_id="1")
expected = {
"type": "tool",
"data": {
"content": "foo",
"additional_kwargs": {},
"response_metadata": {},
"artifact": {"bar": {"baz": 123}},
"type": "tool",
"name": None,
"id": None,
"tool_call_id": "1",
"status": "success",
},
}
actual = message_to_dict(message)
assert actual == expected
def test_tool_message_repr() -> None:
message = ToolMessage("foo", artifact={"bar": {"baz": 123}}, tool_call_id="1")
expected = (
"ToolMessage(content='foo', tool_call_id='1', artifact={'bar': {'baz': 123}})"
)
actual = repr(message)
assert expected == actual
def test_tool_message_str() -> None:
message = ToolMessage("foo", artifact={"bar": {"baz": 123}}, tool_call_id="1")
expected = "content='foo' tool_call_id='1' artifact={'bar': {'baz': 123}}"
actual = str(message)
assert expected == actual
@pytest.mark.parametrize(
("first", "others", "expected"),
[
("", [""], ""),
("", [[]], [""]),
([], [""], []),
([], [[]], []),
("foo", [""], "foo"),
("foo", [[]], ["foo"]),
(["foo"], [""], ["foo"]),
(["foo"], [[]], ["foo"]),
("foo", ["bar"], "foobar"),
("foo", [["bar"]], ["foo", "bar"]),
(["foo"], ["bar"], ["foobar"]),
(["foo"], [["bar"]], ["foo", "bar"]),
(
[{"text": "foo"}],
[[{"index": 0, "text": "bar"}]],
[{"text": "foo"}, {"index": 0, "text": "bar"}],
),
],
)
def test_merge_content(first: list | str, others: list, expected: list | str) -> None:
actual = merge_content(first, *others)
assert actual == expected
def test_tool_message_content() -> None:
ToolMessage("foo", tool_call_id="1")
ToolMessage(["foo"], tool_call_id="1")
ToolMessage([{"foo": "bar"}], tool_call_id="1")
# Ignoring since we're testing that tuples get converted to lists in `coerce_args`
assert ToolMessage(("a", "b", "c"), tool_call_id="1").content == ["a", "b", "c"] # type: ignore[call-overload]
assert ToolMessage(5, tool_call_id="1").content == "5" # type: ignore[call-overload]
assert ToolMessage(5.1, tool_call_id="1").content == "5.1" # type: ignore[call-overload]
assert ToolMessage({"foo": "bar"}, tool_call_id="1").content == "{'foo': 'bar'}" # type: ignore[call-overload]
assert (
ToolMessage(Document("foo"), tool_call_id="1").content == "page_content='foo'" # type: ignore[call-overload]
)
def test_tool_message_tool_call_id() -> None:
ToolMessage("foo", tool_call_id="1")
ToolMessage("foo", tool_call_id=uuid.uuid4())
ToolMessage("foo", tool_call_id=1)
ToolMessage("foo", tool_call_id=1.0)
def test_message_text() -> None:
# partitions:
# message types: [ai], [human], [system], [tool]
# content types: [str], [list[str]], [list[dict]], [list[str | dict]]
# content: [empty], [single element], [multiple elements]
# content dict types: [text], [not text], [no type]
assert HumanMessage(content="foo").text == "foo"
assert AIMessage(content=[]).text == ""
assert AIMessage(content=["foo", "bar"]).text == "foobar"
assert (
AIMessage(
content=[
{"type": "text", "text": "<thinking>thinking...</thinking>"},
{
"type": "tool_use",
"id": "toolu_01A09q90qw90lq917835lq9",
"name": "get_weather",
"input": {"location": "San Francisco, CA"},
},
]
).text
== "<thinking>thinking...</thinking>"
)
assert (
SystemMessage(content=[{"type": "text", "text": "foo"}, "bar"]).text == "foobar"
)
assert (
ToolMessage(
content=[
{"type": "text", "text": "15 degrees"},
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
},
},
],
tool_call_id="1",
).text
== "15 degrees"
)
assert (
AIMessage(content=[{"text": "hi there"}, "hi"]).text == "hi"
) # missing type: text
assert AIMessage(content=[{"type": "nottext", "text": "hi"}]).text == ""
assert AIMessage(content=[]).text == ""
assert (
AIMessage(
content="", tool_calls=[create_tool_call(name="a", args={"b": 1}, id=None)]
).text
== ""
)
def test_is_data_content_block() -> None:
# Test all DataContentBlock types with various data fields
# Image blocks
assert is_data_content_block({"type": "image", "url": "https://..."})
assert is_data_content_block(
{"type": "image", "base64": "<base64 data>", "mime_type": "image/jpeg"}
)
# Video blocks
assert is_data_content_block({"type": "video", "url": "https://video.mp4"})
assert is_data_content_block(
{"type": "video", "base64": "<base64 video>", "mime_type": "video/mp4"}
)
assert is_data_content_block({"type": "video", "file_id": "vid_123"})
# Audio blocks
assert is_data_content_block({"type": "audio", "url": "https://audio.mp3"})
assert is_data_content_block(
{"type": "audio", "base64": "<base64 audio>", "mime_type": "audio/mp3"}
)
assert is_data_content_block({"type": "audio", "file_id": "aud_123"})
# Plain text blocks
assert is_data_content_block({"type": "text-plain", "text": "document content"})
assert is_data_content_block({"type": "text-plain", "url": "https://doc.txt"})
assert is_data_content_block({"type": "text-plain", "file_id": "txt_123"})
# File blocks
assert is_data_content_block({"type": "file", "url": "https://file.pdf"})
assert is_data_content_block(
{"type": "file", "base64": "<base64 file>", "mime_type": "application/pdf"}
)
assert is_data_content_block({"type": "file", "file_id": "file_123"})
# Blocks with additional metadata (should still be valid)
assert is_data_content_block(
{
"type": "image",
"base64": "<base64 data>",
"mime_type": "image/jpeg",
"cache_control": {"type": "ephemeral"},
}
)
assert is_data_content_block(
{
"type": "image",
"base64": "<base64 data>",
"mime_type": "image/jpeg",
"metadata": {"cache_control": {"type": "ephemeral"}},
}
)
assert is_data_content_block(
{
"type": "image",
"base64": "<base64 data>",
"mime_type": "image/jpeg",
"extras": "hi",
}
)
# Invalid cases - wrong type
assert not is_data_content_block({"type": "text", "text": "foo"})
assert not is_data_content_block(
{
"type": "image_url",
"image_url": {"url": "https://..."},
} # This is OpenAI Chat Completions
)
assert not is_data_content_block({"type": "tool_call", "name": "func", "args": {}})
assert not is_data_content_block({"type": "invalid", "url": "something"})
# Invalid cases - valid type but no data or `source_type` fields
assert not is_data_content_block({"type": "image"})
assert not is_data_content_block({"type": "video", "mime_type": "video/mp4"})
assert not is_data_content_block({"type": "audio", "extras": {"key": "value"}})
# Invalid cases - valid type but wrong data field name
assert not is_data_content_block({"type": "image", "source": "<base64 data>"})
assert not is_data_content_block({"type": "video", "data": "video_data"})
# Edge cases - empty or missing values
assert not is_data_content_block({})
assert not is_data_content_block({"url": "https://..."}) # missing type
def test_convert_to_openai_image_block() -> None:
for input_block in [
{
"type": "image",
"url": "https://...",
"cache_control": {"type": "ephemeral"},
},
{
"type": "image",
"source_type": "url",
"url": "https://...",
"cache_control": {"type": "ephemeral"},
},
]:
expected = {
"type": "image_url",
"image_url": {"url": "https://..."},
}
result = convert_to_openai_image_block(input_block)
assert result == expected
for input_block in [
{
"type": "image",
"base64": "<base64 data>",
"mime_type": "image/jpeg",
"cache_control": {"type": "ephemeral"},
},
{
"type": "image",
"source_type": "base64",
"data": "<base64 data>",
"mime_type": "image/jpeg",
"cache_control": {"type": "ephemeral"},
},
]:
expected = {
"type": "image_url",
"image_url": {
"url": "data:image/jpeg;base64,<base64 data>",
},
}
result = convert_to_openai_image_block(input_block)
assert result == expected
def test_known_block_types() -> None:
expected = {
bt
for bt in get_args(ContentBlock)
for bt in get_args(bt.__annotations__["type"])
}
# Normalize any Literal[...] types in block types to their string values.
# This ensures all entries are plain strings, not Literal objects.
expected = {
t
if isinstance(t, str)
else t.__args__[0]
if hasattr(t, "__args__") and len(t.__args__) == 1
else t
for t in expected
}
assert expected == KNOWN_BLOCK_TYPES
def test_typed_init() -> None:
ai_message = AIMessage(content_blocks=[{"type": "text", "text": "Hello"}])
assert ai_message.content == [{"type": "text", "text": "Hello"}]
assert ai_message.content_blocks == ai_message.content
human_message = HumanMessage(content_blocks=[{"type": "text", "text": "Hello"}])
assert human_message.content == [{"type": "text", "text": "Hello"}]
assert human_message.content_blocks == human_message.content
system_message = SystemMessage(content_blocks=[{"type": "text", "text": "Hello"}])
assert system_message.content == [{"type": "text", "text": "Hello"}]
assert system_message.content_blocks == system_message.content
tool_message = ToolMessage(
content_blocks=[{"type": "text", "text": "Hello"}],
tool_call_id="abc123",
)
assert tool_message.content == [{"type": "text", "text": "Hello"}]
assert tool_message.content_blocks == tool_message.content
for message_class in [AIMessage, HumanMessage, SystemMessage]:
message = message_class("Hello")
assert message.content == "Hello"
assert message.content_blocks == [{"type": "text", "text": "Hello"}]
message = message_class(content="Hello")
assert message.content == "Hello"
assert message.content_blocks == [{"type": "text", "text": "Hello"}]
# Test we get type errors for malformed blocks (type checker will complain if
# below type-ignores are unused).
_ = AIMessage(content_blocks=[{"type": "text", "bad": "Hello"}]) # type: ignore[list-item]
_ = HumanMessage(content_blocks=[{"type": "text", "bad": "Hello"}]) # type: ignore[list-item]
_ = SystemMessage(content_blocks=[{"type": "text", "bad": "Hello"}]) # type: ignore[list-item]
_ = ToolMessage(
content_blocks=[{"type": "text", "bad": "Hello"}], # type: ignore[list-item]
tool_call_id="abc123",
)
def test_text_accessor() -> None:
"""Test that `message.text` property and `.text()` method return the same value."""
human_msg = HumanMessage(content="Hello world")
assert human_msg.text == "Hello world"
assert human_msg.text == "Hello world"
assert str(human_msg.text) == str(human_msg.text)
system_msg = SystemMessage(content="You are a helpful assistant")
assert system_msg.text == "You are a helpful assistant"
assert system_msg.text == "You are a helpful assistant"
assert str(system_msg.text) == str(system_msg.text)
ai_msg = AIMessage(content="I can help you with that")
assert ai_msg.text == "I can help you with that"
assert ai_msg.text == "I can help you with that"
assert str(ai_msg.text) == str(ai_msg.text)
tool_msg = ToolMessage(content="Task completed", tool_call_id="tool_1")
assert tool_msg.text == "Task completed"
assert tool_msg.text == "Task completed"
assert str(tool_msg.text) == str(tool_msg.text)
complex_msg = HumanMessage(
content=[{"type": "text", "text": "Hello "}, {"type": "text", "text": "world"}]
)
assert complex_msg.text == "Hello world"
assert complex_msg.text == "Hello world"
assert str(complex_msg.text) == str(complex_msg.text)
mixed_msg = AIMessage(
content=[
{"type": "text", "text": "The answer is "},
{"type": "tool_use", "name": "calculate", "input": {"x": 2}, "id": "1"},
{"type": "text", "text": "42"},
]
)
assert mixed_msg.text == "The answer is 42"
assert mixed_msg.text == "The answer is 42"
assert str(mixed_msg.text) == str(mixed_msg.text)
empty_msg = HumanMessage(content=[])
assert empty_msg.text == ""
assert empty_msg.text == ""
assert str(empty_msg.text) == str(empty_msg.text)
|
BadObject
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/state/query_params_proxy.py
|
{
"start": 967,
"end": 7730
}
|
class ____(MutableMapping[str, str]):
"""
A stateless singleton that proxies ``st.query_params`` interactions
to the current script thread's QueryParams instance.
"""
def __iter__(self) -> Iterator[str]:
with get_session_state().query_params() as qp:
return iter(qp)
def __len__(self) -> int:
with get_session_state().query_params() as qp:
return len(qp)
def __str__(self) -> str:
with get_session_state().query_params() as qp:
return str(qp)
@gather_metrics("query_params.get_item")
def __getitem__(self, key: str) -> str:
with get_session_state().query_params() as qp:
try:
return qp[key]
except KeyError:
raise KeyError(self.missing_key_error_message(key))
def __delitem__(self, key: str) -> None:
with get_session_state().query_params() as qp:
del qp[key]
@gather_metrics("query_params.set_item")
def __setitem__(self, key: str, value: Any) -> None:
with get_session_state().query_params() as qp:
qp[key] = value
@gather_metrics("query_params.get_attr")
def __getattr__(self, key: str) -> str:
with get_session_state().query_params() as qp:
try:
return qp[key]
except KeyError:
raise AttributeError(self.missing_attr_error_message(key))
def __delattr__(self, key: str) -> None:
with get_session_state().query_params() as qp:
try:
del qp[key]
except KeyError:
raise AttributeError(self.missing_key_error_message(key))
@overload
def update(
self, params: SupportsKeysAndGetItem[str, str | Iterable[str]], /, **kwds: str
) -> None: ...
@overload
def update(
self, params: Iterable[tuple[str, str | Iterable[str]]], /, **kwds: str
) -> None: ...
@overload
def update(self, **kwds: str | Iterable[str]) -> None: ...
def update(self, params=(), /, **kwds) -> None: # type: ignore
"""
Update one or more values in query_params at once from a dictionary or
dictionary-like object.
See `Mapping.update()` from Python's `collections` library.
Parameters
----------
params: SupportsKeysAndGetItem[str, str | Iterable[str]] | Iterable[tuple[str, str | Iterable[str]]]
A dictionary or mapping of strings to strings (use an iterable of strings as a value to set a repeated key).
**kwds: str
Additional key/value pairs to update passed as keyword arguments.
"""
with get_session_state().query_params() as qp:
qp.update(params, **kwds)
@gather_metrics("query_params.set_attr")
def __setattr__(self, key: str, value: Any) -> None:
with get_session_state().query_params() as qp:
qp[key] = value
@gather_metrics("query_params.get_all")
def get_all(self, key: str) -> list[str]:
"""
Get a list of all query parameter values associated to a given key.
When a key is repeated as a query parameter within the URL, this method
allows all values to be obtained. In contrast, dict-like methods only
retrieve the last value when a key is repeated in the URL.
Parameters
----------
key: str
The label of the query parameter in the URL.
Returns
-------
List[str]
A list of values associated to the given key. May return zero, one,
or multiple values.
"""
with get_session_state().query_params() as qp:
return qp.get_all(key)
@gather_metrics("query_params.clear")
def clear(self) -> None:
"""
Clear all query parameters from the URL of the app.
Returns
-------
None
"""
with get_session_state().query_params() as qp:
qp.clear()
@gather_metrics("query_params.to_dict")
def to_dict(self) -> dict[str, str]:
"""
Get all query parameters as a dictionary.
This method primarily exists for internal use and is not needed for
most cases. ``st.query_params`` returns an object that inherits from
``dict`` by default.
When a key is repeated as a query parameter within the URL, this method
will return only the last value of each unique key.
Returns
-------
Dict[str,str]
A dictionary of the current query parameters in the app's URL.
"""
with get_session_state().query_params() as qp:
return qp.to_dict()
@overload
def from_dict(self, params: Iterable[tuple[str, str | Iterable[str]]]) -> None: ...
@overload
def from_dict(
self, params: SupportsKeysAndGetItem[str, str | Iterable[str]]
) -> None: ...
@gather_metrics("query_params.from_dict")
def from_dict(
self,
params: SupportsKeysAndGetItem[str, str | Iterable[str]]
| Iterable[tuple[str, str | Iterable[str]]],
) -> None:
"""
Set all of the query parameters from a dictionary or dictionary-like object.
This method primarily exists for advanced users who want to control
multiple query parameters in a single update. To set individual query
parameters, use key or attribute notation instead.
This method inherits limitations from ``st.query_params`` and can't be
used to set embedding options as described in `Embed your app \
<https://docs.streamlit.io/deploy/streamlit-community-cloud/share-your-app/embed-your-app#embed-options>`_.
To handle repeated keys, the value in a key-value pair should be a list.
.. note::
``.from_dict()`` is not a direct inverse of ``.to_dict()`` if
you are working with repeated keys. A true inverse operation is
``{key: st.query_params.get_all(key) for key in st.query_params}``.
Parameters
----------
params: dict
A dictionary used to replace the current query parameters.
Example
-------
>>> import streamlit as st
>>>
>>> st.query_params.from_dict({"foo": "bar", "baz": [1, "two"]})
"""
with get_session_state().query_params() as qp:
return qp.from_dict(params)
@staticmethod
def missing_key_error_message(key: str) -> str:
"""Returns a formatted error message for missing keys."""
return f'st.query_params has no key "{key}".'
@staticmethod
def missing_attr_error_message(key: str) -> str:
"""Returns a formatted error message for missing attributes."""
return f'st.query_params has no attribute "{key}".'
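# Hedged usage sketch (requires a running Streamlit app; the query string is
# illustrative). For a page opened as ?color=red&color=blue&page=2 the proxy
# behaves roughly like this:
import streamlit as st
page = st.query_params["page"]             # "2" (dict-style access; last value wins)
colors = st.query_params.get_all("color")  # ["red", "blue"] (all repeated values)
snapshot = st.query_params.to_dict()       # {"color": "blue", "page": "2"}
st.query_params.from_dict({"color": ["red", "blue"], "page": "3"})  # replace all params at once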
|
QueryParamsProxy
|
python
|
google__jax
|
jax/_src/mesh.py
|
{
"start": 15337,
"end": 15567
}
|
class ____:
device_kind: str
num_cores: int | None
def __repr__(self):
return (f"AbstractDevice({self._repr()})")
def _repr(self):
return f"device_kind={self.device_kind}, num_cores={self.num_cores}"
|
AbstractDevice
|
python
|
Textualize__textual
|
src/textual/containers.py
|
{
"start": 418,
"end": 655
}
|
class ____(Widget):
"""Simple container widget, with vertical layout."""
DEFAULT_CSS = """
Container {
width: 1fr;
height: 1fr;
layout: vertical;
overflow: hidden hidden;
}
"""
|
Container
|
python
|
huggingface__transformers
|
src/transformers/pipelines/depth_estimation.py
|
{
"start": 547,
"end": 6115
}
|
class ____(Pipeline):
"""
Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.
Example:
```python
>>> from transformers import pipeline
>>> depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
>>> output = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
>>> # This is a tensor with the values being the depth expressed in meters for each pixel
>>> output["predicted_depth"].shape
torch.Size([1, 384, 384])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This depth estimation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"depth-estimation"`.
See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=depth-estimation).
"""
_load_processor = False
_load_image_processor = True
_load_feature_extractor = False
_load_tokenizer = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
requires_backends(self, "vision")
self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES)
@overload
def __call__(self, inputs: Union[str, "Image.Image"], **kwargs: Any) -> dict[str, Any]: ...
@overload
def __call__(self, inputs: list[Union[str, "Image.Image"]], **kwargs: Any) -> list[dict[str, Any]]: ...
def __call__(
self, inputs: Union[str, list[str], "Image.Image", list["Image.Image"]], **kwargs: Any
) -> dict[str, Any] | list[dict[str, Any]]:
"""
Predict the depth(s) of the image(s) passed as inputs.
Args:
inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images, which must then be passed as a list.
Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL
images.
parameters (`Dict`, *optional*):
A dictionary of argument names to parameter values, to control pipeline behaviour.
The only parameter available right now is `timeout`, which is the length of time, in seconds,
that the pipeline should wait before giving up on trying to download an image.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A dictionary or a list of dictionaries containing result. If the input is a single image, will return a
dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to
the images.
The dictionaries contain the following keys:
- **predicted_depth** (`torch.Tensor`) -- The predicted depth by the model as a `torch.Tensor`.
- **depth** (`PIL.Image`) -- The predicted depth by the model as a `PIL.Image`.
"""
# After deprecation of this is completed, remove the default `None` value for `images`
if "images" in kwargs:
inputs = kwargs.pop("images")
if inputs is None:
raise ValueError("Cannot call the depth-estimation pipeline without an inputs argument!")
return super().__call__(inputs, **kwargs)
def _sanitize_parameters(self, timeout=None, parameters=None, **kwargs):
preprocess_params = {}
if timeout is not None:
preprocess_params["timeout"] = timeout
if isinstance(parameters, dict) and "timeout" in parameters:
preprocess_params["timeout"] = parameters["timeout"]
return preprocess_params, {}, {}
def preprocess(self, image, timeout=None):
image = load_image(image, timeout)
model_inputs = self.image_processor(images=image, return_tensors="pt")
model_inputs = model_inputs.to(self.dtype)
model_inputs["target_size"] = image.size[::-1]
return model_inputs
def _forward(self, model_inputs):
target_size = model_inputs.pop("target_size")
model_outputs = self.model(**model_inputs)
model_outputs["target_size"] = target_size
return model_outputs
def postprocess(self, model_outputs):
outputs = self.image_processor.post_process_depth_estimation(
model_outputs,
# this acts as `source_sizes` for ZoeDepth and as `target_sizes` for the rest of the models so do *not*
# replace with `target_sizes = [model_outputs["target_size"]]`
[model_outputs["target_size"]],
)
formatted_outputs = []
for output in outputs:
depth = output["predicted_depth"].detach().cpu().numpy()
depth = (depth - depth.min()) / (depth.max() - depth.min())
depth = Image.fromarray((depth * 255).astype("uint8"))
formatted_outputs.append({"predicted_depth": output["predicted_depth"], "depth": depth})
return formatted_outputs[0] if len(outputs) == 1 else formatted_outputs
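# Hedged usage sketch (output file name is illustrative; the model id comes
# from the docstring above): a list input yields a list of dicts, the "depth"
# entry is a PIL image that can be saved directly, and `timeout` bounds the
# image download as documented above.
from transformers import pipeline
depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
results = depth_estimator(
    ["http://images.cocodataset.org/val2017/000000039769.jpg"] * 2,
    timeout=30.0,
)
results[0]["depth"].save("depth_0.png")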
|
DepthEstimationPipeline
|
python
|
tensorflow__tensorflow
|
tensorflow/python/trackable/data_structures.py
|
{
"start": 5671,
"end": 6083
}
|
class ____(ValueError):
def __init__(self, value): # pylint: disable=super-init-not-called
self._value = value
def __str__(self):
return ("Only trackable objects (such as Layers or Optimizers) may be "
f"stored in a List object. Got {self._value}, which does not "
"inherit from Trackable.")
@tf_export("__internal__.tracking.TrackableDataStructure", v1=[])
|
_UntrackableError
|
python
|
scipy__scipy
|
scipy/fft/_pocketfft/tests/test_basic.py
|
{
"start": 4272,
"end": 4399
}
|
class ____(_TestFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
|
TestSingleFFT
|
python
|
wandb__wandb
|
wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py
|
{
"start": 17195,
"end": 18094
}
|
class ____(Node):
__slots__ = ('loc', 'name', 'arguments',)
_fields = ('name', 'arguments',)
def __init__(self, name, arguments=None, loc=None):
self.loc = loc
self.name = name
self.arguments = arguments
def __eq__(self, other):
return (
self is other or (
isinstance(other, Directive) and
# self.loc == other.loc and
self.name == other.name and
self.arguments == other.arguments
)
)
def __repr__(self):
return ('Directive('
'name={self.name!r}'
', arguments={self.arguments!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.name,
self.arguments,
self.loc
)
def __hash__(self):
return id(self)
|
Directive
|
python
|
pypa__pip
|
src/pip/_vendor/packaging/specifiers.py
|
{
"start": 878,
"end": 1186
}
|
class ____(ValueError):
"""
Raised when attempting to create a :class:`Specifier` with a specifier
string that is invalid.
>>> Specifier("lolwat")
Traceback (most recent call last):
...
packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'
"""
|
InvalidSpecifier
|
python
|
Pylons__pyramid
|
tests/test_events.py
|
{
"start": 5622,
"end": 8822
}
|
class ____(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def _makeOne(self, *ifaces, **predicates):
from pyramid.events import subscriber
return subscriber(*ifaces, **predicates)
def test_register_single(self):
from zope.interface import Interface
class IFoo(Interface):
pass
class IBar(Interface):
pass
dec = self._makeOne(IFoo)
def foo(): # pragma: no cover
pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.subscribed, [(foo, IFoo)])
def test_register_multi(self):
from zope.interface import Interface
class IFoo(Interface):
pass
class IBar(Interface):
pass
dec = self._makeOne(IFoo, IBar)
def foo(): # pragma: no cover
pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.subscribed, [(foo, IFoo), (foo, IBar)])
def test_register_none_means_all(self):
from zope.interface import Interface
dec = self._makeOne()
def foo(): # pragma: no cover
pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.subscribed, [(foo, Interface)])
def test_register_objectevent(self):
from zope.interface import Interface
class IFoo(Interface):
pass
class IBar(Interface):
pass
dec = self._makeOne([IFoo, IBar])
def foo(): # pragma: no cover
pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.subscribed, [(foo, [IFoo, IBar])])
def test___call__(self):
dec = self._makeOne()
dummy_venusian = DummyVenusian()
dec.venusian = dummy_venusian
def foo(): # pragma: no cover
pass
dec(foo)
self.assertEqual(
dummy_venusian.attached, [(foo, dec.register, 'pyramid', 1)]
)
def test___call___with_venusian_args(self):
dec = self._makeOne(_category='foo', _depth=1)
dummy_venusian = DummyVenusian()
dec.venusian = dummy_venusian
def foo(): # pragma: no cover
pass
dec(foo)
self.assertEqual(
dummy_venusian.attached, [(foo, dec.register, 'foo', 2)]
)
def test_register_with_predicates(self):
from zope.interface import Interface
dec = self._makeOne(a=1)
def foo(): # pragma: no cover
pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.subscribed, [(foo, Interface, {'a': 1})])
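# Hedged sketch of the decorator these tests exercise (module layout is
# illustrative): functions decorated with @subscriber are registered when the
# Configurator scans their module via config.scan().
from pyramid.events import NewRequest, subscriber
@subscriber(NewRequest)
def log_new_request(event):
    print("request url:", event.request.url)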
|
TestSubscriber
|
python
|
pallets__click
|
src/click/exceptions.py
|
{
"start": 4484,
"end": 6944
}
|
class ____(BadParameter):
"""Raised if click required an option or argument but it was not
provided when invoking the script.
.. versionadded:: 4.0
:param param_type: a string that indicates the type of the parameter.
The default is to inherit the parameter type from
the given `param`. Valid values are ``'parameter'``,
``'option'`` or ``'argument'``.
"""
def __init__(
self,
message: str | None = None,
ctx: Context | None = None,
param: Parameter | None = None,
param_hint: cabc.Sequence[str] | str | None = None,
param_type: str | None = None,
) -> None:
super().__init__(message or "", ctx, param, param_hint)
self.param_type = param_type
def format_message(self) -> str:
if self.param_hint is not None:
param_hint: cabc.Sequence[str] | str | None = self.param_hint
elif self.param is not None:
param_hint = self.param.get_error_hint(self.ctx) # type: ignore
else:
param_hint = None
param_hint = _join_param_hints(param_hint)
param_hint = f" {param_hint}" if param_hint else ""
param_type = self.param_type
if param_type is None and self.param is not None:
param_type = self.param.param_type_name
msg = self.message
if self.param is not None:
msg_extra = self.param.type.get_missing_message(
param=self.param, ctx=self.ctx
)
if msg_extra:
if msg:
msg += f". {msg_extra}"
else:
msg = msg_extra
msg = f" {msg}" if msg else ""
# Translate param_type for known types.
if param_type == "argument":
missing = _("Missing argument")
elif param_type == "option":
missing = _("Missing option")
elif param_type == "parameter":
missing = _("Missing parameter")
else:
missing = _("Missing {param_type}").format(param_type=param_type)
return f"{missing}{param_hint}.{msg}"
def __str__(self) -> str:
if not self.message:
param_name = self.param.name if self.param else None
return _("Missing parameter: {param_name}").format(param_name=param_name)
else:
return self.message
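# Hedged sketch (command and option names are illustrative): raising
# MissingParameter from a callback produces the "Missing option ..." message
# assembled by format_message() above.
import click
@click.command()
@click.option("--name")
def greet(name):
    if name is None:
        raise click.MissingParameter(param_hint="'--name'", param_type="option")
    click.echo(f"Hello {name}!")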
|
MissingParameter
|
python
|
django__django
|
tests/sitemaps_tests/urls/http.py
|
{
"start": 679,
"end": 855
}
|
class ____(Sitemap):
changefreq = "never"
priority = 0.5
i18n = True
def items(self):
return I18nTestModel.objects.order_by("pk").all()
|
SimpleI18nSitemap
|
python
|
huggingface__transformers
|
src/transformers/models/levit/image_processing_levit.py
|
{
"start": 1457,
"end": 16477
}
|
class ____(BaseImageProcessor):
r"""
Constructs a LeViT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the shortest edge of the input to int(256/224 * `size`). Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will
be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest
edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this
value i.e, if height > width, then image will be rescaled to `(size["shortest_edge"] * height / width,
size["shortest_edge"])`. Can be overridden by the `size` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether or not to center crop the input to `(crop_size["height"], crop_size["width"])`. Can be overridden
by the `do_center_crop` parameter in the `preprocess` method.
crop_size (`Dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired image size after `center_crop`. Can be overridden by the `crop_size` parameter in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
`preprocess` method.
image_mean (`list[int]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`list[int]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 224}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image.
If size is a dict with keys "width" and "height", the image will be resized to `(size["height"],
size["width"])`.
If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`.
The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled
to `(size["shortest_edge"] * height / width, size["shortest_edge"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
`c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
i.e, if height > width, then image will be rescaled to (size * height / width, size).
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size_dict = get_size_dict(size, default_to_square=False)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
shortest_edge = int((256 / 224) * size["shortest_edge"])
output_size = get_resize_output_image_size(
image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format
)
size_dict = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
)
return resize(
image,
size=(size_dict["height"], size_dict["width"]),
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, Iterable[float]]] = None,
image_std: Optional[Union[float, Iterable[float]]] = None,
return_tensors: Optional[TensorType] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> BatchFeature:
"""
Preprocess an image or batch of images to be used as input to a LeViT model.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
`c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
i.e, if height > width, then image will be rescaled to (size * height / width, size).
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the output image after center cropping. Crops images to (crop_size["height"],
crop_size["width"]).
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image pixel values by `rescaling_factor` - typical to values between 0 and 1.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Factor to rescale the image pixel values by.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image pixel values by `image_mean` and `image_std`.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Mean to normalize the image pixel values by.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to normalize the image pixel values by.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size")
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [self.resize(image, size, resample, input_data_format=input_data_format) for image in images]
if do_center_crop:
images = [self.center_crop(image, crop_size, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [
self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["LevitImageProcessor"]
|
LevitImageProcessor
|
python
|
doocs__leetcode
|
solution/2700-2799/2760.Longest Even Odd Subarray With Threshold/Solution.py
|
{
"start": 0,
"end": 412
}
|
class ____:
def longestAlternatingSubarray(self, nums: List[int], threshold: int) -> int:
ans, n = 0, len(nums)
for l in range(n):
if nums[l] % 2 == 0 and nums[l] <= threshold:
r = l + 1
while r < n and nums[r] % 2 != nums[r - 1] % 2 and nums[r] <= threshold:
r += 1
ans = max(ans, r - l)
return ans
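# Worked example (comments only, since the class name is masked above):
# nums = [3, 2, 5, 4], threshold = 5. l = 0 is skipped (3 is odd); at l = 1,
# nums[1] = 2 is even and <= 5, and r advances over 5 (odd) and 4 (even)
# because parities alternate and every value stays <= 5, so r stops at n and
# ans = r - l = 3. No later start can beat 3, so the method returns 3.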
|
Solution
|
python
|
spack__spack
|
lib/spack/spack/error.py
|
{
"start": 5476,
"end": 5567
}
|
class ____(SpackError):
"""Superclass for all Spack config related errors."""
|
ConfigError
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/chat_engine/condense_plus_context.py
|
{
"start": 2784,
"end": 16522
}
|
class ____(BaseChatEngine):
"""
Condensed Conversation & Context Chat Engine.
First condense a conversation and latest user message to a standalone question
Then build a context for the standalone question from a retriever,
Then pass the context along with prompt and user message to LLM to generate a response.
"""
def __init__(
self,
retriever: BaseRetriever,
llm: LLM,
memory: BaseMemory,
context_prompt: Optional[Union[str, PromptTemplate]] = None,
context_refine_prompt: Optional[Union[str, PromptTemplate]] = None,
condense_prompt: Optional[Union[str, PromptTemplate]] = None,
system_prompt: Optional[str] = None,
skip_condense: bool = False,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = False,
):
self._retriever = retriever
self._llm = llm
self._memory = memory
context_prompt = context_prompt or DEFAULT_CONTEXT_PROMPT_TEMPLATE
if isinstance(context_prompt, str):
context_prompt = PromptTemplate(context_prompt)
self._context_prompt_template = context_prompt
context_refine_prompt = (
context_refine_prompt or DEFAULT_CONTEXT_REFINE_PROMPT_TEMPLATE
)
if isinstance(context_refine_prompt, str):
context_refine_prompt = PromptTemplate(context_refine_prompt)
self._context_refine_prompt_template = context_refine_prompt
condense_prompt = condense_prompt or DEFAULT_CONDENSE_PROMPT_TEMPLATE
if isinstance(condense_prompt, str):
condense_prompt = PromptTemplate(condense_prompt)
self._condense_prompt_template = condense_prompt
self._system_prompt = system_prompt
self._skip_condense = skip_condense
self._node_postprocessors = node_postprocessors or []
self.callback_manager = callback_manager or CallbackManager([])
for node_postprocessor in self._node_postprocessors:
node_postprocessor.callback_manager = self.callback_manager
self._token_counter = TokenCounter()
self._verbose = verbose
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
llm: Optional[LLM] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
system_prompt: Optional[str] = None,
context_prompt: Optional[Union[str, PromptTemplate]] = None,
context_refine_prompt: Optional[Union[str, PromptTemplate]] = None,
condense_prompt: Optional[Union[str, PromptTemplate]] = None,
skip_condense: bool = False,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
verbose: bool = False,
**kwargs: Any,
) -> "CondensePlusContextChatEngine":
"""Initialize a CondensePlusContextChatEngine from default parameters."""
llm = llm or Settings.llm
chat_history = chat_history or []
memory = memory or ChatMemoryBuffer.from_defaults(
chat_history=chat_history, token_limit=llm.metadata.context_window - 256
)
return cls(
retriever=retriever,
llm=llm,
memory=memory,
context_prompt=context_prompt,
context_refine_prompt=context_refine_prompt,
condense_prompt=condense_prompt,
skip_condense=skip_condense,
callback_manager=Settings.callback_manager,
node_postprocessors=node_postprocessors,
system_prompt=system_prompt,
verbose=verbose,
)
def _condense_question(
self, chat_history: List[ChatMessage], latest_message: str
) -> str:
"""Condense a conversation history and latest user message to a standalone question."""
if self._skip_condense or len(chat_history) == 0:
return latest_message
chat_history_str = messages_to_history_str(chat_history)
logger.debug(chat_history_str)
llm_input = self._condense_prompt_template.format(
chat_history=chat_history_str, question=latest_message
)
return str(self._llm.complete(llm_input))
async def _acondense_question(
self, chat_history: List[ChatMessage], latest_message: str
) -> str:
"""Condense a conversation history and latest user message to a standalone question."""
if self._skip_condense or len(chat_history) == 0:
return latest_message
chat_history_str = messages_to_history_str(chat_history)
logger.debug(chat_history_str)
llm_input = self._condense_prompt_template.format(
chat_history=chat_history_str, question=latest_message
)
return str(await self._llm.acomplete(llm_input))
def _get_nodes(self, message: str) -> List[NodeWithScore]:
"""Generate context information from a message."""
nodes = self._retriever.retrieve(message)
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(
nodes, query_bundle=QueryBundle(message)
)
return nodes
async def _aget_nodes(self, message: str) -> List[NodeWithScore]:
"""Generate context information from a message."""
nodes = await self._retriever.aretrieve(message)
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(
nodes, query_bundle=QueryBundle(message)
)
return nodes
def _get_response_synthesizer(
self, chat_history: List[ChatMessage], streaming: bool = False
) -> CompactAndRefine:
system_prompt = self._system_prompt or ""
qa_messages = get_prefix_messages_with_context(
self._context_prompt_template,
system_prompt,
[],
chat_history,
self._llm.metadata.system_role,
)
refine_messages = get_prefix_messages_with_context(
self._context_refine_prompt_template,
system_prompt,
[],
chat_history,
self._llm.metadata.system_role,
)
return get_response_synthesizer(
self._llm,
self.callback_manager,
qa_messages,
refine_messages,
streaming,
qa_function_mappings=self._context_prompt_template.function_mappings,
refine_function_mappings=self._context_refine_prompt_template.function_mappings,
)
def _run_c3(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
streaming: bool = False,
) -> Tuple[CompactAndRefine, ToolOutput, List[NodeWithScore]]:
if chat_history is not None:
self._memory.set(chat_history)
chat_history = self._memory.get(input=message)
# Condense conversation history and latest message to a standalone question
condensed_question = self._condense_question(chat_history, message) # type: ignore
logger.info(f"Condensed question: {condensed_question}")
if self._verbose:
print(f"Condensed question: {condensed_question}")
# get the context nodes using the condensed question
context_nodes = self._get_nodes(condensed_question)
context_source = ToolOutput(
tool_name="retriever",
content=str(context_nodes),
raw_input={"message": condensed_question},
raw_output=context_nodes,
)
# build the response synthesizer
response_synthesizer = self._get_response_synthesizer(
chat_history, streaming=streaming
)
return response_synthesizer, context_source, context_nodes
async def _arun_c3(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
streaming: bool = False,
) -> Tuple[CompactAndRefine, ToolOutput, List[NodeWithScore]]:
if chat_history is not None:
await self._memory.aset(chat_history)
chat_history = await self._memory.aget(input=message)
# Condense conversation history and latest message to a standalone question
condensed_question = await self._acondense_question(chat_history, message) # type: ignore
logger.info(f"Condensed question: {condensed_question}")
if self._verbose:
print(f"Condensed question: {condensed_question}")
# get the context nodes using the condensed question
context_nodes = await self._aget_nodes(condensed_question)
context_source = ToolOutput(
tool_name="retriever",
content=str(context_nodes),
raw_input={"message": condensed_question},
raw_output=context_nodes,
)
# build the response synthesizer
response_synthesizer = self._get_response_synthesizer(
chat_history, streaming=streaming
)
return response_synthesizer, context_source, context_nodes
@trace_method("chat")
def chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AgentChatResponse:
synthesizer, context_source, context_nodes = self._run_c3(message, chat_history)
response = synthesizer.synthesize(message, context_nodes)
user_message = ChatMessage(content=message, role=MessageRole.USER)
assistant_message = ChatMessage(
content=str(response), role=MessageRole.ASSISTANT
)
self._memory.put(user_message)
self._memory.put(assistant_message)
return AgentChatResponse(
response=str(response),
sources=[context_source],
source_nodes=context_nodes,
)
@trace_method("chat")
def stream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
synthesizer, context_source, context_nodes = self._run_c3(
message, chat_history, streaming=True
)
response = synthesizer.synthesize(message, context_nodes)
assert isinstance(response, StreamingResponse)
def wrapped_gen(response: StreamingResponse) -> ChatResponseGen:
full_response = ""
for token in response.response_gen:
full_response += token
yield ChatResponse(
message=ChatMessage(
content=full_response, role=MessageRole.ASSISTANT
),
delta=token,
)
user_message = ChatMessage(content=message, role=MessageRole.USER)
assistant_message = ChatMessage(
content=full_response, role=MessageRole.ASSISTANT
)
self._memory.put(user_message)
self._memory.put(assistant_message)
return StreamingAgentChatResponse(
chat_stream=wrapped_gen(response),
sources=[context_source],
source_nodes=context_nodes,
is_writing_to_memory=False,
)
@trace_method("chat")
async def achat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AgentChatResponse:
synthesizer, context_source, context_nodes = await self._arun_c3(
message, chat_history
)
response = await synthesizer.asynthesize(message, context_nodes)
user_message = ChatMessage(content=message, role=MessageRole.USER)
assistant_message = ChatMessage(
content=str(response), role=MessageRole.ASSISTANT
)
await self._memory.aput(user_message)
await self._memory.aput(assistant_message)
return AgentChatResponse(
response=str(response),
sources=[context_source],
source_nodes=context_nodes,
)
@trace_method("chat")
async def astream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
synthesizer, context_source, context_nodes = await self._arun_c3(
message, chat_history, streaming=True
)
response = await synthesizer.asynthesize(message, context_nodes)
assert isinstance(response, AsyncStreamingResponse)
async def wrapped_gen(response: AsyncStreamingResponse) -> ChatResponseAsyncGen:
full_response = ""
async for token in response.async_response_gen():
full_response += token
yield ChatResponse(
message=ChatMessage(
content=full_response, role=MessageRole.ASSISTANT
),
delta=token,
)
user_message = ChatMessage(content=message, role=MessageRole.USER)
assistant_message = ChatMessage(
content=full_response, role=MessageRole.ASSISTANT
)
await self._memory.aput(user_message)
await self._memory.aput(assistant_message)
return StreamingAgentChatResponse(
achat_stream=wrapped_gen(response),
sources=[context_source],
source_nodes=context_nodes,
is_writing_to_memory=False,
)
def reset(self) -> None:
# Clear chat history
self._memory.reset()
@property
def chat_history(self) -> List[ChatMessage]:
"""Get chat history."""
return self._memory.get_all()
|
CondensePlusContextChatEngine
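# A hedged usage sketch for the chat engine above. It assumes the llama-index
# convenience entry point `index.as_chat_engine(chat_mode="condense_plus_context")`
# and a default LLM/embedding setup already being configured; constructor details vary
# between llama-index releases, so treat the names below as illustrative.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

documents = SimpleDirectoryReader("./data").load_data()   # any small folder of documents
index = VectorStoreIndex.from_documents(documents)
chat_engine = index.as_chat_engine(chat_mode="condense_plus_context", verbose=True)
print(chat_engine.chat("Summarize the key points."))       # condense -> retrieve -> synthesize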
|
python
|
huggingface__transformers
|
src/transformers/models/apertus/modeling_apertus.py
|
{
"start": 15345,
"end": 18484
}
|
class ____(ApertusPreTrainedModel):
def __init__(self, config: ApertusConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[ApertusDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = ApertusRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
|
ApertusModel
|
python
|
Textualize__textual
|
docs/examples/guide/styles/outline01.py
|
{
"start": 401,
"end": 773
}
|
class ____(App):
def compose(self) -> ComposeResult:
self.widget = Static(TEXT)
yield self.widget
def on_mount(self) -> None:
self.widget.styles.background = "darkblue"
self.widget.styles.width = "50%"
self.widget.styles.outline = ("heavy", "yellow")
if __name__ == "__main__":
app = OutlineApp()
app.run()
|
OutlineApp
|
python
|
huggingface__transformers
|
src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py
|
{
"start": 38277,
"end": 38532
}
|
class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding": False,
"return_mm_token_type_ids": False,
},
"videos_kwargs": {"return_metadata": True},
}
|
Qwen2_5_VLProcessorKwargs
|
python
|
walkccc__LeetCode
|
solutions/216. Combination Sum III/216.py
|
{
"start": 0,
"end": 377
}
|
class ____:
def combinationSum3(self, k: int, n: int) -> list[list[int]]:
ans = []
def dfs(k: int, n: int, s: int, path: list[int]) -> None:
if k == 0 and n == 0:
ans.append(path)
return
if k == 0 or n < 0:
return
for i in range(s, 10):
dfs(k - 1, n - i, i + 1, path + [i])
dfs(k, n, 1, [])
return ans
|
Solution
|
python
|
google__jax
|
tests/tree_util_test.py
|
{
"start": 3603,
"end": 4548
}
|
class ____:
def __init__(self, structured, *, leaves=None, treedef=None):
if treedef is None:
leaves, treedef = tree_util.tree_flatten(structured)
self._structured = structured
self.treedef = treedef
self.leaves = leaves
def __hash__(self):
return hash(self.structured)
def __eq__(self, other):
return self.structured == other.structured
def __repr__(self):
return f"FlatCache({self.structured!r})"
@property
def structured(self):
if self._structured is None:
self._structured = tree_util.tree_unflatten(self.treedef, self.leaves)
return self._structured
def tree_flatten(self):
return self.leaves, self.treedef
@classmethod
def tree_unflatten(cls, meta, data):
if not tree_util.all_leaves(data):
data, meta = tree_util.tree_flatten(tree_util.tree_unflatten(meta, data))
return FlatCache(None, leaves=data, treedef=meta)
@tree_util.register_static
|
FlatCache
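# A tiny usage sketch for the pytree test helper above (the class name `FlatCache`
# follows the record's target; `jax.tree_util` is the module the helper already uses).
from jax import tree_util

fc = FlatCache({"a": 1, "b": (2, 3)})
print(fc.leaves)                                                          # [1, 2, 3]
print(tree_util.tree_unflatten(fc.treedef, fc.leaves) == fc.structured)   # True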
|
python
|
huggingface__transformers
|
src/transformers/models/mlcd/modeling_mlcd.py
|
{
"start": 17451,
"end": 19777
}
|
class ____(PreTrainedModel):
config: MLCDVisionConfig
base_model_prefix = "mlcd"
supports_gradient_checkpointing = True
accepts_loss_kwargs = False
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": MLCDEncoderLayer,
"attentions": MLCDAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, MLCDVisionEmbeddings):
factor = self.config.initializer_factor
init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
elif isinstance(module, MLCDAttention):
factor = self.config.initializer_factor
in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
out_proj_std = (module.embed_dim**-0.5) * factor
init.normal_(module.q_proj.weight, std=in_proj_std)
init.normal_(module.k_proj.weight, std=in_proj_std)
init.normal_(module.v_proj.weight, std=in_proj_std)
init.normal_(module.out_proj.weight, std=out_proj_std)
elif isinstance(module, MLCDMLP):
factor = self.config.initializer_factor
in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
init.normal_(module.fc1.weight, std=fc_std)
init.normal_(module.fc2.weight, std=in_proj_std)
elif isinstance(module, MLCDVisionTransformer):
factor = self.config.initializer_factor
pos_emb_std = (module.config.hidden_size // module.config.num_attention_heads // 2) ** -0.5 * factor
init.normal_(module.class_pos_emb, mean=0.0, std=pos_emb_std)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, nn.Linear) and module.bias is not None:
init.zeros_(module.bias)
|
MLCDPreTrainedModel
|
python
|
huggingface__transformers
|
src/transformers/models/cvt/modeling_cvt.py
|
{
"start": 11932,
"end": 13970
}
|
class ____(nn.Module):
"""
    CvtLayer composed of attention layers, normalization and multi-layer perceptrons (MLPs).
"""
def __init__(
self,
num_heads,
embed_dim,
kernel_size,
padding_q,
padding_kv,
stride_q,
stride_kv,
qkv_projection_method,
qkv_bias,
attention_drop_rate,
drop_rate,
mlp_ratio,
drop_path_rate,
with_cls_token=True,
):
super().__init__()
self.attention = CvtAttention(
num_heads,
embed_dim,
kernel_size,
padding_q,
padding_kv,
stride_q,
stride_kv,
qkv_projection_method,
qkv_bias,
attention_drop_rate,
drop_rate,
with_cls_token,
)
self.intermediate = CvtIntermediate(embed_dim, mlp_ratio)
self.output = CvtOutput(embed_dim, mlp_ratio, drop_rate)
self.drop_path = CvtDropPath(drop_prob=drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_before = nn.LayerNorm(embed_dim)
self.layernorm_after = nn.LayerNorm(embed_dim)
def forward(self, hidden_state, height, width):
self_attention_output = self.attention(
self.layernorm_before(hidden_state), # in Cvt, layernorm is applied before self-attention
height,
width,
)
attention_output = self_attention_output
attention_output = self.drop_path(attention_output)
# first residual connection
hidden_state = attention_output + hidden_state
# in Cvt, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_state)
layer_output = self.intermediate(layer_output)
# second residual connection is done here
layer_output = self.output(layer_output, hidden_state)
layer_output = self.drop_path(layer_output)
return layer_output
|
CvtLayer
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1007384,
"end": 1007858
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of UnarchiveProjectV2Item"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "item")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
item = sgqlc.types.Field("ProjectV2Item", graphql_name="item")
"""The item unarchived from the project."""
|
UnarchiveProjectV2ItemPayload
|
python
|
huggingface__transformers
|
tests/models/hubert/test_modeling_hubert.py
|
{
"start": 11516,
"end": 16372
}
|
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (HubertForCTC, HubertForSequenceClassification, HubertModel) if is_torch_available() else ()
pipeline_model_mapping = (
{
"audio-classification": HubertForSequenceClassification,
"automatic-speech-recognition": HubertForCTC,
"feature-extraction": HubertModel,
}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = HubertModelTester(self)
self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_ctc_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_loss(*config_and_inputs)
def test_seq_classifier_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_loss(*config_and_inputs)
def test_ctc_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_training(*config_and_inputs)
def test_seq_classifier_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_training(*config_and_inputs)
def test_labels_out_of_vocab(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
@unittest.skip(reason="Hubert has no inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Hubert has no inputs_embeds")
def test_forward_signature(self):
pass
# Hubert cannot resize token embeddings
    # since it has no token embeddings
    @unittest.skip(reason="Hubert has no token embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip(reason="Hubert has no inputs_embeds")
def test_model_get_set_embeddings(self):
pass
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# force eager attention to support output attentions
config._attn_implementation = "eager"
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
# set layer drop to 0
model.config.layerdrop = 0.0
input_values = inputs_dict["input_values"]
input_lengths = torch.tensor(
[input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
)
output_lengths = model._get_feat_extract_output_lengths(input_lengths)
labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
inputs_dict["labels"] = labels
outputs = model(**inputs_dict)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
model = HubertModel.from_pretrained("facebook/hubert-base-ls960")
self.assertIsNotNone(model)
@require_torch
|
HubertModelTest
|
python
|
pydata__xarray
|
asv_bench/benchmarks/indexing.py
|
{
"start": 4253,
"end": 5081
}
|
class ____(Base):
@parameterized(["key"], [list(basic_indexes.keys())])
def time_assignment_basic(self, key):
ind = basic_indexes[key]
val = basic_assignment_values[key]
self.ds["var1"][ind.get("x", slice(None)), ind.get("y", slice(None))] = val
@parameterized(["key"], [list(outer_indexes.keys())])
def time_assignment_outer(self, key):
ind = outer_indexes[key]
val = outer_assignment_values[key]
self.ds["var1"][ind.get("x", slice(None)), ind.get("y", slice(None))] = val
@parameterized(["key"], [list(vectorized_indexes.keys())])
def time_assignment_vectorized(self, key):
ind = vectorized_indexes[key]
val = vectorized_assignment_values[key]
self.ds["var1"][ind.get("x", slice(None)), ind.get("y", slice(None))] = val
|
Assignment
|
python
|
catalyst-team__catalyst
|
catalyst/callbacks/metrics/recsys.py
|
{
"start": 8183,
"end": 12141
}
|
class ____(BatchMetricCallback):
"""MRR metric callback.
Computes MRR@topk for the specified values of `topk`.
Args:
input_key: input key to use for metric calculation, specifies our `y_pred`
target_key: output key to use for metric calculation, specifies our `y_true`
topk: specifies which MRR@K to log
log_on_batch: boolean flag to log computed metrics every batch
prefix: metric prefix
suffix: metric suffix
Examples:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# sample data
num_users, num_features, num_items = int(1e4), int(1e1), 10
X = torch.rand(num_users, num_features)
y = (torch.rand(num_users, num_items) > 0.5).to(torch.float32)
# pytorch loaders
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, num_items)
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])
# model training
runner = dl.SupervisedRunner(
input_key="features",
output_key="logits",
target_key="targets",
loss_key="loss"
)
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
num_epochs=3,
verbose=True,
callbacks=[
dl.BatchTransformCallback(
transform=torch.sigmoid,
scope="on_batch_end",
input_key="logits",
output_key="scores"
),
dl.CriterionCallback(
input_key="logits", target_key="targets", metric_key="loss"
),
dl.AUCCallback(input_key="scores", target_key="targets"),
dl.HitrateCallback(
input_key="scores", target_key="targets", topk=(1, 3, 5)
),
dl.MRRCallback(input_key="scores", target_key="targets", topk=(1, 3, 5)),
dl.MAPCallback(input_key="scores", target_key="targets", topk=(1, 3, 5)),
dl.NDCGCallback(input_key="scores", target_key="targets", topk=(1, 3)),
dl.OptimizerCallback(metric_key="loss"),
dl.SchedulerCallback(),
dl.CheckpointCallback(
logdir="./logs", loader_key="valid", metric_key="loss", minimize=True
),
]
)
.. note::
Metric names depending on input parameters:
- ``topk = (1,) or None`` ---> ``"mrr01"``
- ``topk = (1, 3)`` ---> ``"mrr01"``, ``"mrr03"``
- ``topk = (1, 3, 5)`` ---> ``"mrr01"``, ``"mrr03"``, ``"mrr05"``
You can find them in ``runner.batch_metrics``, ``runner.loader_metrics`` or
``runner.epoch_metrics``.
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
input_key: str,
target_key: str,
topk: Iterable[int] = None,
log_on_batch: bool = True,
prefix: str = None,
suffix: str = None,
):
"""Init."""
super().__init__(
metric=MRRMetric(topk=topk, prefix=prefix, suffix=suffix),
input_key=input_key,
target_key=target_key,
log_on_batch=log_on_batch,
)
|
MRRCallback
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 50784,
"end": 50934
}
|
class ____:
xlDataBarBorderNone = 0 # from enum XlDataBarBorderType
xlDataBarBorderSolid = 1 # from enum XlDataBarBorderType
|
DataBarBorderType
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_deprecations.py
|
{
"start": 46185,
"end": 47117
}
|
class ____(fixtures.MappedTest, AssertsCompiledSQL):
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"t2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"t1t2_1",
metadata,
Column("t1id", Integer, ForeignKey("t1.id")),
Column("t2id", Integer, ForeignKey("t2.id")),
)
Table(
"t1t2_2",
metadata,
Column("t1id", Integer, ForeignKey("t1.id")),
Column("t2id", Integer, ForeignKey("t2.id")),
)
|
MultiplePathTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/intersection-of-two-arrays.py
|
{
"start": 41,
"end": 812
}
|
class ____(object):
def intersection(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
if len(nums1) > len(nums2):
return self.intersection(nums2, nums1)
lookup = set()
for i in nums1:
lookup.add(i)
res = []
for i in nums2:
if i in lookup:
res += i,
lookup.discard(i)
return res
def intersection2(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
return list(set(nums1) & set(nums2))
# Time: O(max(m, n) * log(max(m, n)))
# Space: O(1)
# Binary search solution.
|
Solution
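# A quick check of the two approaches above (class name `Solution` per the record's
# target; the input is the classic LeetCode 349 sample).
sol = Solution()
print(sol.intersection([1, 2, 2, 1], [2, 2]))                  # [2]
print(sorted(sol.intersection2([4, 9, 5], [9, 4, 9, 8, 4])))   # [4, 9] (set order is unspecified, hence sorted)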
|
python
|
ethereum__web3.py
|
web3/utils/subscriptions.py
|
{
"start": 7876,
"end": 8722
}
|
class ____(EthSubscription[Union[HexBytes, TxData]]):
def __init__(
self,
full_transactions: bool = False,
label: str | None = None,
handler: PendingTxSubscriptionHandler | None = None,
handler_context: dict[str, Any] | None = None,
parallelize: bool | None = None,
) -> None:
self.full_transactions = full_transactions
super().__init__(
subscription_params=("newPendingTransactions", full_transactions),
handler=handler,
handler_context=handler_context,
label=label,
parallelize=parallelize,
)
SyncingSubscriptionContext = EthSubscriptionContext["SyncingSubscription", SyncProgress]
SyncingSubscriptionHandler = Callable[
[SyncingSubscriptionContext], Coroutine[Any, Any, None]
]
|
PendingTxSubscription
|
python
|
walkccc__LeetCode
|
solutions/2585. Number of Ways to Earn Points/2585.py
|
{
"start": 0,
"end": 596
}
|
class ____:
def waysToReachTarget(self, target: int, types: list[list[int]]) -> int:
MOD = 1_000_000_007
# dp[i][j] := the number of ways to earn j points with the first i types
dp = [[0] * (target + 1) for _ in range(len(types) + 1)]
dp[0][0] = 1
for i in range(1, len(types) + 1):
count = types[i - 1][0]
mark = types[i - 1][1]
for j in range(target + 1):
for solved in range(count + 1):
if j - solved * mark >= 0:
dp[i][j] += dp[i - 1][j - solved * mark]
dp[i][j] %= MOD
return dp[len(types)][target]
|
Solution
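# A small worked check of the DP above (class name `Solution` per the record's target).
# For target=6 and types=[[6, 1], [3, 2], [2, 3]] there are 7 ways:
# 6x1, 4x1+1x2, 2x1+2x2, 3x2, 3x1+1x3, 1x1+1x2+1x3, 2x3.
print(Solution().waysToReachTarget(6, [[6, 1], [3, 2], [2, 3]]))   # 7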
|
python
|
openai__openai-python
|
src/openai/types/evals/runs/output_item_list_response.py
|
{
"start": 404,
"end": 1457
}
|
class ____(BaseModel):
name: str
"""The name of the grader."""
passed: bool
"""Whether the grader considered the output a pass."""
score: float
"""The numeric score produced by the grader."""
sample: Optional[Dict[str, object]] = None
"""Optional sample or intermediate data produced by the grader."""
type: Optional[str] = None
"""The grader type (for example, "string-check-grader")."""
if TYPE_CHECKING:
# Some versions of Pydantic <2.8.0 have a bug and don’t allow assigning a
# value to this field, so for compatibility we avoid doing it at runtime.
__pydantic_extra__: Dict[str, object] = FieldInfo(init=False) # pyright: ignore[reportIncompatibleVariableOverride]
# Stub to indicate that arbitrary properties are accepted.
# To access properties that are not valid identifiers you can use `getattr`, e.g.
# `getattr(obj, '$type')`
def __getattr__(self, attr: str) -> object: ...
else:
__pydantic_extra__: Dict[str, object]
|
Result
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/tasks.py
|
{
"start": 157884,
"end": 160792
}
|
class ____(Request):
"""
    Indicates that the task is closed
:param force: Allows forcing state change even if transition is not supported
:type force: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "close"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "Allows forcing state change even if transition is not supported",
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self, task, force=False, status_reason=None, status_message=None, **kwargs
):
super(CloseRequest, self).__init__(**kwargs)
self.force = force
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("task")
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self):
return self._property_status_reason
@status_reason.setter
def status_reason(self, value):
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self):
return self._property_status_message
@status_message.setter
def status_message(self, value):
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
|
CloseRequest
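# A minimal construction sketch using only the request class shown above (class name
# `CloseRequest` per the record's target). The task id and messages are placeholders,
# and actually dispatching the request through a ClearML session is outside this excerpt.
req = CloseRequest(task="<task-id>", force=True, status_reason="run finished")
print(req.task, req.force)   # <task-id> True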
|
python
|
kamyu104__LeetCode-Solutions
|
Python/rearrange-string-k-distance-apart.py
|
{
"start": 973,
"end": 1930
}
|
class ____(object):
def rearrangeString(self, s, k):
"""
        :type s: str
:type k: int
:rtype: str
"""
if not k:
return s
cnts = collections.Counter(s)
bucket_cnt = (len(s)+k-1)//k
if not (max(cnts.itervalues()) <= bucket_cnt and cnts.values().count(bucket_cnt) <= (len(s)-1)%k+1):
return ""
result = [0]*len(s)
i = 0
for c in itertools.chain((c for c, v in cnts.iteritems() if v == bucket_cnt),
(c for c, v in cnts.iteritems() if v <= bucket_cnt-2),
(c for c, v in cnts.iteritems() if v == bucket_cnt-1)):
for _ in xrange(cnts[c]):
result[i] = c
i += k
if i >= len(result):
i = i%k+1
return "".join(result)
# Time: O(n)
# Space: O(n)
import collections
import itertools
|
Solution2
|
python
|
getsentry__sentry
|
tests/sentry/models/test_organizationaccessrequest.py
|
{
"start": 349,
"end": 3230
}
|
class ____(TestCase):
def test_sends_email_to_everyone(self) -> None:
owner = self.create_user("owner@example.com")
team_admin = self.create_user("team-admin@example.com")
non_team_admin = self.create_user("non-team-admin@example.com")
random_member = self.create_user("member@example.com")
requesting_user = self.create_user("requesting@example.com")
org = self.create_organization(owner=owner)
team = self.create_team(organization=org)
OrganizationMemberTeam.objects.create(
organizationmember=OrganizationMember.objects.get(organization=org, user_id=owner.id),
team=team,
)
self.create_member(organization=org, user=team_admin, role="admin", teams=[team])
self.create_member(organization=org, user=non_team_admin, role="admin", teams=[])
self.create_member(organization=org, user=random_member, role="member", teams=[team])
requesting_member = self.create_member(
organization=org, user=requesting_user, role="member", teams=[]
)
request = OrganizationAccessRequest.objects.create(member=requesting_member, team=team)
with self.tasks():
request.send_request_email()
assert len(mail.outbox) == 2, [m.subject for m in mail.outbox]
assert sorted(m.to[0] for m in mail.outbox) == sorted([owner.email, team_admin.email])
@with_feature("system:multi-region")
def test_sends_no_email_to_invited_member(self) -> None:
owner = self.create_user("owner@example.com")
org = self.create_organization(owner=owner)
team = self.create_team(organization=org)
self.create_team_membership(team=team, user=owner)
requesting_member = self.create_member(
organization=org, role="member", email="joe@example.com"
)
request = OrganizationAccessRequest.objects.create(member=requesting_member, team=team)
with self.tasks():
request.send_request_email()
assert len(mail.outbox) == 0
@with_feature("system:multi-region")
def test_sends_email_with_link(self) -> None:
owner = self.create_user("owner@example.com")
requesting_user = self.create_user("requesting@example.com")
org = self.create_organization(owner=owner)
team = self.create_team(organization=org)
self.create_team_membership(team=team, user=owner)
requesting_member = self.create_member(
organization=org, user=requesting_user, role="member", teams=[]
)
request = OrganizationAccessRequest.objects.create(member=requesting_member, team=team)
with self.tasks():
request.send_request_email()
assert len(mail.outbox) == 1
assert org.absolute_url("/settings/teams/") in mail.outbox[0].body
|
SendRequestEmailTest
|
python
|
google__jax
|
jax/_src/interpreters/ad.py
|
{
"start": 21211,
"end": 25131
}
|
class ____:
__slots__ = ['aval']
def __init__(self, aval):
self.aval = aval
def __repr__(self):
return f'UndefinedPrimal({self.aval})'
def is_undefined_primal(x):
return type(x) is UndefinedPrimal
register_pytree_node(UndefinedPrimal,
lambda z: ((), z.aval),
lambda aval, _: UndefinedPrimal(aval))
def get_primitive_transpose(p):
try:
return primitive_transposes[p]
except KeyError as err:
raise NotImplementedError(
"Transpose rule (for reverse-mode differentiation) for '{}' "
"not implemented".format(p)) from err
def backward_pass3(
jaxpr: core.Jaxpr, transform_stack: bool,
consts: Sequence[Array], primals_in: Sequence[Array | Ref | GradAccum],
cotangents_in: Sequence[Array]) -> None:
if all(type(ct) is Zero for ct in cotangents_in) and not jaxpr.effects:
return
env: dict = dict(zip((*jaxpr.constvars, *jaxpr.invars),
(*consts, *primals_in)))
def read(x: core.Atom) -> Array | GradAccum:
return x.val if isinstance(x, Literal) else env[x]
lin_eqns = []
for eqn in jaxpr.eqns:
if eqn.primitive.ref_primitive:
v, = eqn.outvars
lin_eqns.append(eqn)
if eqn.primitive is core.ref_p:
env[v] = RefAccum(v.aval.inner_aval) # type: ignore
elif eqn.primitive is core.freeze_p:
env[v] = ValAccum(v.aval)
elif eqn.primitive is core.accum_grad_in_ref_p:
env[v] = RefAccum(v.aval)
else:
assert False
elif any(isinstance(read(x), GradAccum) for x in eqn.invars):
for v in eqn.outvars:
env[v] = ValAccum(v.aval)
lin_eqns.append(eqn)
else:
subfuns, params = eqn.primitive.get_bind_params(eqn.params)
with eqn.ctx.manager, _name_stack_ctx(eqn.source_info):
ans = eqn.primitive.bind(*subfuns, *map(read, eqn.invars), **params)
ans = ans if eqn.primitive.multiple_results else [ans]
foreach(env.setdefault, eqn.outvars, ans)
ctx = (source_info_util.transform_name_stack('transpose') if transform_stack # type: ignore
else contextlib.nullcontext())
for acc, ct in zip(map(read, jaxpr.outvars), cotangents_in):
if isinstance(acc, GradAccum):
acc.accum(ct) # jaxpr.outvars can have Literals, env can have inst zeros
with ctx:
for eqn in lin_eqns[::-1]:
with eqn.ctx.manager, _name_stack_ctx(eqn.source_info):
if eqn.primitive.ref_primitive:
ct = env.pop(eqn.outvars[0]).freeze()
acc = read(eqn.invars[0])
if isinstance(acc, GradAccum):
acc.accum(ct)
else:
cts_in = [env.pop(v).freeze() for v in eqn.outvars]
if not eqn.primitive.multiple_results:
cts_in, = cts_in
if eqn.primitive in fancy_transposes:
rule = fancy_transposes[eqn.primitive]
rule(cts_in, *map(read, eqn.invars), **eqn.params)
else:
rule = get_primitive_transpose(eqn.primitive)
primals = map(read, eqn.invars)
up = lambda x: UndefinedPrimal(x.aval) if isinstance(x, GradAccum) else x
if eqn.primitive.call_primitive or eqn.primitive.map_primitive:
# TODO(mattjj,dougalm): remove this path by revising call/map trans
cts_in_avals = [v.aval for v in eqn.outvars]
params = dict(eqn.params)
call_jaxpr = params.pop('call_jaxpr')
cts_out = rule(params, call_jaxpr, map(up, primals), cts_in, cts_in_avals)
else:
cts_out = rule(cts_in, *map(up, primals), **eqn.params)
for x, ct in zip(primals, cts_out):
if isinstance(x, GradAccum):
x.accum(ct)
def _name_stack_ctx(src_info):
stack = source_info_util.current_name_stack() + src_info.name_stack
return source_info_util.user_context(src_info.traceback, name_stack=stack)
|
UndefinedPrimal
|
python
|
getsentry__sentry
|
src/sentry/integrations/pagerduty/utils.py
|
{
"start": 1166,
"end": 8320
}
|
class ____(TypedDict):
integration_id: int
integration_key: str
service_name: str
id: int
@control_silo_function
def add_service(
organization_integration: OrganizationIntegration, integration_key: str, service_name: str
) -> PagerDutyServiceDict:
with transaction.atomic(router.db_for_write(OrganizationIntegration)):
OrganizationIntegration.objects.filter(id=organization_integration.id).select_for_update()
with transaction.get_connection(
router.db_for_write(OrganizationIntegration)
).cursor() as cursor:
cursor.execute(
"SELECT nextval(%s)", [f"{OrganizationIntegration._meta.db_table}_id_seq"]
)
next_id: int = cursor.fetchone()[0]
service: PagerDutyServiceDict = {
"id": next_id,
"integration_key": integration_key,
"service_name": service_name,
"integration_id": organization_integration.integration_id,
}
existing = organization_integration.config.get("pagerduty_services", [])
new_services: list[PagerDutyServiceDict] = existing + [service]
organization_integration.config["pagerduty_services"] = new_services
organization_integration.save()
return service
def get_services(
org_integration: OrganizationIntegration | RpcOrganizationIntegration | None,
) -> list[PagerDutyServiceDict]:
if not org_integration:
return []
return org_integration.config.get("pagerduty_services", [])
def get_service(
org_integration: OrganizationIntegration | RpcOrganizationIntegration | None,
service_id: int | str,
) -> PagerDutyServiceDict | None:
services = get_services(org_integration)
if not services:
return None
service: PagerDutyServiceDict | None = None
for candidate in services:
if str(candidate["id"]) == str(service_id):
service = candidate
break
return service
def build_incident_attachment(
alert_context: AlertContext,
metric_issue_context: MetricIssueContext,
organization: Organization,
integration_key: str,
notification_uuid: str | None = None,
) -> dict[str, Any]:
data = incident_attachment_info(
organization=organization,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
notification_uuid=notification_uuid,
referrer="metric_alert_pagerduty",
)
severity = "info"
if metric_issue_context.new_status == IncidentStatus.CRITICAL:
severity = "critical"
elif metric_issue_context.new_status == IncidentStatus.WARNING:
severity = "warning"
elif metric_issue_context.new_status == IncidentStatus.CLOSED:
severity = "info"
event_action = "resolve"
if metric_issue_context.new_status in [IncidentStatus.WARNING, IncidentStatus.CRITICAL]:
event_action = "trigger"
return {
"routing_key": integration_key,
"event_action": event_action,
"dedup_key": f"incident_{organization.id}_{metric_issue_context.open_period_identifier}",
"payload": {
"summary": alert_context.name,
"severity": severity,
"source": str(metric_issue_context.open_period_identifier),
"custom_details": {"details": data["text"]},
},
"links": [{"href": data["title_link"], "text": data["title"]}],
}
def attach_custom_severity(
data: dict[str, Any],
sentry_app_config: list[dict[str, Any]] | dict[str, Any] | None,
new_status: IncidentStatus,
) -> dict[str, Any]:
# use custom severity (overrides default in build_incident_attachment)
if new_status == IncidentStatus.CLOSED or sentry_app_config is None:
return data
if isinstance(sentry_app_config, list):
raise ValueError("Sentry app config must be a single dict")
severity = sentry_app_config.get("priority", None)
if severity is not None and severity != PAGERDUTY_DEFAULT_SEVERITY:
data["payload"]["severity"] = severity
return data
def send_incident_alert_notification(
notification_context: NotificationContext,
alert_context: AlertContext,
metric_issue_context: MetricIssueContext,
organization: Organization,
notification_uuid: str | None = None,
) -> bool:
from sentry.integrations.pagerduty.integration import PagerDutyIntegration
integration_id = notification_context.integration_id
organization_id = organization.id
result = integration_service.organization_context(
organization_id=organization_id,
integration_id=integration_id,
)
integration = result.integration
org_integration = result.organization_integration
if integration is None:
logger.info(
"pagerduty.integration.missing",
extra={
"integration_id": integration_id,
"organization_id": organization_id,
},
)
return False
org_integration_id: int | None = None
if org_integration:
org_integration_id = org_integration.id
else:
org_integrations = None
if integration_id is not None:
org_integration_id = infer_org_integration(
integration_id=integration_id, ctx_logger=logger
)
if org_integration_id:
org_integrations = integration_service.get_organization_integrations(
org_integration_ids=[org_integration_id]
)
if org_integrations:
org_integration = org_integrations[0]
install = integration.get_installation(organization_id=organization_id)
assert isinstance(install, PagerDutyIntegration)
try:
client = install.get_keyring_client(str(notification_context.target_identifier))
except ValueError:
# service has been removed after rule creation
logger.info(
"fetch.fail.pagerduty_metric_alert",
extra={
"integration_id": integration_id,
"organization_id": organization_id,
"target_identifier": notification_context.target_identifier,
},
)
metrics.incr(
"pagerduty.metric_alert_rule.integration_removed_after_rule_creation", sample_rate=1.0
)
return False
attachment = build_incident_attachment(
alert_context=alert_context,
metric_issue_context=metric_issue_context,
organization=organization,
integration_key=client.integration_key,
notification_uuid=notification_uuid,
)
attachment = attach_custom_severity(
attachment, notification_context.sentry_app_config, metric_issue_context.new_status
)
try:
client.send_trigger(attachment)
return True
except ApiError as e:
logger.info(
"rule.fail.pagerduty_metric_alert",
extra={
"error": str(e),
"service_id": notification_context.target_identifier,
"integration_id": integration_id,
},
)
raise
|
PagerDutyServiceDict
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/endpoints/test_organization_data_condition_index.py
|
{
"start": 362,
"end": 3163
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-organization-data-condition-index"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.registry = Registry[type[DataConditionHandler[dict[str, Any]]]](
enable_reverse_lookup=False
)
self.registry_patcher = patch(
"sentry.workflow_engine.endpoints.organization_data_condition_index.condition_handler_registry",
new=self.registry,
)
self.registry_patcher.start()
@self.registry.register(Condition.REAPPEARED_EVENT)
@dataclass(frozen=True)
class TestWorkflowTrigger(DataConditionHandler[dict[str, str]]):
group = DataConditionHandler.Group.WORKFLOW_TRIGGER
comparison_json_schema = {"type": "boolean"}
@self.registry.register(Condition.AGE_COMPARISON)
@dataclass(frozen=True)
class TestActionFilter(DataConditionHandler[dict[str, Any]]):
group = DataConditionHandler.Group.ACTION_FILTER
subgroup = DataConditionHandler.Subgroup.ISSUE_ATTRIBUTES
comparison_json_schema = {
"type": "object",
"properties": {
"value": {"type": "integer", "minimum": 0},
},
"required": ["value"],
"additionalProperties": False,
}
@self.registry.register(Condition.ANOMALY_DETECTION)
@dataclass(frozen=True)
class TestDetectorTrigger(DataConditionHandler[dict[str, str]]):
group = DataConditionHandler.Group.DETECTOR_TRIGGER
comparison_json_schema = {"type": "boolean"}
# This legacy condition should not be included in the response
@self.registry.register(Condition.EXISTING_HIGH_PRIORITY_ISSUE)
@dataclass(frozen=True)
class TestIgnoredCondition(DataConditionHandler[dict[str, str]]):
group = DataConditionHandler.Group.WORKFLOW_TRIGGER
comparison_json_schema = {"type": "boolean"}
@self.registry.register(Condition.ISSUE_CATEGORY)
@dataclass(frozen=True)
class TestIssueCategoryCondition(DataConditionHandler[dict[str, Any]]):
group = DataConditionHandler.Group.ACTION_FILTER
subgroup = DataConditionHandler.Subgroup.ISSUE_ATTRIBUTES
comparison_json_schema = {
"type": "object",
"properties": {
"value": {"type": "integer", "minimum": 0},
},
"required": ["value"],
"additionalProperties": False,
}
def tearDown(self) -> None:
super().tearDown()
self.registry_patcher.stop()
@region_silo_test
|
OrganizationDataConditionAPITestCase
|
python
|
pytorch__pytorch
|
torch/utils/_cxx_pytree.py
|
{
"start": 34574,
"end": 39486
}
|
class ____(TreeSpec, metaclass=LeafSpecMeta): # type: ignore[misc,final]
def __new__(cls) -> Self:
return treespec_leaf() # type: ignore[return-value]
def tree_flatten_with_path(
tree: PyTree,
is_leaf: Callable[[PyTree], bool] | None = None,
) -> tuple[list[tuple[KeyPath, Any]], TreeSpec]:
"""Flattens a pytree like :func:`tree_flatten`, but also returns each leaf's key path.
Args:
tree: a pytree to flatten. If it contains a custom type, that type must be
registered with an appropriate `tree_flatten_with_path_fn` when registered
with :func:`register_pytree_node`.
is_leaf: An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a node
            is a leaf or not. If the function is not specified, the default pytree registry will be used.
Returns:
A tuple where the first element is a list of (key path, leaf) pairs, and the
second element is a :class:`TreeSpec` representing the structure of the flattened
tree.
"""
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
def tree_leaves_with_path(
tree: PyTree,
is_leaf: Callable[[PyTree], bool] | None = None,
) -> list[tuple[KeyPath, Any]]:
"""Gets the leaves of a pytree like ``tree_leaves`` and returns each leaf's key path.
Args:
tree: a pytree. If it contains a custom type, that type must be
registered with an appropriate `tree_flatten_with_path_fn` when registered
with :func:`register_pytree_node`.
is_leaf: An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a node
            is a leaf or not. If the function is not specified, the default pytree registry will be used.
Returns:
A list of (key path, leaf) pairs.
"""
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
def tree_map_with_path(
func: Callable[..., Any],
tree: PyTree,
*rests: PyTree,
is_leaf: Callable[[PyTree], bool] | None = None,
) -> PyTree:
"""Like :func:`tree_map`, but the provided callable takes an additional key path argument.
Args:
func: A function that takes ``2 + len(rests)`` arguments, to be applied at the
corresponding leaves of the pytrees. The first positional argument
to ``func`` is the key path of the leaf in question. The second
positional argument is the value of the leaf.
tree: A pytree to be mapped over, with each leaf providing the first positional
argument to function ``func``.
rests: A tuple of pytrees, each of which has the same structure as
``tree`` or has ``tree`` as a prefix.
is_leaf: An extra leaf predicate function that will be called at each
flattening step. The function should have a single argument with signature
            ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
            as a leaf. Otherwise, the default pytree registry will be used to determine whether a node
            is a leaf or not. If the function is not specified, the default pytree registry will be used.
    Returns:
A new pytree with the same structure as ``tree`` but with the value at each leaf given by
``func(keypath, x, *xs)`` where ``keypath`` is the key path at the
corresponding leaf in ``tree``, ``x`` is the value at that leaf, and
``xs`` is the tuple of values at corresponding nodes in ``rests``.
"""
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
def keystr(kp: KeyPath) -> str:
"""Given a key path, return a pretty-printed representation."""
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
def key_get(obj: Any, kp: KeyPath) -> Any:
"""Given an object and a key path, return the value at the key path."""
raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
with python_pytree._NODE_REGISTRY_LOCK:
# pyrefly: ignore [bad-assignment]
python_pytree._cxx_pytree_imported = True
args, kwargs = (), {} # type: ignore[var-annotated]
for args, kwargs in python_pytree._cxx_pytree_pending_imports:
_private_register_pytree_node(*args, **kwargs)
python_pytree._cxx_pytree_pending_imports.clear()
del args, kwargs
|
LeafSpec
|
python
|
pytorch__pytorch
|
test/torch_np/test_random.py
|
{
"start": 3191,
"end": 3619
}
|
class ____(TestCase):
@parametrize("use_numpy", [True, False])
def test_choice(self, use_numpy):
kwds = dict(size=3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
with control_stream(use_numpy):
tnp.random.seed(12345)
x = tnp.random.choice(5, **kwds)
tnp.random.seed(12345)
x_1 = tnp.random.choice(tnp.arange(5), **kwds)
assert_equal(x, x_1)
|
TestChoice
|
python
|
jazzband__django-oauth-toolkit
|
tests/test_commands.py
|
{
"start": 428,
"end": 4944
}
|
class ____(TestCase):
def test_command_creates_application(self):
output = StringIO()
self.assertEqual(Application.objects.count(), 0)
call_command(
"createapplication",
"confidential",
"authorization-code",
"--redirect-uris=http://example.com http://example2.com",
stdout=output,
)
self.assertEqual(Application.objects.count(), 1)
self.assertIn("created successfully", output.getvalue())
def test_missing_required_args(self):
self.assertEqual(Application.objects.count(), 0)
with self.assertRaises(CommandError) as ctx:
call_command(
"createapplication",
"--redirect-uris=http://example.com http://example2.com",
)
self.assertIn("client_type", ctx.exception.args[0])
self.assertIn("authorization_grant_type", ctx.exception.args[0])
self.assertEqual(Application.objects.count(), 0)
def test_command_creates_application_with_skipped_auth(self):
self.assertEqual(Application.objects.count(), 0)
call_command(
"createapplication",
"confidential",
"authorization-code",
"--redirect-uris=http://example.com http://example2.com",
"--skip-authorization",
)
app = Application.objects.get()
self.assertTrue(app.skip_authorization)
def test_application_created_normally_with_no_skipped_auth(self):
call_command(
"createapplication",
"confidential",
"authorization-code",
"--redirect-uris=http://example.com http://example2.com",
)
app = Application.objects.get()
self.assertFalse(app.skip_authorization)
def test_application_created_with_name(self):
call_command(
"createapplication",
"confidential",
"authorization-code",
"--redirect-uris=http://example.com http://example2.com",
"--name=TEST",
)
app = Application.objects.get()
self.assertEqual(app.name, "TEST")
def test_application_created_with_client_secret(self):
call_command(
"createapplication",
"confidential",
"authorization-code",
"--redirect-uris=http://example.com http://example2.com",
"--client-secret=SECRET",
)
app = Application.objects.get()
self.assertTrue(check_password("SECRET", app.client_secret))
def test_application_created_with_client_id(self):
call_command(
"createapplication",
"confidential",
"authorization-code",
"--redirect-uris=http://example.com http://example2.com",
"--client-id=someId",
)
app = Application.objects.get()
self.assertEqual(app.client_id, "someId")
def test_application_created_with_user(self):
User = get_user_model()
user = User.objects.create()
call_command(
"createapplication",
"confidential",
"authorization-code",
"--redirect-uris=http://example.com http://example2.com",
"--user=%s" % user.pk,
)
app = Application.objects.get()
self.assertEqual(app.user, user)
@pytest.mark.usefixtures("oauth2_settings")
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RW)
def test_application_created_with_algorithm(self):
call_command(
"createapplication",
"confidential",
"authorization-code",
"--redirect-uris=http://example.com http://example2.com",
"--algorithm=RS256",
)
app = Application.objects.get()
self.assertEqual(app.algorithm, "RS256")
def test_validation_failed_message(self):
import django
output = StringIO()
call_command(
"createapplication",
"confidential",
"authorization-code",
"--redirect-uris=http://example.com http://example2.com",
"--user=783",
stdout=output,
)
output_str = output.getvalue()
self.assertIn("user", output_str)
self.assertIn("783", output_str)
if django.VERSION < (5, 2):
self.assertIn("does not exist", output_str)
else:
self.assertIn("is not a valid choice", output_str)
|
CreateApplicationTest
|
python
|
crytic__slither
|
slither/detectors/operations/missing_events_access_control.py
|
{
"start": 801,
"end": 4493
}
|
class ____(AbstractDetector):
"""
Missing events for critical contract parameters set by owners and used in access control
"""
ARGUMENT = "events-access"
HELP = "Missing Events Access Control"
IMPACT = DetectorClassification.LOW
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#missing-events-access-control"
WIKI_TITLE = "Missing events access control"
WIKI_DESCRIPTION = "Detect missing events for critical access control parameters"
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract C {
modifier onlyAdmin {
if (msg.sender != owner) throw;
_;
}
function updateOwner(address newOwner) onlyAdmin external {
owner = newOwner;
}
}
```
`updateOwner()` has no event, so it is difficult to track off-chain owner changes.
"""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Emit an event for critical parameter changes."
@staticmethod
def _detect_missing_events(
contract: Contract,
) -> List[Tuple[FunctionContract, List[Tuple[Node, StateVariable, Modifier]]]]:
"""
Detects if critical contract parameters set by owners and used in access control are missing events
:param contract: The contract to check
:return: Functions with nodes of critical operations but no events
"""
results = []
# pylint: disable=too-many-nested-blocks
for function in contract.functions_entry_points:
nodes = []
# Check for any events in the function and skip if found
# Note: not checking if event corresponds to critical parameter
if any(ir for node in function.nodes for ir in node.irs if isinstance(ir, EventCall)):
continue
# Ignore constructors and private/internal functions
# Heuristic-1: functions with critical operations are typically "protected". Skip unprotected functions.
if function.is_constructor or not function.is_protected():
continue
# Heuristic-2: Critical operations are where state variables are written and tainted
# Heuristic-3: Variables of interest are address type that are used in modifiers i.e. access control
# Heuristic-4: Critical operations present but no events in the function is not a good practice
for node in function.nodes:
for sv in node.state_variables_written:
if is_tainted(sv, function) and sv.type == ElementaryType("address"):
for mod in function.contract.modifiers:
if sv in mod.state_variables_read:
nodes.append((node, sv, mod))
if nodes:
results.append((function, nodes))
return results
def _detect(self) -> List[Output]:
"""Detect missing events for critical contract parameters set by owners and used in access control
Returns:
list: {'(function, node)'}
"""
# Check derived contracts for missing events
results = []
for contract in self.compilation_unit.contracts_derived:
missing_events = self._detect_missing_events(contract)
for (function, nodes) in missing_events:
info: DETECTOR_INFO = [function, " should emit an event for: \n"]
for (node, _sv, _mod) in nodes:
info += ["\t- ", node, " \n"]
res = self.generate_result(info)
results.append(res)
return results
|
MissingEventsAccessControl
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/ranges/test_constructors.py
|
{
"start": 158,
"end": 5328
}
|
class ____:
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize(
"args, kwargs, start, stop, step",
[
((5,), {}, 0, 5, 1),
((1, 5), {}, 1, 5, 1),
((1, 5, 2), {}, 1, 5, 2),
((0,), {}, 0, 0, 1),
((0, 0), {}, 0, 0, 1),
((), {"start": 0}, 0, 0, 1),
((), {"stop": 0}, 0, 0, 1),
],
)
def test_constructor(self, args, kwargs, start, stop, step, name):
result = RangeIndex(*args, name=name, **kwargs)
expected = Index(np.arange(start, stop, step, dtype=np.int64), name=name)
assert isinstance(result, RangeIndex)
assert result.name is name
assert result._range == range(start, stop, step)
tm.assert_index_equal(result, expected, exact="equiv")
def test_constructor_invalid_args(self):
msg = "RangeIndex\\(\\.\\.\\.\\) must be called with integers"
with pytest.raises(TypeError, match=msg):
RangeIndex()
with pytest.raises(TypeError, match=msg):
RangeIndex(name="Foo")
# we don't allow on a bare Index
msg = (
r"Index\(\.\.\.\) must be called with a collection of some "
r"kind, 0 was passed"
)
with pytest.raises(TypeError, match=msg):
Index(0)
@pytest.mark.parametrize(
"args",
[
Index(["a", "b"]),
Series(["a", "b"]),
np.array(["a", "b"]),
[],
np.arange(0, 10),
np.array([1]),
[1],
],
)
def test_constructor_additional_invalid_args(self, args):
msg = f"Value needs to be a scalar value, was type {type(args).__name__}"
with pytest.raises(TypeError, match=msg):
RangeIndex(args)
@pytest.mark.parametrize("args", ["foo", datetime(2000, 1, 1, 0, 0)])
def test_constructor_invalid_args_wrong_type(self, args):
msg = f"Wrong type {type(args)} for value {args}"
with pytest.raises(TypeError, match=msg):
RangeIndex(args)
def test_constructor_same(self):
# pass thru w and w/o copy
index = RangeIndex(1, 5, 2)
result = RangeIndex(index, copy=False)
assert result.identical(index)
result = RangeIndex(index, copy=True)
tm.assert_index_equal(result, index, exact=True)
result = RangeIndex(index)
tm.assert_index_equal(result, index, exact=True)
with pytest.raises(
ValueError,
match="Incorrect `dtype` passed: expected signed integer, received float64",
):
RangeIndex(index, dtype="float64")
def test_constructor_range_object(self):
result = RangeIndex(range(1, 5, 2))
expected = RangeIndex(1, 5, 2)
tm.assert_index_equal(result, expected, exact=True)
def test_constructor_range(self):
result = RangeIndex.from_range(range(1, 5, 2))
expected = RangeIndex(1, 5, 2)
tm.assert_index_equal(result, expected, exact=True)
result = RangeIndex.from_range(range(5, 6))
expected = RangeIndex(5, 6, 1)
tm.assert_index_equal(result, expected, exact=True)
# an invalid range
result = RangeIndex.from_range(range(5, 1))
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected, exact=True)
result = RangeIndex.from_range(range(5))
expected = RangeIndex(0, 5, 1)
tm.assert_index_equal(result, expected, exact=True)
result = Index(range(1, 5, 2))
expected = RangeIndex(1, 5, 2)
tm.assert_index_equal(result, expected, exact=True)
msg = (
r"(RangeIndex.)?from_range\(\) got an unexpected keyword argument( 'copy')?"
)
with pytest.raises(TypeError, match=msg):
RangeIndex.from_range(range(10), copy=True)
def test_constructor_name(self):
# GH#12288
orig = RangeIndex(10)
orig.name = "original"
copy = RangeIndex(orig)
copy.name = "copy"
assert orig.name == "original"
assert copy.name == "copy"
new = Index(copy)
assert new.name == "copy"
new.name = "new"
assert orig.name == "original"
assert copy.name == "copy"
assert new.name == "new"
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = RangeIndex(1, 5)
assert index.values.dtype == np.int64
expected = Index(arr).astype("int64")
tm.assert_index_equal(index, expected, exact="equiv")
        # non-int raises an exception
with pytest.raises(TypeError, match=r"Wrong type \<class 'str'\>"):
RangeIndex("1", "10", "1")
with pytest.raises(TypeError, match=r"Wrong type \<class 'float'\>"):
RangeIndex(1.1, 10.2, 1.3)
# invalid passed type
with pytest.raises(
ValueError,
match="Incorrect `dtype` passed: expected signed integer, received float64",
):
RangeIndex(1, 5, dtype="float64")
|
TestRangeIndexConstructors
|
python
|
huggingface__transformers
|
src/transformers/models/dpt/image_processing_dpt.py
|
{
"start": 4183,
"end": 32454
}
|
class ____(BaseImageProcessor):
r"""
Constructs a DPT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions. Can be overridden by `do_resize` in `preprocess`.
size (`dict[str, int]` *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the image after resizing. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Defines the resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. Can
be overridden by `keep_aspect_ratio` in `preprocess`.
ensure_multiple_of (`int`, *optional*, defaults to 1):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overridden
by `ensure_multiple_of` in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
`preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in `preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `False`):
Whether to apply center padding. This was introduced in the DINOv2 paper, which uses the model in
combination with DPT.
size_divisor (`int`, *optional*):
If `do_pad` is `True`, pads the image dimensions to be divisible by this value. This was introduced in the
DINOv2 paper, which uses the model in combination with DPT.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
"""
model_input_names = ["pixel_values"]
valid_kwargs = DPTImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
keep_aspect_ratio: bool = False,
ensure_multiple_of: int = 1,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: bool = False,
size_divisor: Optional[int] = None,
do_reduce_labels: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 384, "width": 384}
size = get_size_dict(size)
self.do_resize = do_resize
self.size = size
self.keep_aspect_ratio = keep_aspect_ratio
self.ensure_multiple_of = ensure_multiple_of
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_pad = do_pad
self.size_divisor = size_divisor
self.do_reduce_labels = do_reduce_labels
def resize(
self,
image: np.ndarray,
size: dict[str, int],
keep_aspect_ratio: bool = False,
ensure_multiple_of: int = 1,
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
set, the image is resized to a size that is a multiple of this value.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Target size of the output image.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
ensure_multiple_of (`int`, *optional*, defaults to 1):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Defines the resampling filter to use when resizing the image to the size specified in `size`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
output_size = get_resize_output_image_size(
image,
output_size=(size["height"], size["width"]),
keep_aspect_ratio=keep_aspect_ratio,
multiple=ensure_multiple_of,
input_data_format=input_data_format,
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def pad_image(
self,
image: np.ndarray,
size_divisor: int,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Center pad an image so its height and width are multiples of `size_divisor`.
Args:
image (`np.ndarray`):
Image to pad.
size_divisor (`int`):
The width and height of the image will be padded to a multiple of this number.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
def _get_pad(size, size_divisor):
new_size = math.ceil(size / size_divisor) * size_divisor
pad_size = new_size - size
pad_size_left = pad_size // 2
pad_size_right = pad_size - pad_size_left
return pad_size_left, pad_size_right
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
height, width = get_image_size(image, input_data_format)
pad_size_left, pad_size_right = _get_pad(height, size_divisor)
pad_size_top, pad_size_bottom = _get_pad(width, size_divisor)
return pad(image, ((pad_size_left, pad_size_right), (pad_size_top, pad_size_bottom)), data_format=data_format)
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.reduce_label
def reduce_label(self, label: ImageInput) -> np.ndarray:
label = to_numpy_array(label)
# Avoid using underflow conversion
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
return label
def _preprocess(
self,
image: ImageInput,
do_reduce_labels: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
keep_aspect_ratio: Optional[bool] = None,
ensure_multiple_of: Optional[int] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
size_divisor: Optional[int] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
if do_reduce_labels:
image = self.reduce_label(image)
if do_resize:
image = self.resize(
image=image,
size=size,
resample=resample,
keep_aspect_ratio=keep_aspect_ratio,
ensure_multiple_of=ensure_multiple_of,
input_data_format=input_data_format,
)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
if do_pad:
image = self.pad_image(image=image, size_divisor=size_divisor, input_data_format=input_data_format)
return image
def _preprocess_image(
self,
image: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
keep_aspect_ratio: Optional[bool] = None,
ensure_multiple_of: Optional[int] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
size_divisor: Optional[int] = None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single image."""
# All transformations expect numpy arrays.
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(
image,
do_reduce_labels=False,
do_resize=do_resize,
size=size,
resample=resample,
keep_aspect_ratio=keep_aspect_ratio,
ensure_multiple_of=ensure_multiple_of,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_pad=do_pad,
size_divisor=size_divisor,
input_data_format=input_data_format,
)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_segmentation_map(
self,
segmentation_map: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
keep_aspect_ratio: Optional[bool] = None,
ensure_multiple_of: Optional[int] = None,
do_reduce_labels: Optional[bool] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""Preprocesses a single segmentation map."""
# All transformations expect numpy arrays.
segmentation_map = to_numpy_array(segmentation_map)
# Add an axis to the segmentation maps for transformations.
if segmentation_map.ndim == 2:
segmentation_map = segmentation_map[None, ...]
added_dimension = True
input_data_format = ChannelDimension.FIRST
else:
added_dimension = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
segmentation_map = self._preprocess(
image=segmentation_map,
do_reduce_labels=do_reduce_labels,
do_resize=do_resize,
size=size,
resample=resample,
keep_aspect_ratio=keep_aspect_ratio,
ensure_multiple_of=ensure_multiple_of,
do_normalize=False,
do_rescale=False,
input_data_format=input_data_format,
)
# Remove extra axis if added
if added_dimension:
segmentation_map = np.squeeze(segmentation_map, axis=0)
segmentation_map = segmentation_map.astype(np.int64)
return segmentation_map
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.__call__
def __call__(self, images, segmentation_maps=None, **kwargs):
# Overrides the `__call__` method of the `Preprocessor` class such that the images and segmentation maps can both
# be passed in as positional arguments.
return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
do_resize: Optional[bool] = None,
size: Optional[int] = None,
keep_aspect_ratio: Optional[bool] = None,
ensure_multiple_of: Optional[int] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
size_divisor: Optional[int] = None,
do_reduce_labels: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized to the largest
possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is
resized to a size that is a multiple of this value.
keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`):
Whether to keep the aspect ratio of the image. If False, the image will be resized to (size, size). If
True, the image will be resized to keep the aspect ratio and the size will be the maximum possible.
ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`):
Ensure that the image size is a multiple of this value.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_pad = do_pad if do_pad is not None else self.do_pad
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
images = [
self._preprocess_image(
image=img,
do_resize=do_resize,
do_rescale=do_rescale,
do_normalize=do_normalize,
do_pad=do_pad,
size=size,
resample=resample,
keep_aspect_ratio=keep_aspect_ratio,
ensure_multiple_of=ensure_multiple_of,
rescale_factor=rescale_factor,
image_mean=image_mean,
image_std=image_std,
size_divisor=size_divisor,
data_format=data_format,
input_data_format=input_data_format,
)
for img in images
]
data = {"pixel_values": images}
if segmentation_maps is not None:
segmentation_maps = [
self._preprocess_segmentation_map(
segmentation_map=segmentation_map,
do_reduce_labels=do_reduce_labels,
do_resize=do_resize,
size=size,
resample=resample,
keep_aspect_ratio=keep_aspect_ratio,
ensure_multiple_of=ensure_multiple_of,
input_data_format=input_data_format,
)
for segmentation_map in segmentation_maps
]
data["labels"] = segmentation_maps
return BatchFeature(data=data, tensor_type=return_tensors)
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->DPT
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
"""
Converts the output of [`DPTForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`DPTForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
def post_process_depth_estimation(
self,
outputs: "DepthEstimatorOutput",
target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None,
) -> list[dict[str, TensorType]]:
"""
Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.
Only supports PyTorch.
Args:
outputs ([`DepthEstimatorOutput`]):
Raw outputs of the model.
target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions.
"""
requires_backends(self, "torch")
predicted_depth = outputs.predicted_depth
if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
)
results = []
target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
for depth, target_size in zip(predicted_depth, target_sizes):
if target_size is not None:
depth = torch.nn.functional.interpolate(
depth.unsqueeze(0).unsqueeze(1), size=target_size, mode="bicubic", align_corners=False
).squeeze()
results.append({"predicted_depth": depth})
return results
__all__ = ["DPTImageProcessor"]
|
DPTImageProcessor
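A minimal usage sketch for the image processor above, assuming the transformers package and its image-processing dependencies are installed; the input array and expected output shape are illustrative.
import numpy as np
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 384, "width": 384})
dummy = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
inputs = processor(images=dummy, return_tensors="np")
print(inputs["pixel_values"].shape)  # expected (1, 3, 384, 384)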
|
python
|
pytorch__pytorch
|
torch/_inductor/cudagraph_trees.py
|
{
"start": 73979,
"end": 74076
}
|
class ____(Enum):
FORWARD = auto()
BACKWARD = auto()
INFERENCE = auto()
|
CompilationMode
|
python
|
google__pytype
|
pytype/pytd/visitors.py
|
{
"start": 62324,
"end": 68573
}
|
class ____(Visitor):
"""Visitor for verifying containers.
Every container (except typing.Generic) must inherit from typing.Generic and
have an explicitly parameterized base that is also a container. The
parameters on typing.Generic must all be TypeVar instances. A container must
have at most as many parameters as specified in its template.
Raises:
ContainerError: If a problematic container definition is encountered.
"""
def EnterGenericType(self, node):
"""Verify a pytd.GenericType."""
base_type = node.base_type
if isinstance(base_type, pytd.LateType):
return # We can't verify this yet
if not pytd.IsContainer(base_type.cls):
raise ContainerError(f"Class {base_type.name} is not a container")
elif base_type.name in ("typing.Generic", "typing.Protocol"):
for t in node.parameters:
if not isinstance(t, pytd.TypeParameter):
raise ContainerError(f"Name {t.name} must be defined as a TypeVar")
elif not isinstance(node, (pytd.CallableType, pytd.TupleType)):
actual_param_count = len(node.parameters)
max_param_count = len(base_type.cls.template)
if actual_param_count > max_param_count:
raise ContainerError(
"Too many parameters on {}: expected {}, got {}".format(
base_type.name, max_param_count, actual_param_count
)
)
def EnterCallableType(self, node):
self.EnterGenericType(node)
def EnterTupleType(self, node):
self.EnterGenericType(node)
def _GetGenericBasesLookupMap(self, node):
"""Get a lookup map for the generic bases of a class.
Gets a map from a pytd.ClassType to the list of pytd.GenericType bases of
the node that have that class as their base. This method does depth-first
traversal of the bases, which ensures that the order of elements in each
list is consistent with the node's MRO.
Args:
node: A pytd.Class node.
Returns:
A pytd.ClassType -> List[pytd.GenericType] map.
"""
mapping = collections.defaultdict(list)
seen_bases = set()
bases = list(reversed(node.bases))
while bases:
base = bases.pop()
if base in seen_bases:
continue
seen_bases.add(base)
if isinstance(base, pytd.GenericType) and isinstance(
base.base_type, pytd.ClassType
):
mapping[base.base_type].append(base)
bases.extend(reversed(base.base_type.cls.bases))
elif isinstance(base, pytd.ClassType):
bases.extend(reversed(base.cls.bases))
return mapping
def _UpdateParamToValuesMapping(self, mapping, param, value):
"""Update the given mapping of parameter names to values."""
param_name = param.type_param.full_name
if isinstance(value, pytd.TypeParameter):
value_name = value.full_name
assert param_name != value_name
# A TypeVar has been aliased, e.g.,
# class MyList(List[U]): ...
# class List(Sequence[T]): ...
# Register the alias. May raise AliasingDictConflictError.
mapping.add_alias(param_name, value_name, lambda x, y, z: x.union(y))
else:
# A TypeVar has been given a concrete value, e.g.,
# class MyList(List[str]): ...
# Register the value.
if param_name not in mapping:
mapping[param_name] = set()
mapping[param_name].add(value)
def _TypeCompatibilityCheck(self, type_params):
"""Check if the types are compatible.
It is used to handle the case:
class A(Sequence[A]): pass
class B(A, Sequence[B]): pass
class C(B, Sequence[C]): pass
In class `C`, the type parameter `_T` of Sequence could be `A`, `B` or `C`.
Next we will check they have a linear inheritance relationship:
`A` -> `B` -> `C`.
Args:
type_params: The class type params.
Returns:
True if all the types are compatible.
"""
type_params = {
t for t in type_params if not isinstance(t, pytd.AnythingType)
}
if not all(isinstance(t, pytd.ClassType) for t in type_params):
return False
mro_list = [set(mro.GetBasesInMRO(t.cls)) for t in type_params]
mro_list.sort(key=len)
prev = set()
for cur in mro_list:
if not cur.issuperset(prev):
return False
prev = cur
return True
def EnterClass(self, node):
"""Check for conflicting type parameter values in the class's bases."""
# Get the bases in MRO, since we need to know the order in which type
# parameters are aliased or assigned values.
try:
classes = mro.GetBasesInMRO(node)
except mro.MROError:
# TODO(rechen): We should report this, but VerifyContainers() isn't the
# right place to check for mro errors.
return
# GetBasesInMRO gave us the pytd.ClassType for each base. Map class types
# to generic types so that we can iterate through the latter in MRO.
cls_to_bases = self._GetGenericBasesLookupMap(node)
param_to_values = datatypes.AliasingDict()
ambiguous_aliases = set()
for base in sum((cls_to_bases[cls] for cls in classes), []):
for param, value in zip(base.base_type.cls.template, base.parameters):
try:
self._UpdateParamToValuesMapping(param_to_values, param, value)
except datatypes.AliasingDictConflictError:
ambiguous_aliases.add(param.type_param.full_name)
for param_name, values in param_to_values.items():
if any(param_to_values[alias] is values for alias in ambiguous_aliases):
# Any conflict detected for this type parameter might be a false
# positive, since a conflicting value assigned through an ambiguous
# alias could have been meant for a different type parameter.
continue
elif len(values) > 1 and not self._TypeCompatibilityCheck(values):
raise ContainerError(
"Conflicting values for TypeVar {}: {}".format(
param_name, ", ".join(str(v) for v in values)
)
)
for t in node.template:
if t.type_param.full_name in param_to_values:
(value,) = param_to_values[t.type_param.full_name]
raise ContainerError(
f"Conflicting value {value} for TypeVar {t.type_param.full_name}"
)
|
VerifyContainers
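A standalone sketch of the linear-inheritance test performed by `_TypeCompatibilityCheck` above, expressed over plain Python classes rather than pytd nodes: candidate values are compatible only if their MRO sets form a chain.
class A: pass
class B(A): pass
class C(B): pass

def compatible(*classes):
    # Sort MRO sets by size and require each to contain the previous one.
    mros = sorted((set(cls.__mro__) for cls in classes), key=len)
    prev = set()
    for cur in mros:
        if not cur.issuperset(prev):
            return False
        prev = cur
    return True

print(compatible(A, B, C))    # True: A -> B -> C is a linear chain
print(compatible(B, C, int))  # False: int is unrelated to B and C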
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pydocstyle/all.py
|
{
"start": 124,
"end": 196
}
|
class ____:
pass
__all__ = ("public_func", "PublicClass")
|
PrivateClass
|
python
|
Unity-Technologies__ml-agents
|
ml-agents/mlagents/trainers/trainer/on_policy_trainer.py
|
{
"start": 680,
"end": 5851
}
|
class ____(RLTrainer):
"""The PPOTrainer is an implementation of the PPO algorithm."""
def __init__(
self,
behavior_name: str,
reward_buff_cap: int,
trainer_settings: TrainerSettings,
training: bool,
load: bool,
seed: int,
artifact_path: str,
):
"""
Responsible for collecting experiences and training an on-policy model.
:param behavior_name: The name of the behavior associated with trainer config
:param reward_buff_cap: Max reward history to track in the reward buffer
:param trainer_settings: The parameters for the trainer.
:param training: Whether the trainer is set for training.
:param load: Whether the model should be loaded.
:param seed: The seed the model will be initialized with
:param artifact_path: The directory within which to store artifacts from this trainer.
"""
super().__init__(
behavior_name,
trainer_settings,
training,
load,
artifact_path,
reward_buff_cap,
)
self.hyperparameters = cast(
OnPolicyHyperparamSettings, self.trainer_settings.hyperparameters
)
self.seed = seed
self.policy: Policy = None # type: ignore
self.optimizer: TorchOptimizer = None # type: ignore
def _is_ready_update(self):
"""
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run
"""
size_of_buffer = self.update_buffer.num_experiences
return size_of_buffer > self.hyperparameters.buffer_size
def _update_policy(self):
"""
Uses demonstration_buffer to update the policy.
The reward signal generators must be updated in this method at their own pace.
"""
buffer_length = self.update_buffer.num_experiences
self.cumulative_returns_since_policy_update.clear()
# Make sure batch_size is a multiple of sequence length. During training, we
# will need to reshape the data into a batch_size x sequence_length tensor.
batch_size = (
self.hyperparameters.batch_size
- self.hyperparameters.batch_size % self.policy.sequence_length
)
# Make sure there is at least one sequence
batch_size = max(batch_size, self.policy.sequence_length)
n_sequences = max(
int(self.hyperparameters.batch_size / self.policy.sequence_length), 1
)
advantages = np.array(
self.update_buffer[BufferKey.ADVANTAGES].get_batch(), dtype=np.float32
)
self.update_buffer[BufferKey.ADVANTAGES].set(
(advantages - advantages.mean()) / (advantages.std() + 1e-10)
)
num_epoch = self.hyperparameters.num_epoch
batch_update_stats = defaultdict(list)
for _ in range(num_epoch):
self.update_buffer.shuffle(sequence_length=self.policy.sequence_length)
buffer = self.update_buffer
max_num_batch = buffer_length // batch_size
for i in range(0, max_num_batch * batch_size, batch_size):
minibatch = buffer.make_mini_batch(i, i + batch_size)
update_stats = self.optimizer.update(minibatch, n_sequences)
update_stats.update(self.optimizer.update_reward_signals(minibatch))
for stat_name, value in update_stats.items():
batch_update_stats[stat_name].append(value)
for stat, stat_list in batch_update_stats.items():
self._stats_reporter.add_stat(stat, np.mean(stat_list))
if self.optimizer.bc_module:
update_stats = self.optimizer.bc_module.update()
for stat, val in update_stats.items():
self._stats_reporter.add_stat(stat, val)
self._clear_update_buffer()
return True
def add_policy(
self, parsed_behavior_id: BehaviorIdentifiers, policy: Policy
) -> None:
"""
Adds policy to trainer.
:param parsed_behavior_id: Behavior identifiers that the policy should belong to.
:param policy: Policy to associate with name_behavior_id.
"""
if self.policy:
logger.warning(
"Your environment contains multiple teams, but {} doesn't support adversarial games. Enable self-play to \
train adversarial games.".format(
self.__class__.__name__
)
)
self.policy = policy
self.policies[parsed_behavior_id.behavior_id] = policy
self.optimizer = self.create_optimizer()
for _reward_signal in self.optimizer.reward_signals.keys():
self.collected_rewards[_reward_signal] = defaultdict(lambda: 0)
self.model_saver.register(self.policy)
self.model_saver.register(self.optimizer)
self.model_saver.initialize_or_load()
# Needed to resume loads properly
self._step = policy.get_current_step()
|
OnPolicyTrainer
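A standalone sketch of the batch-size rounding used in `_update_policy` above: the configured batch size is truncated to a multiple of the policy's sequence length, but never below one full sequence. The numbers are illustrative.
def effective_batch_size(batch_size: int, sequence_length: int) -> int:
    batch_size = batch_size - batch_size % sequence_length
    return max(batch_size, sequence_length)

assert effective_batch_size(1024, 64) == 1024  # already a multiple
assert effective_batch_size(1000, 64) == 960   # truncated down
assert effective_batch_size(16, 64) == 64      # at least one full sequence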
|
python
|
walkccc__LeetCode
|
solutions/332. Reconstruct Itinerary/332.py
|
{
"start": 0,
"end": 358
}
|
class ____:
def findItinerary(self, tickets: list[list[str]]) -> list[str]:
ans = []
graph = collections.defaultdict(list)
for a, b in reversed(sorted(tickets)):
graph[a].append(b)
def dfs(u: str) -> None:
while u in graph and graph[u]:
dfs(graph[u].pop())
ans.append(u)
dfs('JFK')
return ans[::-1]
|
Solution
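A quick check of the DFS itinerary reconstruction above (a Hierholzer-style Eulerian path), assuming the masked class is named Solution as the target indicates and that `collections` is imported; the ticket list is the classic example input.
import collections

tickets = [["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]
print(Solution().findItinerary(tickets))  # ['JFK', 'MUC', 'LHR', 'SFO', 'SJC']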
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-subsequence-repeated-k-times.py
|
{
"start": 59,
"end": 1373
}
|
class ____(object):
def longestSubsequenceRepeatedK(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
def check(s, k, curr):
if not curr:
return True
i = 0
for c in s:
if c != curr[i]:
continue
i += 1
if i != len(curr):
continue
i = 0
k -= 1
if not k:
return True
return False
def backtracking(s, k, curr, cnts, result):
if not check(s, k, curr):
return
if len(curr) > len(result):
result[:] = curr
for c in reversed(string.ascii_lowercase):
if cnts[c] < k:
continue
cnts[c] -= k
curr.append(c)
backtracking(s, k, curr, cnts, result)
curr.pop()
cnts[c] += k
cnts = collections.Counter(s)
new_s = []
for c in s:
if cnts[c] < k:
continue
new_s.append(c)
result = []
backtracking(new_s, k, [], cnts, result)
return "".join(result)
|
Solution
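A usage sketch for the backtracking solution above, assuming the masked class is named Solution as the target indicates and that `collections` and `string` are imported at module level; the input is a standard example.
import collections
import string

print(Solution().longestSubsequenceRepeatedK("letsleetcode", 2))  # expected "let"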
|
python
|
kamyu104__LeetCode-Solutions
|
Python/leaf-similar-trees.py
|
{
"start": 210,
"end": 760
}
|
class ____(object):
def leafSimilar(self, root1, root2):
"""
:type root1: TreeNode
:type root2: TreeNode
:rtype: bool
"""
def dfs(node):
if not node:
return
if not node.left and not node.right:
yield node.val
for i in dfs(node.left):
yield i
for i in dfs(node.right):
yield i
return all(a == b for a, b in
itertools.izip_longest(dfs(root1), dfs(root2)))
|
Solution
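A Python 3 sketch of the generator-based leaf comparison above (the original targets Python 2's `itertools.izip_longest`); `TreeNode` here is a minimal stand-in.
import itertools

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def leaves(node):
    if not node:
        return
    if not node.left and not node.right:
        yield node.val
    yield from leaves(node.left)
    yield from leaves(node.right)

t1 = TreeNode(1, TreeNode(2), TreeNode(3))
t2 = TreeNode(9, TreeNode(2), TreeNode(3))
print(all(a == b for a, b in itertools.zip_longest(leaves(t1), leaves(t2))))  # True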
|
python
|
Pylons__pyramid
|
tests/test_util.py
|
{
"start": 27929,
"end": 28840
}
|
class ____(unittest.TestCase):
def _callFUT(self, *args, **kw):
from pyramid.util import is_same_domain
return is_same_domain(*args, **kw)
def test_it(self):
self.assertTrue(self._callFUT("example.com", "example.com"))
self.assertFalse(self._callFUT("evil.com", "example.com"))
self.assertFalse(self._callFUT("evil.example.com", "example.com"))
self.assertFalse(self._callFUT("example.com", ""))
def test_with_wildcard(self):
self.assertTrue(self._callFUT("example.com", ".example.com"))
self.assertTrue(self._callFUT("good.example.com", ".example.com"))
def test_with_port(self):
self.assertTrue(self._callFUT("example.com:8080", "example.com:8080"))
self.assertFalse(self._callFUT("example.com:8080", "example.com"))
self.assertFalse(self._callFUT("example.com", "example.com:8080"))
|
Test_is_same_domain
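A minimal re-implementation sketch of the domain-matching rule these tests encode (a leading dot in the pattern allows subdomains, ports must match exactly); it mirrors, but is not, pyramid.util.is_same_domain.
def is_same_domain_sketch(host: str, pattern: str) -> bool:
    if not pattern:
        return False
    host, pattern = host.lower(), pattern.lower()
    if pattern.startswith("."):
        return host.endswith(pattern) or host == pattern[1:]
    return host == pattern

assert is_same_domain_sketch("good.example.com", ".example.com")
assert not is_same_domain_sketch("evil.example.com", "example.com")
assert not is_same_domain_sketch("example.com:8080", "example.com")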
|
python
|
numba__numba
|
numba/core/typing/builtins.py
|
{
"start": 15988,
"end": 16293
}
|
class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
ptr, idx = args
if isinstance(ptr, types.CPointer) and isinstance(idx, types.Integer):
return signature(ptr.dtype, ptr, normalize_1d_index(idx))
@infer_global(operator.setitem)
|
GetItemCPointer
|
python
|
pikepdf__pikepdf
|
tests/test_page.py
|
{
"start": 1973,
"end": 9390
}
|
class ____:
def _make_simple_dict(self):
return Dictionary(Type=Name.XObject, Subtype=Name.Image, Width=1, Height=1)
def test_basic(self, graph_page):
d = self._make_simple_dict()
with pytest.raises(ValueError, match="already exists"):
graph_page.add_resource(d, Name.XObject, Name.Im0, replace_existing=False)
res = graph_page.add_resource(d, Name.XObject, Name.Im0, replace_existing=True)
assert graph_page.resources.XObject[res].Width == 1
res2 = graph_page.add_resource(d, Name.XObject, prefix='Im')
assert str(res2).startswith("/Im")
assert graph_page.resources.XObject[res2].Height == 1
def test_resources_exists_but_wrong_type(self, graph_page):
d = self._make_simple_dict()
del graph_page.obj.Resources
graph_page.obj.Resources = Name.Dummy
with pytest.raises(TypeError, match='exists but is not a dictionary'):
graph_page.add_resource(d, Name.XObject, Name.Im0, replace_existing=False)
def test_create_resource_dict_if_not_exists(self, graph_page):
d = self._make_simple_dict()
del graph_page.obj.Resources
graph_page.add_resource(d, Name.XObject, Name.Im0, replace_existing=False)
assert Name.Resources in graph_page.obj
def test_name_and_prefix(self, graph_page):
d = self._make_simple_dict()
with pytest.raises(ValueError, match="one of"):
graph_page.add_resource(d, Name.XObject, name=Name.X, prefix='y')
def test_unrecognized_object_not_disturbed(self, graph_page):
d = self._make_simple_dict()
graph_page.obj.Resources.InvalidItem = Array([42])
graph_page.add_resource(d, Name.Pattern)
assert Name.InvalidItem in graph_page.obj.Resources
def test_add_unowned_page(): # issue 174
pdf = Pdf.new()
d = Dictionary(Type=Name.Page)
pdf.pages.append(Page(d))
def test_failed_add_page_cleanup():
pdf = Pdf.new()
d = Dictionary(Type=Name.NotAPage)
num_objects = len(pdf.objects)
with pytest.raises(TypeError, match="pikepdf.Page"):
pdf.pages.append(d)
assert len(pdf.pages) == 0
# If we fail to add a new page, confirm we did not create a new object
assert len(pdf.objects) == num_objects, "A dangling page object was created"
assert pdf.objects[-1] is not None, "Left a stale object behind without deleting"
# But we'd better not delete an existing object...
d2 = pdf.make_indirect(Dictionary(Type=Name.StillNotAPage))
with pytest.raises(TypeError, match="pikepdf.Page"):
pdf.pages.append(d2)
assert len(pdf.pages) == 0
assert d2.same_owner_as(pdf.Root)
def test_formx(graph, outpdf):
formx = graph.pages[0].as_form_xobject()
graph.add_blank_page()
new_page = graph.pages[-1]
formx_placed_name = new_page.add_resource(formx, Name.XObject)
cs = new_page.calc_form_xobject_placement(
formx, formx_placed_name, Rectangle(0, 0, 200, 200)
)
assert bytes(formx_placed_name) in cs
new_page.obj.Contents = graph.make_stream(cs)
graph.save(outpdf)
assert formx_placed_name in new_page.form_xobjects
assert new_page.form_xobjects[formx_placed_name] == formx
def test_fourpages_to_4up(fourpages, graph, outpdf):
pdf = Pdf.new()
pdf.add_blank_page(page_size=(1000, 1000))
page = pdf.pages[0]
pdf.pages.extend(fourpages.pages)
# Keep explicit Page(pdf.pages[..]) here
page.add_overlay(pdf.pages[1], Rectangle(0, 500, 500, 1000))
page.add_overlay(Page(pdf.pages[2]), Rectangle(500, 500, 1000, 1000))
page.add_overlay(Page(pdf.pages[3]).as_form_xobject(), Rectangle(0, 0, 500, 500))
page.add_underlay(pdf.pages[4], Rectangle(500, 0, 1000, 500))
page.add_underlay(graph.pages[0].obj)
with pytest.raises(TypeError):
page.add_overlay(Dictionary(Key=123))
del pdf.pages[1:]
pdf.save(outpdf)
def _simple_interpret_content_stream(page: Page | Object):
ctm = Matrix()
stack: list[Matrix] = []
for instruction in parse_content_stream(page, operators='q Q cm Do'):
if isinstance(instruction, ContentStreamInlineImage):
continue
operands, op = instruction.operands, instruction.operator
if op == Operator('q'):
stack.append(ctm)
elif op == Operator('Q'):
ctm = stack.pop()
elif op == Operator('cm'):
ctm = Matrix(operands) @ ctm
elif op == Operator('Do'):
xobj_name = operands[0]
yield (xobj_name, ctm)
def test_push_stack(fourpages, outpdf):
pdf = Pdf.new()
pdf.add_blank_page(page_size=(1000, 1000))
page = pdf.pages[0]
pdf.pages.extend(fourpages.pages)
page.Contents = pdf.make_stream(
b"0.4 G\n"
b"0 500 500 1000 re s\n"
b"500 500 1000 1000 re s\n"
b"-1 0 0 1 500 0 cm\n"
)
xobj1 = page.add_overlay(
pdf.pages[1], Rectangle(0, 500, 500, 1000), push_stack=False
)
xobj2 = page.add_overlay(
pdf.pages[2], Rectangle(500, 500, 1000, 1000), push_stack=True
)
draw_events = _simple_interpret_content_stream(page)
# First page should be mirrored horizontally since stack was not pushed
xobj, ctm = next(draw_events)
assert xobj == xobj1
assert ctm.a < 0 and ctm.d > 0, "Not horizontally mirrored as expected"
# Second page should be in upper right corner, properly positioned for a 4-up
xobj, ctm = next(draw_events)
assert xobj == xobj2
assert ctm.e >= 500 and ctm.f >= 500
# Test requires visual confirmation
del pdf.pages[1:]
pdf.save(outpdf)
def test_page_equal(fourpages, graph):
assert fourpages.pages[0] == fourpages.pages[0]
assert fourpages.pages[0] != fourpages.pages[1]
assert graph.pages[0] != fourpages.pages[2]
graph.pages.append(graph.pages[0])
assert graph.pages[1] == graph.pages[0]
assert copy.copy(graph.pages[1]) == graph.pages[0]
assert graph.pages[0] != "dissimilar type"
def test_cant_hash_page(graph):
with pytest.raises(TypeError, match="unhashable"):
hash(graph.pages[0])
def test_contents_add(graph):
graph.pages[0].contents_add(b'q Q', prepend=True)
new_cs = graph.make_stream(b'q Q')
graph.pages[0].contents_add(new_cs, prepend=False)
graph.pages[0].contents_coalesce()
assert graph.pages[0].Contents.read_bytes().startswith(b'q Q')
assert graph.pages[0].Contents.read_bytes().endswith(b'q Q')
def test_remove_unrefed(graph):
assert len(graph.pages[0].Resources.XObject) != 0
graph.pages[0].Contents = graph.make_stream(b'')
graph.pages[0].remove_unreferenced_resources()
assert len(graph.pages[0].Resources.XObject) == 0
def test_page_attrs(graph):
# Test __getattr__
assert isinstance(graph.pages[0].Resources, Dictionary)
del graph.pages[0].Resources
with pytest.raises(
AttributeError, match=r"can't delete|property( '')? of 'Page' object has no deleter"
):
del graph.pages[0].obj
del graph.pages[0]['/Contents']
assert Name.Contents not in graph.pages[0] and Name.Resources not in graph.pages[0]
def test_block_make_indirect_page(graph: Pdf):
with pytest.raises(TypeError, match='implicitly'):
graph.make_indirect(graph.pages[0])
assert isinstance(graph.make_indirect(graph.pages[0].obj), Object)
|
TestAddResource
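A usage sketch of Page.add_resource as exercised above, assuming pikepdf is installed; the 1x1 image dictionary is the same bare placeholder the tests use.
from pikepdf import Dictionary, Name, Pdf

pdf = Pdf.new()
pdf.add_blank_page(page_size=(200, 200))
page = pdf.pages[0]
d = Dictionary(Type=Name.XObject, Subtype=Name.Image, Width=1, Height=1)
res_name = page.add_resource(d, Name.XObject, prefix="Im")
print(res_name)  # e.g. /Im0 -- the exact suffix is chosen by pikepdf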
|
python
|
keras-team__keras
|
keras/src/layers/core/lambda_layer_test.py
|
{
"start": 121,
"end": 3259
}
|
class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_lambda_basics(self):
self.run_layer_test(
layers.Lambda,
init_kwargs={
"function": ops.square,
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
custom_objects={"square": ops.square},
)
self.run_layer_test(
layers.Lambda,
init_kwargs={"function": ops.square, "mask": ops.ones((2, 3))},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
custom_objects={"square": ops.square},
)
def stacker(x):
return ops.concatenate([x, x], axis=1)
self.run_layer_test(
layers.Lambda,
init_kwargs={"function": stacker, "output_shape": (6,)},
input_shape=(2, 3),
expected_output_shape=(2, 6),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
custom_objects={"stacker": stacker},
)
def stacker_shape(s):
return (s[0], s[1] * 2)
self.run_layer_test(
layers.Lambda,
init_kwargs={
"function": stacker,
"output_shape": stacker_shape,
},
input_shape=(2, 3),
expected_output_shape=(2, 6),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
custom_objects={"stacker": stacker, "stacker_shape": stacker_shape},
)
def test_correctness(self):
layer = layers.Lambda(lambda x: x**2)
output = layer(2 * np.ones((2, 3)))
self.assertAllClose(4 * np.ones((2, 3)), output)
# Test serialization roundtrip
config = layer.get_config()
layer = layers.Lambda.from_config(config, safe_mode=False)
output = layer(2 * np.ones((2, 3)))
self.assertAllClose(4 * np.ones((2, 3)), output)
def test_correctness_lambda_shape(self):
layer = layers.Lambda(lambda x: x**2, output_shape=lambda x: x)
output = layer(2 * np.ones((2, 3)))
self.assertAllClose(4 * np.ones((2, 3)), output)
# Test serialization roundtrip
config = layer.get_config()
layer = layers.Lambda.from_config(config, safe_mode=False)
output = layer(2 * np.ones((2, 3)))
self.assertAllClose(4 * np.ones((2, 3)), output)
|
LambdaTest
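A minimal usage sketch of layers.Lambda mirroring the correctness test above, assuming a working Keras 3 installation.
import numpy as np
from keras import layers, ops

square = layers.Lambda(lambda x: x**2)
out = square(2 * np.ones((2, 3)))
print(np.allclose(ops.convert_to_numpy(out), 4 * np.ones((2, 3))))  # True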
|
python
|
spyder-ide__spyder
|
spyder/plugins/remoteclient/api/protocol.py
|
{
"start": 993,
"end": 1108
}
|
class ____(typing.TypedDict):
url: str
token: str
default_kernel_spec: str | None
|
JupyterHubClientOptions
|