| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
| python | spack__spack | lib/spack/spack/version/git_ref_lookup.py | {"start": 936, "end": 8225} |
class ____(AbstractRefLookup):
"""An object for cached lookups of git refs
GitRefLookup objects delegate to the MISC_CACHE for locking. GitRefLookup objects may
be attached to a GitVersion to allow for comparisons between git refs and versions as
represented by tags in the git repository.
"""
def __init__(self, pkg_name):
self.pkg_name = pkg_name
self.data: Dict[str, Tuple[Optional[str], int]] = {}
self._pkg = None
self._fetcher = None
self._cache_key = None
self._cache_path = None
# The following properties are used as part of a lazy reference scheme
# to avoid querying the package repository until it is necessary (and
# in particular to wait until after the configuration has been
# assembled)
@property
def cache_key(self):
if not self._cache_key:
key_base = "git_metadata"
self._cache_key = (Path(key_base) / self.repository_uri).as_posix()
# Cache data in MISC_CACHE
# If this is the first lazy access, initialize the cache as well
spack.caches.MISC_CACHE.init_entry(self.cache_key)
return self._cache_key
@property
def cache_path(self):
if not self._cache_path:
self._cache_path = spack.caches.MISC_CACHE.cache_path(self.cache_key)
return self._cache_path
@property
def pkg(self):
if not self._pkg:
try:
pkg = spack.repo.PATH.get_pkg_class(self.pkg_name)
pkg.git
except (spack.repo.RepoError, AttributeError) as e:
raise VersionLookupError(f"Couldn't get the git repo for {self.pkg_name}") from e
self._pkg = pkg
return self._pkg
@property
def fetcher(self):
if not self._fetcher:
# We require the full git repository history
fetcher = spack.fetch_strategy.GitFetchStrategy(git=self.pkg.git)
fetcher.get_full_repo = True
self._fetcher = fetcher
return self._fetcher
@property
def repository_uri(self):
"""Identifier for git repos used within the repo and metadata caches."""
return Path(spack.util.hash.b32_hash(self.pkg.git)[-7:])
def save(self):
"""Save the data to file"""
with spack.caches.MISC_CACHE.write_transaction(self.cache_key) as (old, new):
sjson.dump(self.data, new)
def load_data(self):
"""Load data if the path already exists."""
if os.path.isfile(self.cache_path):
with spack.caches.MISC_CACHE.read_transaction(self.cache_key) as cache_file:
self.data = sjson.load(cache_file)
def get(self, ref) -> Tuple[Optional[str], int]:
if not self.data:
self.load_data()
if ref not in self.data:
self.data[ref] = self.lookup_ref(ref)
self.save()
return self.data[ref]
def lookup_ref(self, ref) -> Tuple[Optional[str], int]:
"""Lookup the previous version and distance for a given commit.
We use git to compare the known versions from package to the git tags,
as well as any git tags that are SEMVER versions, and find the latest
known version prior to the commit, as well as the distance from that version
to the commit in the git repo. Those values are used to compare Version objects.
"""
pathlib_dest = Path(spack.paths.user_repos_cache_path) / self.repository_uri
dest = str(pathlib_dest)
# prepare a cache for the repository
dest_parent = os.path.dirname(dest)
if not os.path.exists(dest_parent):
mkdirp(dest_parent)
# Only clone if we don't have it!
if not os.path.exists(dest):
self.fetcher.bare_clone(dest)
# Lookup commit info
with working_dir(dest):
# TODO: we need to update the local tags if they changed on the
# remote instance, simply adding '-f' may not be sufficient
# (if commits are deleted on the remote, this command alone
# won't properly update the local rev-list)
self.fetcher.git("fetch", "--tags", output=os.devnull, error=os.devnull)
# Ensure ref is a commit object known to git
# Note the brackets are literals, the ref replaces the format string
try:
self.fetcher.git(
"cat-file", "-e", "%s^{commit}" % ref, output=os.devnull, error=os.devnull
)
except spack.util.executable.ProcessError:
raise VersionLookupError("%s is not a valid git ref for %s" % (ref, self.pkg_name))
# List tags (refs) by date, so last reference of a tag is newest
tag_info = self.fetcher.git(
"for-each-ref",
"--sort=creatordate",
"--format",
"%(objectname) %(refname)",
"refs/tags",
output=str,
).split("\n")
# Lookup of commits to spack versions
commit_to_version = {}
for entry in tag_info:
if not entry:
continue
tag_commit, tag = entry.split()
tag = tag.replace("refs/tags/", "", 1)
# For each tag, try to match to a version
for v in [v.string for v in self.pkg.versions]:
if v == tag or "v" + v == tag:
commit_to_version[tag_commit] = v
break
else:
# try to parse tag to compare versions spack does not know
match = SEMVER_REGEX.search(tag)
if match:
commit_to_version[tag_commit] = match.group()
ancestor_commits = []
for tag_commit in commit_to_version:
self.fetcher.git("merge-base", "--is-ancestor", tag_commit, ref, ignore_errors=[1])
if self.fetcher.git.returncode == 0:
distance = self.fetcher.git(
"rev-list", "%s..%s" % (tag_commit, ref), "--count", output=str, error=str
).strip()
ancestor_commits.append((tag_commit, int(distance)))
if ancestor_commits:
# Get nearest ancestor that is a known version
prev_version_commit, distance = min(ancestor_commits, key=lambda x: x[1])
prev_version = commit_to_version[prev_version_commit]
else:
# Get list of all commits, this is in reverse order
# We use this to get the first commit below
ref_info = self.fetcher.git("log", "--all", "--pretty=format:%H", output=str)
commits = [c for c in ref_info.split("\n") if c]
# No previous version and distance from first commit
prev_version = None
distance = int(
self.fetcher.git(
"rev-list", "%s..%s" % (commits[-1], ref), "--count", output=str, error=str
).strip()
)
return prev_version, distance
| GitRefLookup |
| python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/pep695.py | {"start": 296, "end": 1848} |
class ____:
"""This is class Foo."""
type Pep695Alias = Foo
"""This is PEP695 type alias."""
type Pep695AliasUndocumented = Foo
TypeAliasTypeExplicit = TypeAliasType('TypeAliasTypeExplicit', Foo) # NoQA: UP040
"""This is an explicitly constructed typing.TypeAlias."""
HandlerTypeAliasType = TypeAliasType('HandlerTypeAliasType', type[Exception]) # NoQA: UP040
"""This is an explicitly constructed generic alias typing.TypeAlias."""
TypeAliasTypeExtension = typing_extensions.TypeAliasType('TypeAliasTypeExtension', Foo) # NoQA: UP040
"""This is an explicitly constructed typing_extensions.TypeAlias."""
#: This is PEP695 complex type alias with doc comment.
type Pep695AliasC = dict[str, Foo]
type Pep695AliasUnion = str | int
"""This is PEP695 type alias for union."""
type Pep695AliasOfAlias = Pep695AliasC
"""This is PEP695 type alias of PEP695 alias."""
Bar = NewType('Bar', Pep695Alias)
"""This is newtype of Pep695Alias."""
def ret_pep695(a: Pep695Alias) -> Pep695Alias:
"""This fn accepts and returns PEP695 alias."""
...
def read_file(path: pathlike) -> bytes:
"""Read a file and return its contents.
Tests Union type alias cross-reference resolution.
"""
def process_error(handler: Handler, other: HandlerTypeAliasType) -> str:
"""Process an error with a custom handler type.
Tests generic type alias cross-reference resolution.
"""
def buffer_len(data: buffer_like) -> int:
"""Return length of a buffer-like object.
Tests Union type alias cross-reference resolution.
"""
| Foo |
| python | joke2k__faker | faker/providers/address/th_TH/__init__.py | {"start": 84, "end": 9261} |
class ____(AddressProvider):
street_name_formats = ("{{street_prefix}}{{last_name}}",)
street_address_formats = ("{{building_number}} {{street_name}}",)
address_formats = OrderedDict(
(
(
"{{street_address}} {{tambon}} {{amphoe}} {{province}} {{postcode}}",
50.0,
),
(
"{{street_address}} ตำบล{{tambon}} อำเภอ{{amphoe}} {{province}} {{postcode}}",
50.0,
),
(
"{{street_address}} ต.{{tambon}} อ.{{amphoe}} {{province}} {{postcode}}",
50.0,
),
(
"{{street_address}} ต.{{tambon}} อ.{{amphoe}} จ.{{province}} {{postcode}}",
40.0,
),
("{{street_address}} อำเภอ{{amphoe}} {{province}} {{postcode}}", 30.0),
("{{street_address}} อ.{{amphoe}} {{province}} {{postcode}}", 30.0),
("{{street_address}} {{amphoe}} {{province}} {{postcode}}", 30.0),
("{{street_address}} {{tambon}} {{province}} {{postcode}}", 15.0),
("{{street_address}} {{amphoe}} จ.{{province}} {{postcode}}", 15.0),
("{{street_address}} {{tambon}} จ.{{province}} {{postcode}}", 15.0),
("{{street_address}} อ.{{amphoe}} จ.{{province}} {{postcode}}", 15.0),
("{{street_address}} ต.{{tambon}} จ.{{province}} {{postcode}}", 15.0),
(
"{{street_address}} อำเภอ{{amphoe}} จังหวัด{{province}} {{postcode}}",
15.0,
),
(
"{{street_address}} ตำบล{{tambon}} อำเภอ{{amphoe}} จังหวัด{{province}} {{postcode}}",
10.0,
),
("{{street_address}} {{province}} {{postcode}}", 15.0),
("{{street_address}} ต.{{tambon}} อ.{{amphoe}} {{province}}", 15.0),
("{{street_address}} ต.{{tambon}} อ.{{amphoe}} จ.{{province}}", 15.0),
(
"{{street_address}} ตำบล{{tambon}} จังหวัด{{province}} {{postcode}}",
10.0,
),
(
"{{building_number}} ต.{{tambon}} อ.{{amphoe}} {{province}} {{postcode}}",
10.0,
),
(
"{{building_number}} หมู่บ้าน{{first_name}} {{amphoe}} {{province}} {{postcode}}",
10.0,
),
)
)
# city names are actual city municipalities in Thailand
# source: Wikipedia: https://th.wikipedia.org/wiki/เทศบาลนครในประเทศไทย
city_formats = ("{{city_name}}",)
cities = (
"กรุงเทพมหานคร",
"นนทบุรี",
"ปากเกร็ด",
"หาดใหญ่",
"เจ้าพระยาสุรศักดิ์",
"สุราษฎร์ธานี",
"อุดรธานี",
"เชียงใหม่",
"นครราชสีมา",
"พัทยา",
"ขอนแก่น",
"นครศรีธรรมราช",
"แหลมฉบัง",
"รังสิต",
"นครสวรรค์",
"ภูเก็ต",
"เชียงราย",
"อุบลราชธานี",
"นครปฐม",
"เกาะสมุย",
"สมุทรสาคร",
"พิษณุโลก",
"ระยอง",
"สงขลา",
"ยะลา",
"ตรัง",
"อ้อมน้อย",
"สกลนคร",
"ลำปาง",
"สมุทรปราการ",
"พระนครศรีอยุธยา",
"แม่สอด",
)
building_number_formats = (
"###",
"##",
"#",
"###/#",
"###/##",
"##/#",
"##/##",
"#/#",
"## หมู่ #",
"## หมู่ ##",
)
street_prefixes = OrderedDict(
(
("ถนน", 0.5),
("ถ.", 0.4),
("ซอย", 0.02),
("ซ.", 0.02),
)
)
postcode_formats = (
# as per https://en.wikipedia.org/wiki/Postal_codes_in_Thailand
"1###0",
"2###0",
"3###0",
"4###0",
"5###0",
"6###0",
"7###0",
"8###0",
"9###0",
)
provinces = (
"กระบี่",
"กรุงเทพมหานคร",
"กรุงเทพ",
"กรุงเทพฯ",
"กทม.",
"กาญจนบุรี",
"กาฬสินธุ์",
"กำแพงเพชร",
"ขอนแก่น",
"จันทบุรี",
"ฉะเชิงเทรา",
"ชลบุรี",
"ชัยนาท",
"ชัยภูมิ",
"ชุมพร",
"เชียงราย",
"เชียงใหม่",
"ตรัง",
"ตราด",
"ตาก",
"นครนายก",
"นครปฐม",
"นครพนม",
"นครราชสีมา",
"นครศรีธรรมราช",
"นครสวรรค์",
"นนทบุรี",
"นราธิวาส",
"น่าน",
"บึงกาฬ",
"บุรีรัมย์",
"ปทุมธานี",
"ประจวบคีรีขันธ์",
"ปราจีนบุรี",
"ปัตตานี",
"พระนครศรีอยุธยา",
"พะเยา",
"พังงา",
"พัทลุง",
"พิจิตร",
"พิษณุโลก",
"เพชรบุรี",
"เพชรบูรณ์",
"แพร่",
"ภูเก็ต",
"มหาสารคาม",
"มุกดาหาร",
"แม่ฮ่องสอน",
"ยโสธร",
"ยะลา",
"ร้อยเอ็ด",
"ระนอง",
"ระยอง",
"ราชบุรี",
"ลพบุรี",
"ลำปาง",
"ลำพูน",
"เลย",
"ศรีสะเกษ",
"สกลนคร",
"สงขลา",
"สตูล",
"สมุทรปราการ",
"สมุทรสงคราม",
"สมุทรสาคร",
"สระแก้ว",
"สระบุรี",
"สิงห์บุรี",
"สุโขทัย",
"สุพรรณบุรี",
"สุราษฎร์ธานี",
"สุรินทร์",
"หนองคาย",
"หนองบัวลำภู",
"อ่างทอง",
"อำนาจเจริญ",
"อุดรธานี",
"อุตรดิตถ์",
"อุทัยธานี",
"อุบลราชธานี",
)
amphoes = (
"เกษตรสมบูรณ์",
"แก้งคร้อ",
"คอนสวรรค์",
"คอนสาร",
"ซับใหญ่",
"เทพสถิต",
"เนินสง่า",
"บ้านเขว้า",
"บ้านแท่น",
"บำเหน็จณรงค์",
"หนองบัวโคก",
"ภักดีชุมพล",
"ภูเขียว",
"หนองบัวแดง",
"หนองบัวระเหว",
"เทิง",
"แม่ลาว",
"แม่สรวย",
"เวียงแก่น",
"เวียงชัย",
"เวียงป่าเป้า",
"เขาสมิง",
"คลองใหญ่",
"บ่อไร่",
"นาแก",
"นาทม",
"นาหว้า",
"บ้านแพง",
"ปลาปาก",
"โพนสวรรค์",
"เรณูนคร",
"วังยาง",
"ศรีสงคราม",
"เฉลิมพระเกียรติ",
"เมือง",
"ปากคาด",
"พรเจริญ",
"ศรีวิไล",
"ป้อมปราบศัตรูพ่าย",
"พระนคร",
"สามโคก",
"บางสะพานน้อย",
"บึงกุ่ม",
"ภาษีเจริญ",
"วังทองหลาง",
"ห้วยขวาง",
"หนอกจอก",
"สะพานสูง",
)
tambons = (
"บางแค",
"บางแค",
"บางไผ่",
"บางปะกอก",
"ยางตลาด",
"ดอนสมบูรณ์",
"หัวงัว",
"นาเชือก",
"เทพศิรินทร์",
"อุ่มเม่า",
"คลองขาม",
"บัวบาน",
"เขาพระนอน",
"เว่อ",
"นาดี",
"อิตื้อ",
"โนนสูง",
"หัวนาคำ",
"หนองตอกแป้น",
"หนองอิเฒ่า",
"โนนศิลา",
"หนองปลาหมอ",
"เปือยใหญ่",
"โนนแดง",
"ก้อนแก้ว",
"คลองเขื่อน",
"บางเล่า",
"บางโรง",
"บางตลาด",
"เนินขาม",
"กะบกเตี้ย",
"สุขเดือนห้า",
"พะโต๊ะ",
"ปากทรง",
"ปังหวาน",
"พระรักษ์",
"ห้วยยอด",
"ปากคม",
"หนองช้างแล่น",
"ท่างิ้ว",
"บางดี",
"ลำภูรา",
"บางกุ้ง",
"นาวง",
"เขากอบ",
"เขาขาว",
"ในเตา",
"เขาปูน",
"ทุ่งต่อ",
"ปากแจ่ม",
"เกาะหวาย",
"ปากพลี",
"เกาะโพธิ์",
"ท่าเรือ",
"โคกกรวด",
"หนองแสง",
"นาหินลาด",
)
tambon_prefixes = OrderedDict(
(
("", 40.0),
("วัด", 2.0),
("บ้าน", 2.0),
("บ่อ", 2.0),
("บึง", 2.0),
("ป่า", 1.0),
("ห้วย", 1.0),
)
)
tambon_suffixes = OrderedDict(
(
("", 30),
("เหนือ", 3),
("ใต้", 3),
("ใหญ่", 2),
("กลาง", 1),
("เล็ก", 1),
("ใหม่", 1),
("เดิม", 0.1),
)
)
city_suffixes = ("นคร",)
def street_prefix(self) -> str:
"""
:example: 'ถนน'
"""
return self.random_element(self.street_prefixes)
def administrative_unit(self) -> str:
"""
:example: 'อุบลราชธานี'
"""
return self.random_element(self.provinces)
province = administrative_unit
def amphoe(self) -> str:
"""
Get a random Amphoe (district) name.
Currently it's totally random and not necessarily matched with a province.
:example: 'บางสะพานน้อย'
"""
return self.random_element(self.amphoes)
def tambon(self) -> str:
"""
Get a random Tambon (subdistrict) name.
Currently it's totally random and not necessarily matched with an amphoe or province.
:example: 'ห้วยนาง'
"""
return (
f"{self.random_element(self.tambon_prefixes)}{self.random_element(self.tambons)}"
+ f"{self.random_element(self.tambon_suffixes)}"
)
def city_name(self) -> str:
return self.random_element(self.cities)
| Provider |
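For illustration, a minimal usage sketch of the Thai address provider above (this row's target, `Provider`), assuming the standard `faker` package layout in which this class backs the `th_TH` locale; the method names come straight from the formats and methods shown in the cell.

```python
from faker import Faker

# The th_TH locale picks up the address provider shown above.
fake = Faker("th_TH")

print(fake.address())   # a random Thai-style address built from address_formats
print(fake.province())  # random element of `provinces`
print(fake.amphoe())    # random district name
print(fake.tambon())    # prefix + subdistrict + suffix
```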
| python | wandb__wandb | tools/graphql_codegen/plugin.py | {"start": 17670, "end": 20032} |
class ____(ast.NodeTransformer):
"""Applies various modifications to a generated module with pydantic classes."""
def visit_Module(self, node: ast.Module) -> Any:
# Prepend shared import statements to the module. Ruff will clean this up later.
node.body = [
make_import_from("__future__", "annotations"),
make_import_from("wandb._pydantic", [GQL_INPUT, GQL_RESULT, TYPENAME]),
*node.body,
]
return self.generic_visit(node)
def visit_ImportFrom(self, node: ast.ImportFrom) -> Any:
if node.module == "typing":
# Import from `typing_extensions` instead, and let Ruff rewrite later.
node.module = "typing_extensions"
return node
def visit_AnnAssign(self, node: ast.AnnAssign) -> Any:
if isinstance(node.target, ast.Name) and node.target.id == "typename__":
# e.g. BEFORE: `typename__: Literal["MyType"] = Field(alias="__typename")`
# e.g. AFTER: `typename__: Typename[Literal["MyType"]]`
# T -> Typename[T]
node.annotation = ast.Subscript(ast.Name(TYPENAME), node.annotation)
# Drop `= Field(alias="__typename")`, if present
if (
isinstance(call := node.value, ast.Call)
and isinstance(call.func, ast.Name)
and call.func.id == "Field"
and len(call.keywords) == 1
and call.keywords[0].arg == "alias"
):
node.value = None
# If this is a union of a single type, drop the `Field(discriminator=...)`
# since pydantic may complain.
# See: https://github.com/pydantic/pydantic/issues/3636
elif (
isinstance(annotation := node.annotation, ast.Subscript)
and isinstance(annotation.value, ast.Name)
and annotation.value.id == "Union"
and isinstance(annotation.slice, ast.Tuple)
and len(annotation.slice.elts) == 1
):
# e.g. BEFORE: `field: Union[OnlyType,] = Field(discriminator="...")`
# e.g. AFTER: `field: OnlyType`
node.annotation = annotation.slice.elts[0] # Union[T,] -> T
node.value = None # drop `= Field(discriminator=...)`, if present
return self.generic_visit(node)
| PydanticModuleRewriter |
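A sketch of how an `ast.NodeTransformer` like the one above is typically driven. The helpers it relies on (`make_import_from`, `GQL_INPUT`, `GQL_RESULT`, `TYPENAME`) live elsewhere in the codegen plugin and are assumed importable in the same module, so this only runs in that context.

```python
import ast

# Assumes make_import_from / GQL_INPUT / GQL_RESULT / TYPENAME are available
# from the surrounding plugin module, as in the class above.
source = 'typename__: Literal["MyType"] = Field(alias="__typename")\n'

tree = PydanticModuleRewriter().visit(ast.parse(source))
ast.fix_missing_locations(tree)  # required after rewriting nodes in place
# Expect the shared imports to be prepended and the annotation rewritten to
# Typename[Literal["MyType"]] with the `= Field(alias=...)` default dropped.
print(ast.unparse(tree))
```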
| python | tiangolo__fastapi | docs_src/security/tutorial005.py | {"start": 1250, "end": 1345} |
class ____(BaseModel):
username: Union[str, None] = None
scopes: List[str] = []
| TokenData |
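For context, a tiny sketch of how this model (the row's target, `TokenData`) is used in the tutorial's flow: pull the username and scopes out of a decoded JWT payload and validate them into the model. The payload literal here is made up.

```python
# Hypothetical decoded JWT payload, as produced elsewhere in the tutorial.
payload = {"sub": "johndoe", "scopes": ["me", "items"]}

token_data = TokenData(scopes=payload.get("scopes", []), username=payload.get("sub"))
print(token_data.username)  # 'johndoe'
print(token_data.scopes)    # ['me', 'items']
```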
| python | huggingface__transformers | src/transformers/models/mimi/modeling_mimi.py | {"start": 27591, "end": 32455} |
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: MimiConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.attention_dropout = config.attention_dropout
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = config.head_dim
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.is_causal = True
self.scaling = 1 / math.sqrt(config.head_dim)
if self.hidden_size % self.num_heads != 0:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
self.rotary_emb = MimiRotaryEmbedding(config)
self.sliding_window = config.sliding_window # Ignore copy
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = self.rotary_emb(value_states, position_ids)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling
if attention_mask is not None: # no matter the length, we just slice it
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
# NO LONGER EXIST Copied from transformers.models.gemma.modeling_gemma.GemmaFlashAttention2 with Gemma->Mimi
# TODO cyril: modular
| MimiAttention |
| python | graphql-python__graphene | graphene/validation/tests/test_depth_limit_validator.py | {"start": 443, "end": 541} |
class ____(ObjectType):
class meta:
name = "Dog"
interfaces = (PetType,)
| DogType |
| python | bokeh__bokeh | src/bokeh/document/json.py | {"start": 1814, "end": 1921} |
class ____(TypedDict):
kind: Literal["MessageSent"]
msg_type: str
msg_data: Any | None
| MessageSent |
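A `TypedDict` is just a plain dict at runtime, so constructing a conforming value is straightforward. The import path below follows this row's file path, and the message content is made up.

```python
from bokeh.document.json import MessageSent  # path taken from this row

msg: MessageSent = {
    "kind": "MessageSent",
    "msg_type": "bokeh_event",   # hypothetical message type
    "msg_data": {"detail": 42},  # any JSON-serializable payload, or None
}
```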
| python | apache__airflow | providers/teradata/src/airflow/providers/teradata/operators/teradata_compute_cluster.py | {"start": 16027, "end": 19101} |
class ____(_TeradataComputeClusterOperator):
"""
Teradata Compute Cluster Operator to Resume the specified Teradata Vantage Cloud Lake Compute Cluster.
Resumes the Teradata Vantage Lake Compute Cluster by employing the RESUME SQL statement within the
Teradata Vantage Lake Compute Cluster SQL Interface.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:TeradataComputeClusterResumeOperator`
:param compute_profile_name: Name of the Compute Profile to manage.
:param compute_group_name: Name of compute group to which compute profile belongs.
:param teradata_conn_id: The :ref:`Teradata connection id <howto/connection:teradata>`
reference to a specific Teradata database.
:param timeout: Time elapsed before the task times out and fails. Time is in minutes.
"""
template_fields: Sequence[str] = (
"compute_profile_name",
"compute_group_name",
"teradata_conn_id",
"timeout",
)
ui_color = "#e07c24"
def __init__(
self,
**kwargs,
) -> None:
super().__init__(**kwargs)
def execute(self, context: Context):
"""
Initiate the execution of RESUME COMPUTE SQL statement.
Initiate the execution of the SQL statement for resuming the compute cluster within Teradata Vantage
Lake, effectively resumes the compute cluster.
Airflow runs this method on the worker and defers using the trigger.
"""
return self._compute_cluster_execute()
def _compute_cluster_execute(self, operation: str | None = None):
super()._compute_cluster_execute("Resume")
cc_status_query = (
"SEL ComputeProfileState FROM DBC.ComputeProfilesVX WHERE UPPER(ComputeProfileName) = UPPER('"
+ self.compute_profile_name
+ "')"
)
if self.compute_group_name:
cc_status_query += " AND UPPER(ComputeGroupName) = UPPER('" + self.compute_group_name + "')"
cc_status_result = self._hook_run(cc_status_query, handler=_single_result_row_handler)
if cc_status_result is not None:
cp_status_result = str(cc_status_result)
# Generates an error message if the compute cluster does not exist for the specified
# compute profile and compute group.
else:
raise AirflowException(Constants.CC_GRP_PRP_NON_EXISTS_MSG % operation)
if cp_status_result != Constants.CC_RESUME_DB_STATUS:
cp_resume_query = f"RESUME COMPUTE FOR COMPUTE PROFILE {self.compute_profile_name}"
if self.compute_group_name:
cp_resume_query = f"{cp_resume_query} IN COMPUTE GROUP {self.compute_group_name}"
return self._handle_cc_status(Constants.CC_RESUME_OPR, cp_resume_query)
self.log.info(
"Compute Cluster %s is already in '%s' status.",
self.compute_profile_name,
Constants.CC_RESUME_DB_STATUS,
)
| TeradataComputeClusterResumeOperator |
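A hedged DAG sketch for the operator above, using only the parameters documented in its docstring and template fields; the connection id, profile, and group names are placeholders, and the import path follows this row's file path.

```python
from datetime import datetime

from airflow import DAG
from airflow.providers.teradata.operators.teradata_compute_cluster import (
    TeradataComputeClusterResumeOperator,
)

with DAG(dag_id="resume_compute_cluster", start_date=datetime(2024, 1, 1), schedule=None) as dag:
    resume = TeradataComputeClusterResumeOperator(
        task_id="resume_profile",
        compute_profile_name="analytics_profile",  # placeholder
        compute_group_name="analytics_group",      # placeholder, optional
        teradata_conn_id="teradata_default",
        timeout=20,  # minutes, per the docstring
    )
```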
| python | huggingface__transformers | src/transformers/models/sam3/modeling_sam3.py | {"start": 81284, "end": 86647} |
class ____(Sam3PreTrainedModel):
"""
Mask decoder that combines object queries with pixel-level features to predict instance masks.
Also produces a semantic segmentation output and supports cross-attention to prompts.
"""
_can_record_outputs = {
"attentions": Sam3Attention,
}
def __init__(self, config: Sam3MaskDecoderConfig):
super().__init__(config)
self.config = config
hidden_size = config.hidden_size
# Pixel decoder (FPN)
self.pixel_decoder = Sam3PixelDecoder(config)
# Mask embedder (MLP to transform queries)
self.mask_embedder = Sam3MaskEmbedder(config)
# Projection from pixel decoder output to mask embedding space
self.instance_projection = nn.Conv2d(self.pixel_decoder.out_channels, hidden_size, kernel_size=1)
# Semantic segmentation head (always present in UniversalSegmentationHead)
self.semantic_projection = nn.Conv2d(self.pixel_decoder.out_channels, 1, kernel_size=1)
self.prompt_cross_attn = Sam3Attention(config)
self.prompt_cross_attn_norm = nn.LayerNorm(hidden_size)
self.prompt_cross_attn_dropout = nn.Dropout(config.dropout)
@check_model_inputs()
def forward(
self,
decoder_queries: torch.Tensor,
backbone_features: list[torch.Tensor],
encoder_hidden_states: torch.Tensor,
prompt_features: Optional[torch.Tensor] = None,
prompt_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> dict[str, torch.Tensor]:
"""
Args:
decoder_queries: Decoder output queries [batch_size, num_queries, hidden_size]
backbone_features: List of backbone features to process through FPN
encoder_hidden_states: Encoder outputs [batch_size, seq_len, hidden_size]
prompt_features: Prompt features (text + geometry) for cross-attention [batch_size, prompt_len, hidden_size]
prompt_mask: Padding mask [batch_size, prompt_len] where True=valid, False=padding
Returns:
Sam3MaskDecoderOutput containing predicted masks and semantic segmentation.
"""
if prompt_features is not None:
# Cross-attention: encoder features attend to prompt features
residual = encoder_hidden_states
normed_hidden_states = self.prompt_cross_attn_norm(encoder_hidden_states)
cross_attn_mask = None
if prompt_mask is not None:
cross_attn_mask = create_bidirectional_mask(
config=self.config,
input_embeds=normed_hidden_states,
encoder_hidden_states=prompt_features,
attention_mask=prompt_mask,
)
attn_output, _ = self.prompt_cross_attn(
query=normed_hidden_states,
key=prompt_features,
value=prompt_features,
attention_mask=cross_attn_mask,
**kwargs,
)
encoder_hidden_states = residual + self.prompt_cross_attn_dropout(attn_output)
# Process backbone features through FPN to get pixel embeddings
pixel_embed = self._embed_pixels(
backbone_features=backbone_features,
encoder_hidden_states=encoder_hidden_states,
)
# Predict instance masks via dot product between query embeddings and pixel embeddings
instance_embeds = self.instance_projection(pixel_embed)
mask_embeddings = self.mask_embedder(decoder_queries)
pred_masks = torch.einsum("bqc,bchw->bqhw", mask_embeddings, instance_embeds)
# Generate semantic segmentation
semantic_seg = self.semantic_projection(pixel_embed)
return Sam3MaskDecoderOutput(
pred_masks=pred_masks,
semantic_seg=semantic_seg,
)
def _embed_pixels(
self,
backbone_features: list[torch.Tensor],
encoder_hidden_states: torch.Tensor,
) -> torch.Tensor:
"""
Embed pixels by combining backbone FPN features with encoder vision features.
The encoder vision features replace the finest-resolution backbone feature.
Args:
backbone_features: List of backbone features [batch_size, C, H_i, W_i]
encoder_hidden_states: Encoder outputs [batch_size, seq_len, hidden_size]
Returns:
Pixel embeddings [batch_size, hidden_size, H, W]
"""
backbone_visual_feats = [feat.clone() for feat in backbone_features]
# Extract vision features from encoder output and reshape to spatial format
spatial_dim = backbone_features[-1].shape[-2] * backbone_features[-1].shape[-1]
encoder_visual_embed = encoder_hidden_states[:, :spatial_dim, :]
batch_size, _, hidden_size = encoder_visual_embed.shape
height, width = backbone_features[-1].shape[-2:]
encoder_visual_embed = encoder_visual_embed.transpose(1, 2).reshape(batch_size, hidden_size, height, width)
# Replace finest backbone feature with encoder vision features
backbone_visual_feats[-1] = encoder_visual_embed
# Process through FPN decoder
pixel_embed = self.pixel_decoder(backbone_visual_feats)
return pixel_embed
| Sam3MaskDecoder |
| python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-azure-openai/llama_index/embeddings/azure_openai/base.py | {"start": 1009, "end": 7014} |
class ____(OpenAIEmbedding):
azure_endpoint: Optional[str] = Field(
default=None, description="The Azure endpoint to use.", validate_default=True
)
azure_deployment: Optional[str] = Field(
default=None, description="The Azure deployment to use.", validate_default=True
)
api_base: str = Field(
default="",
description="The base URL for Azure deployment.",
validate_default=True,
)
api_version: str = Field(
default="",
description="The version for Azure OpenAI API.",
validate_default=True,
)
azure_ad_token_provider: Optional[AnnotatedProvider] = Field(
default=None,
description="Callback function to provide Azure AD token.",
exclude=True,
)
use_azure_ad: bool = Field(
description="Indicates if Microsoft Entra ID (former Azure AD) is used for token authentication"
)
_azure_ad_token: Any = PrivateAttr(default=None)
_client: AzureOpenAI = PrivateAttr()
_aclient: AsyncAzureOpenAI = PrivateAttr()
def __init__(
self,
mode: str = OpenAIEmbeddingMode.TEXT_SEARCH_MODE,
model: str = OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
dimensions: Optional[int] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
api_version: Optional[str] = None,
api_base: Optional[str] = None,
# azure specific
azure_endpoint: Optional[str] = None,
azure_deployment: Optional[str] = None,
azure_ad_token_provider: Optional[AzureADTokenProvider] = None,
use_azure_ad: bool = False,
deployment_name: Optional[str] = None,
max_retries: int = 10,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
num_workers: Optional[int] = None,
# custom httpx client
http_client: Optional[httpx.Client] = None,
async_http_client: Optional[httpx.AsyncClient] = None,
**kwargs: Any,
):
# OpenAI base_url (api_base) and azure_endpoint are mutually exclusive
if api_base is None:
azure_endpoint = get_from_param_or_env(
"azure_endpoint", azure_endpoint, "AZURE_OPENAI_ENDPOINT", None
)
if not use_azure_ad:
api_key = get_from_param_or_env(
"api_key", api_key, "AZURE_OPENAI_API_KEY", None
)
azure_deployment = resolve_from_aliases(
azure_deployment,
deployment_name,
)
super().__init__(
mode=mode,
model=model,
embed_batch_size=embed_batch_size,
dimensions=dimensions,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_version=api_version,
api_base=api_base,
azure_endpoint=azure_endpoint,
azure_deployment=azure_deployment,
azure_ad_token_provider=azure_ad_token_provider,
use_azure_ad=use_azure_ad,
max_retries=max_retries,
reuse_client=reuse_client,
callback_manager=callback_manager,
http_client=http_client,
async_http_client=async_http_client,
num_workers=num_workers,
**kwargs,
)
# reset api_base to None if it is the default
if self.api_base == DEFAULT_OPENAI_API_BASE or self.azure_endpoint:
self.api_base = None
@model_validator(mode="before")
@classmethod
def validate_env(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate necessary credentials are set."""
if (
values.get("api_base") == "https://api.openai.com/v1"
and values.get("azure_endpoint") is None
):
raise ValueError(
"You must set OPENAI_API_BASE to your Azure endpoint. "
"It should look like https://YOUR_RESOURCE_NAME.openai.azure.com/"
)
if values.get("api_version") is None:
raise ValueError("You must set OPENAI_API_VERSION for Azure OpenAI.")
return values
def _get_client(self) -> AzureOpenAI:
if not self.reuse_client:
return AzureOpenAI(**self._get_credential_kwargs())
if self._client is None:
self._client = AzureOpenAI(**self._get_credential_kwargs())
return self._client
def _get_aclient(self) -> AsyncAzureOpenAI:
if not self.reuse_client:
return AsyncAzureOpenAI(**self._get_credential_kwargs(is_async=True))
if self._aclient is None:
self._aclient = AsyncAzureOpenAI(
**self._get_credential_kwargs(is_async=True)
)
return self._aclient
def _get_credential_kwargs(self, is_async: bool = False) -> Dict[str, Any]:
if self.use_azure_ad:
if self.azure_ad_token_provider:
self.api_key = self.azure_ad_token_provider()
else:
self._azure_ad_token = refresh_openai_azuread_token(
self._azure_ad_token
)
self.api_key = self._azure_ad_token.token
else:
self.api_key = get_from_param_or_env(
"api_key", self.api_key, "AZURE_OPENAI_API_KEY"
)
return {
"api_key": self.api_key,
"azure_ad_token_provider": self.azure_ad_token_provider,
"azure_endpoint": self.azure_endpoint,
"azure_deployment": self.azure_deployment,
"base_url": self.api_base,
"api_version": self.api_version,
"default_headers": self.default_headers,
"http_client": self._async_http_client if is_async else self._http_client,
}
@classmethod
def class_name(cls) -> str:
return "AzureOpenAIEmbedding"
| AzureOpenAIEmbedding |
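A construction sketch for the class above, using only parameters visible in its `__init__`; the endpoint, deployment, key, and API version values are placeholders, and `get_text_embedding` is assumed from the usual llama-index `BaseEmbedding` interface.

```python
embed_model = AzureOpenAIEmbedding(
    model="text-embedding-ada-002",
    deployment_name="my-embedding-deployment",               # placeholder
    api_key="...",                                           # or set AZURE_OPENAI_API_KEY
    azure_endpoint="https://my-resource.openai.azure.com/",  # placeholder
    api_version="2023-05-15",                                # placeholder
)
vector = embed_model.get_text_embedding("hello world")
print(len(vector))
```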
| python | huggingface__transformers | tests/models/rag/test_modeling_rag.py | {"start": 25685, "end": 40726} |
class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.temp_dir = tempfile.TemporaryDirectory()
cls.dataset_path = cls.temp_dir.name
cls.index_path = os.path.join(cls.temp_dir.name, "index.faiss")
ds = load_dataset("hf-internal-testing/wiki_dpr_dummy")["train"]
ds.save_to_disk(cls.dataset_path)
url = "https://huggingface.co/datasets/hf-internal-testing/wiki_dpr_dummy/resolve/main/index.faiss"
response = requests.get(url, stream=True)
with open(cls.index_path, "wb") as fp:
fp.write(response.content)
@classmethod
def tearDownClass(cls):
cls.temp_dir.cleanup()
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
cleanup(torch_device, gc_collect=True)
@cached_property
def sequence_model(self):
return (
RagSequenceForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn"
)
.to(torch_device)
.eval()
)
@cached_property
def token_model(self):
return (
RagTokenForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn"
)
.to(torch_device)
.eval()
)
def get_rag_config(self):
question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn")
return RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
bos_token_id=0,
decoder_start_token_id=2,
eos_token_id=2,
is_encoder_decoder=True,
pad_token_id=1,
vocab_size=50264,
title_sep=" / ",
doc_sep=" // ",
n_docs=5,
max_combined_length=300,
dataset="wiki_dpr",
dataset_split="train",
index_name="custom",
passages_path=self.dataset_path,
index_path=self.index_path,
use_dummy_dataset=True,
retrieval_vector_size=768,
retrieval_batch_size=8,
dataset_revision="b24a417",
)
def test_rag_sequence_inference(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_sequence = self.sequence_model
rag_sequence.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="pt"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
decoder_input_ids = decoder_input_ids.to(torch_device)
with torch.no_grad():
output = rag_sequence(
input_ids,
labels=decoder_input_ids,
)
expected_shape = torch.Size([5, 5, 50264])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = torch.tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]).to(torch_device)
_assert_tensors_equal(expected_doc_scores, output.doc_scores, atol=TOLERANCE)
expected_loss = torch.tensor([36.7368]).to(torch_device)
_assert_tensors_equal(expected_loss, output.loss, atol=TOLERANCE)
def test_rag_token_inference(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_token = self.token_model
rag_token.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="pt"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
decoder_input_ids = decoder_input_ids.to(torch_device)
with torch.no_grad():
output = rag_token(
input_ids,
labels=decoder_input_ids,
)
expected_shape = torch.Size([5, 5, 50264])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = torch.tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]).to(torch_device)
_assert_tensors_equal(expected_doc_scores, output.doc_scores, atol=TOLERANCE)
expected_loss = torch.tensor([36.3557]).to(torch_device)
_assert_tensors_equal(expected_loss, output.loss, atol=TOLERANCE)
def test_rag_token_generate_beam(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_token = self.token_model
rag_token.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="pt"
).input_ids
input_ids = input_ids.to(torch_device)
output_ids = rag_token.generate(
input_ids,
decoder_start_token_id=rag_token.generator.config.decoder_start_token_id,
num_beams=2,
num_return_sequences=2,
)
# sequence generate test
output_text_1 = rag_decoder_tokenizer.decode(output_ids[0], skip_special_tokens=True)
output_text_2 = rag_decoder_tokenizer.decode(output_ids[1], skip_special_tokens=True)
# Expected outputs as given by model at integration time.
EXPECTED_OUTPUT_TEXT_1 = '"She\'s My Kind of Girl" was released through Epic Records in Japan in March 1972. The song was a Top 10 hit in the country. It was the first single to be released by ABBA in the UK. The single was followed by "En Carousel" and "Love Has Its Uses"'
EXPECTED_OUTPUT_TEXT_2 = '"She\'s My Kind of Girl" was released through Epic Records in Japan in March 1972. The song was a Top 10 hit in the country. It was the first single to be released by ABBA in the UK. The single was followed by "En Carousel" and "Love Has Its Ways"'
self.assertEqual(output_text_1, EXPECTED_OUTPUT_TEXT_1)
self.assertEqual(output_text_2, EXPECTED_OUTPUT_TEXT_2)
def test_rag_sequence_generate_beam(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_sequence = self.sequence_model
rag_sequence.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="pt"
).input_ids
input_ids = input_ids.to(torch_device)
output_ids = rag_sequence.generate(
input_ids,
decoder_start_token_id=rag_sequence.generator.config.decoder_start_token_id,
num_beams=2,
num_return_sequences=2,
)
# sequence generate test
output_text_1 = rag_decoder_tokenizer.decode(output_ids[0], skip_special_tokens=True)
output_text_2 = rag_decoder_tokenizer.decode(output_ids[1], skip_special_tokens=True)
# Expected outputs as given by model at integration time.
EXPECTED_OUTPUT_TEXT_1 = """\"She's My Kind of Girl\" was released through Epic Records in Japan in March 1972, giving the duo a Top 10 hit. Two more singles were released in Japan, \"En Carousel\" and \"Love Has Its Ways\" Ulvaeus and Andersson persevered with their songwriting and experimented with new sounds and vocal arrangements."""
EXPECTED_OUTPUT_TEXT_2 = """In September 2018, Björn Ulvaeus revealed that the two new songs, \"I Still Have Faith In You\" and \"Don't Shut Me Down\", would be released no earlier than March 2019. The two new tracks will feature in a TV special set to air later in the year."""
self.assertEqual(output_text_1, EXPECTED_OUTPUT_TEXT_1)
self.assertEqual(output_text_2, EXPECTED_OUTPUT_TEXT_2)
@property
def questions_data(self):
return [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
]
def test_rag_sequence_generate_batch(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
"facebook/rag-sequence-nq",
index_name="custom",
passages_path=self.dataset_path,
index_path=self.index_path,
)
rag_sequence = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever).to(
torch_device
)
input_dict = tokenizer(
self.questions_data,
return_tensors="pt",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids.to(torch_device)
attention_mask = input_dict.attention_mask.to(torch_device)
output_ids = rag_sequence.generate(
input_ids,
attention_mask=attention_mask,
)
outputs = tokenizer.decode(output_ids, skip_special_tokens=True)
# PR #31938 caused the output to change from `june 22, 2018` to `june 22 , 2018`.
EXPECTED_OUTPUTS = [
" albert einstein",
" june 22 , 2018",
" amplitude modulation",
" tim besley ( chairman )",
" june 20 , 2018",
" 1980",
" 7.0",
" 8",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
def test_rag_sequence_generate_batch_from_context_input_ids(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
"facebook/rag-sequence-nq",
index_name="custom",
passages_path=self.dataset_path,
index_path=self.index_path,
)
rag_sequence = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever).to(
torch_device
)
input_dict = tokenizer(
self.questions_data,
return_tensors="pt",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids.to(torch_device)
attention_mask = input_dict.attention_mask.to(torch_device)
question_hidden_states = rag_sequence.question_encoder(input_ids, attention_mask=attention_mask)[0]
docs_dict = retriever(
input_ids.detach().cpu().numpy(), question_hidden_states.detach().cpu().numpy(), return_tensors="pt"
)
doc_scores = torch.bmm(
question_hidden_states.unsqueeze(1),
docs_dict["retrieved_doc_embeds"].to(torch_device).float().transpose(1, 2),
).squeeze(1)
output_ids = rag_sequence.generate(
context_input_ids=docs_dict["context_input_ids"].to(torch_device),
context_attention_mask=docs_dict["context_attention_mask"].to(torch_device),
doc_scores=doc_scores.to(torch_device),
do_deduplication=True,
)
outputs = tokenizer.decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" june 22 , 2018",
" amplitude modulation",
" tim besley ( chairman )",
" june 20 , 2018",
" 1980",
" 7.0",
" 8",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
def test_rag_token_generate_batch(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
retriever = RagRetriever.from_pretrained(
"facebook/rag-token-nq", index_name="custom", passages_path=self.dataset_path, index_path=self.index_path
)
rag_token = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever).to(
torch_device
)
if torch_device != "cpu":
rag_token.half()
input_dict = tokenizer(
self.questions_data,
return_tensors="pt",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids.to(torch_device)
attention_mask = input_dict.attention_mask.to(torch_device)
output_ids = rag_token.generate(
input_ids,
attention_mask=attention_mask,
)
outputs = tokenizer.decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" september 22 , 2017",
" amplitude modulation",
" stefan persson",
" april 20 , 2018",
" the 1970s",
" 7.1. 2",
" 13",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@require_torch
@require_retrieval
| RagModelIntegrationTests |
| python | huggingface__transformers | src/transformers/models/sam3_tracker/configuration_sam3_tracker.py | {"start": 6703, "end": 11021} |
class ____(PreTrainedConfig):
r"""
[`Sam3TrackerConfig`] is the configuration class to store the configuration of a [`Sam3TrackerModel`]. It is used to instantiate a
SAM3_TRACKER model according to the specified arguments, defining the vision, prompt encoder, and mask decoder
configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the SAM 2.1 Hiera-tiny
[facebook/sam3_tracker.1-hiera-tiny](https://huggingface.co/facebook/sam3_tracker.1-hiera-tiny) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (Union[`dict`, `Sam3TrackerVisionConfig`], *optional*):
Dictionary of configuration options used to initialize [`Sam3TrackerVisionConfig`].
prompt_encoder_config (Union[`dict`, `Sam3TrackerPromptEncoderConfig`], *optional*):
Dictionary of configuration options used to initialize [`Sam3TrackerPromptEncoderConfig`].
mask_decoder_config (Union[`dict`, `Sam3TrackerMaskDecoderConfig`], *optional*):
Dictionary of configuration options used to initialize [`Sam3TrackerMaskDecoderConfig`].
initializer_range (`float`, *optional*, defaults to 0.02):
Standard deviation for parameter initialization.
Example:
```python
>>> from transformers import (
... Sam3TrackerVisionConfig,
... Sam3TrackerPromptEncoderConfig,
... Sam3TrackerMaskDecoderConfig,
... Sam3TrackerModel,
... )
>>> # Initializing a Sam3TrackerConfig with `"facebook/sam3_tracker.1_hiera_tiny"` style configuration
>>> configuration = Sam3TrackerConfig()
>>> # Initializing a Sam3TrackerModel (with random weights) from the `"facebook/sam3_tracker.1_hiera_tiny"` style configuration
>>> model = Sam3TrackerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a Sam3TrackerConfig from a Sam3TrackerVisionConfig, Sam3TrackerPromptEncoderConfig, and Sam3TrackerMaskDecoderConfig
>>> # Initializing SAM3_TRACKER vision encoder, memory attention, and memory encoder configurations
>>> vision_config = Sam3TrackerVisionConfig()
>>> prompt_encoder_config = Sam3TrackerPromptEncoderConfig()
>>> mask_decoder_config = Sam3TrackerMaskDecoderConfig()
>>> config = Sam3TrackerConfig(vision_config, prompt_encoder_config, mask_decoder_config)
```"""
model_type = "sam3_tracker"
sub_configs = {
"vision_config": AutoConfig,
"prompt_encoder_config": Sam3TrackerPromptEncoderConfig,
"mask_decoder_config": Sam3TrackerMaskDecoderConfig,
}
def __init__(
self,
vision_config=None,
prompt_encoder_config=None,
mask_decoder_config=None,
initializer_range=0.02,
**kwargs,
):
vision_config = (
vision_config
if vision_config is not None
else {"backbone_feature_sizes": [[288, 288], [144, 144], [72, 72]]}
)
prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "sam3_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
if isinstance(prompt_encoder_config, Sam3TrackerPromptEncoderConfig):
prompt_encoder_config = prompt_encoder_config.to_dict()
if isinstance(mask_decoder_config, Sam3TrackerMaskDecoderConfig):
mask_decoder_config = mask_decoder_config.to_dict()
self.vision_config = vision_config
self.prompt_encoder_config = Sam3TrackerPromptEncoderConfig(**prompt_encoder_config)
self.mask_decoder_config = Sam3TrackerMaskDecoderConfig(**mask_decoder_config)
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = ["Sam3TrackerConfig", "Sam3TrackerPromptEncoderConfig", "Sam3TrackerMaskDecoderConfig"]
| Sam3TrackerConfig |
| python | matplotlib__matplotlib | galleries/examples/misc/demo_agg_filter.py | {"start": 2369, "end": 2912} |
class ____(BaseFilter):
def __init__(self, sigma, alpha=0.3, color=(0, 0, 0), offsets=(0, 0)):
self.gauss_filter = GaussianFilter(sigma, alpha, color)
self.offset_filter = OffsetFilter(offsets)
def get_pad(self, dpi):
return max(self.gauss_filter.get_pad(dpi),
self.offset_filter.get_pad(dpi))
def process_image(self, padded_src, dpi):
t1 = self.gauss_filter.process_image(padded_src, dpi)
t2 = self.offset_filter.process_image(t1, dpi)
return t2
| DropShadowFilter |
| python | pytorch__pytorch | torch/_functorch/_aot_autograd/descriptors.py | {"start": 15897, "end": 16243} |
class ____(DifferentiableAOTInput):
"""The input is a parameter, whose FQN is target"""
target: str
def expr(self) -> str:
return f"self.get_parameter({self.target!r})"
def is_param(self) -> bool:
return True
def is_buffer(self) -> bool:
return False
@dataclasses.dataclass(frozen=True)
| ParamAOTInput |
| python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/daemon.py | {"start": 761, "end": 1175} |
class ____(BaseModel, extra="forbid"):
maxConcurrentRuns: Optional[IntSource] = None
tagConcurrencyLimits: Optional[list[TagConcurrencyLimit]] = None
dequeueIntervalSeconds: Optional[IntSource] = None
dequeueNumWorkers: Optional[IntSource] = None
dequeueUseThreads: Optional[bool] = None
blockOpConcurrencyLimitedRuns: Optional[BlockOpConcurrencyLimitedRuns] = None
| QueuedRunCoordinatorConfig |
| python | walkccc__LeetCode | solutions/923. 3Sum With Multiplicity/923.py | {"start": 0, "end": 566} |
class ____:
def threeSumMulti(self, arr: list[int], target: int) -> int:
MOD = 1_000_000_007
ans = 0
count = collections.Counter(arr)
for i, x in count.items():
for j, y in count.items():
k = target - i - j
if k not in count:
continue
if i == j and j == k:
ans = (ans + x * (x - 1) * (x - 2) // 6) % MOD
elif i == j and j != k:
ans = (ans + x * (x - 1) // 2 * count[k]) % MOD
elif i < j and j < k:
ans = (ans + x * y * count[k]) % MOD
return ans % MOD
| Solution |
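A quick check of the counting logic above against LeetCode 923's first example (arr = [1,1,2,2,3,3,4,4,5,5], target = 8, expected 20), with the masked class filled in as `Solution` (this row's target). Note the method relies on `collections.Counter`, so its module needs `import collections` at the top.

```python
import collections  # Counter is used inside threeSumMulti

sol = Solution()
# {1,2,5} -> 8 ways, {1,3,4} -> 8 ways, {2,2,4} -> 2 ways, {2,3,3} -> 2 ways = 20
print(sol.threeSumMulti([1, 1, 2, 2, 3, 3, 4, 4, 5, 5], 8))  # 20
```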
| python | numpy__numpy | numpy/typing/tests/data/pass/scalars.py | {"start": 268, "end": 335} |
class ____:
def __complex__(self) -> complex:
return 3j
| C |
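This tiny class (target name `C`) exists so the typing tests have an object implementing the `__complex__` protocol; for illustration:

```python
c = C()
print(complex(c))        # 3j, via __complex__
print(complex(c) == 3j)  # True
```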
| python | huggingface__transformers | src/transformers/models/sam_hq/configuration_sam_hq.py | {"start": 1220, "end": 3356} |
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SamHQPromptEncoderModel`]. The [`SamHQPromptEncoderModel`]
module is used to encode the input 2D points and bounding boxes. Instantiating a configuration with the defaults will yield a
similar configuration to that of the SAM-HQ
[Uminosachi/sam-hq](https://huggingface.co/Uminosachi/sam-hq) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model's output. Read the documentation from
[`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the hidden states.
image_size (`int`, *optional*, defaults to 1024):
The expected output resolution of the image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
mask_input_channels (`int`, *optional*, defaults to 16):
The number of channels to be fed to the `MaskDecoder` module.
num_point_embeddings (`int`, *optional*, defaults to 4):
The number of point embeddings to be used.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the encoder and pooler.
"""
base_config_key = "prompt_encoder_config"
def __init__(
self,
hidden_size=256,
image_size=1024,
patch_size=16,
mask_input_channels=16,
num_point_embeddings=4,
hidden_act="gelu",
layer_norm_eps=1e-6,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.image_size = image_size
self.patch_size = patch_size
self.image_embedding_size = image_size // patch_size
self.mask_input_channels = mask_input_channels
self.num_point_embeddings = num_point_embeddings
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
| SamHQPromptEncoderConfig |
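For illustration, instantiating the prompt-encoder config above with its documented defaults; the import path is taken from this row's file path, and the derived attribute follows directly from the `__init__` shown.

```python
from transformers.models.sam_hq.configuration_sam_hq import SamHQPromptEncoderConfig

config = SamHQPromptEncoderConfig(image_size=1024, patch_size=16)
print(config.image_embedding_size)  # 1024 // 16 == 64
print(config.hidden_act)            # "gelu"
```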
| python | langchain-ai__langchain | libs/core/langchain_core/tracers/stdout.py | {"start": 6425, "end": 6715} |
class ____(FunctionCallbackHandler):
"""Tracer that prints to the console."""
name: str = "console_callback_handler"
def __init__(self, **kwargs: Any) -> None:
"""Create a ConsoleCallbackHandler."""
super().__init__(function=print, **kwargs)
| ConsoleCallbackHandler |
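A minimal sketch of attaching the tracer above to a runnable so run start/end events are printed to the console; `RunnableLambda` is just a convenient stand-in for any runnable here.

```python
from langchain_core.runnables import RunnableLambda
from langchain_core.tracers.stdout import ConsoleCallbackHandler

double = RunnableLambda(lambda x: x * 2)
# The handler prints a trace of the run tree to stdout as the runnable executes.
result = double.invoke(3, config={"callbacks": [ConsoleCallbackHandler()]})
print(result)  # 6
```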
| python | walkccc__LeetCode | solutions/2749. Minimum Operations to Make the Integer Zero/2749.py | {"start": 0, "end": 650} |
class ____:
def makeTheIntegerZero(self, num1: int, num2: int) -> int:
# If k operations are used, num1 - [(num2 + 2^{i_1}) + (num2 + 2^{i_2}) +
# ... + (num2 + 2^{i_k})] = 0. So, num1 - k * num2 = (2^{i_1} + 2^{i_2} +
# ... + 2^{i_k}), where i_1, i_2, ..., i_k are in the range [0, 60].
# Note that for any number x, we can use "x's bit count" operations to make
# x equal to 0. Additionally, we can also use x operations to deduct x by
# 2^0 (x times), which also results in 0.
for ops in range(61):
target = num1 - ops * num2
if target.bit_count() <= ops <= target:
return ops
return -1
| Solution |
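A quick check against LeetCode 2749's first example (num1 = 3, num2 = -2, expected 3), with the masked class filled in as `Solution` (this row's target).

```python
sol = Solution()
# 3 = (2^2 - 2) + (2^2 - 2) + (2^0 - 2), so three operations suffice.
print(sol.makeTheIntegerZero(3, -2))  # 3
```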
| python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ed448.py | {"start": 1976, "end": 4002} |
class ____(metaclass=abc.ABCMeta):
@classmethod
def generate(cls) -> Ed448PrivateKey:
from cryptography.hazmat.backends.openssl.backend import backend
if not backend.ed448_supported():
raise UnsupportedAlgorithm(
"ed448 is not supported by this version of OpenSSL.",
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM,
)
return rust_openssl.ed448.generate_key()
@classmethod
def from_private_bytes(cls, data: Buffer) -> Ed448PrivateKey:
from cryptography.hazmat.backends.openssl.backend import backend
if not backend.ed448_supported():
raise UnsupportedAlgorithm(
"ed448 is not supported by this version of OpenSSL.",
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM,
)
return rust_openssl.ed448.from_private_bytes(data)
@abc.abstractmethod
def public_key(self) -> Ed448PublicKey:
"""
The Ed448PublicKey derived from the private key.
"""
@abc.abstractmethod
def sign(self, data: Buffer) -> bytes:
"""
Signs the data.
"""
@abc.abstractmethod
def private_bytes(
self,
encoding: _serialization.Encoding,
format: _serialization.PrivateFormat,
encryption_algorithm: _serialization.KeySerializationEncryption,
) -> bytes:
"""
The serialized bytes of the private key.
"""
@abc.abstractmethod
def private_bytes_raw(self) -> bytes:
"""
The raw bytes of the private key.
Equivalent to private_bytes(Raw, Raw, NoEncryption()).
"""
@abc.abstractmethod
def __copy__(self) -> Ed448PrivateKey:
"""
Returns a copy.
"""
@abc.abstractmethod
def __deepcopy__(self, memo: dict) -> Ed448PrivateKey:
"""
Returns a deep copy.
"""
if hasattr(rust_openssl, "x448"):
Ed448PrivateKey.register(rust_openssl.ed448.Ed448PrivateKey)
| Ed448PrivateKey |
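Typical round-trip usage of the interface above: generate a key, sign with the private key, and verify with the derived public key.

```python
from cryptography.hazmat.primitives.asymmetric.ed448 import Ed448PrivateKey

private_key = Ed448PrivateKey.generate()
signature = private_key.sign(b"my authenticated message")
public_key = private_key.public_key()
# verify() raises InvalidSignature if the signature does not match the data.
public_key.verify(signature, b"my authenticated message")
```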
| python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-azure-code-interpreter/llama_index/tools/azure_code_interpreter/base.py | {"start": 1807, "end": 11337} |
class ____(BaseToolSpec):
"""
Azure Code Interpreter tool spec.
Leverages Azure Dynamic Sessions to execute Python code.
"""
spec_functions = ["code_interpreter", "list_files"]
def __init__(
self,
pool_management_endpoint: Optional[str] = None,
session_id: Optional[str] = None,
local_save_path: Optional[str] = None,
sanitize_input: bool = True,
) -> None:
"""Initialize with parameters."""
self.pool_management_endpoint: str = pool_management_endpoint or os.getenv(
"AZURE_POOL_MANAGEMENT_ENDPOINT"
)
self.access_token: Optional[AccessToken] = None
def _access_token_provider_factory() -> Callable[[], Optional[str]]:
def access_token_provider() -> Optional[str]:
"""Create a function that returns an access token."""
if self.access_token is None or datetime.fromtimestamp(
self.access_token.expires_on, timezone.utc
) < (datetime.now(timezone.utc) + timedelta(minutes=5)):
credential = DefaultAzureCredential()
self.access_token = credential.get_token(
"https://dynamicsessions.io/.default"
)
return self.access_token.token
return access_token_provider
self.access_token_provider: Callable[[], Optional[str]] = (
_access_token_provider_factory()
)
"""A function that returns the access token to use for the session pool."""
self.session_id: str = session_id or str(uuid4())
"""The session ID to use for the session pool. Defaults to a random UUID."""
self.sanitize_input: bool = sanitize_input
"""Whether to sanitize input before executing it."""
if local_save_path:
if not os.path.exists(local_save_path):
raise Exception(f"Local save path {local_save_path} does not exist.")
self.local_save_path: Optional[str] = local_save_path
"""The local path to save files generated by Python interpreter."""
try:
_package_version = importlib.metadata.version(
"llamaindex-azure-code-interpreter"
)
except importlib.metadata.PackageNotFoundError:
_package_version = "0.0.0"
self.user_agent = (
f"llamaindex-azure-code-interpreter/{_package_version} (Language=Python)"
)
def _build_url(self, path: str) -> str:
pool_management_endpoint = self.pool_management_endpoint
if not pool_management_endpoint:
raise ValueError("pool_management_endpoint is not set")
if not pool_management_endpoint.endswith("/"):
pool_management_endpoint += "/"
encoded_session_id = urllib.parse.quote(self.session_id)
query = f"identifier={encoded_session_id}&api-version=2024-02-02-preview"
query_separator = "&" if "?" in pool_management_endpoint else "?"
return pool_management_endpoint + path + query_separator + query
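    # Illustrative trace (hedged; the endpoint below is a made-up placeholder): with
    # pool_management_endpoint = "https://example.westus2.dynamicsessions.io/pool" and
    # session_id = "abc", _build_url("files") yields
    # "https://example.westus2.dynamicsessions.io/pool/files?identifier=abc&api-version=2024-02-02-preview".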
def code_interpreter(self, python_code: str) -> dict:
"""
This tool is used to execute python commands when you need to perform calculations or computations in a Session.
Input should be a valid python command. The tool returns the result, stdout, and stderr.
Args:
python_code (str): Python code to be executed generated by llm.
"""
if self.sanitize_input:
python_code = _sanitize_input(python_code)
access_token = self.access_token_provider()
api_url = self._build_url("code/execute")
headers = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"User-Agent": self.user_agent,
}
body = {
"properties": {
"codeInputType": "inline",
"executionType": "synchronous",
"code": python_code,
}
}
response = requests.post(api_url, headers=headers, json=body)
response.raise_for_status()
response_json = response.json()
if "properties" in response_json:
if (
"result" in response_json["properties"]
and response_json["properties"]["result"]
):
if isinstance(response_json["properties"]["result"], dict):
if "base64_data" in response_json["properties"]["result"]:
base64_encoded_data = response_json["properties"]["result"][
"base64_data"
]
if self.local_save_path:
file_path = f"{self.local_save_path}/{self.session_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.{response_json['properties']['result']['format']}"
decoded_data = base64.b64decode(base64_encoded_data)
with open(file_path, "wb") as f:
f.write(decoded_data)
                            # Check whether the file was written to the file path successfully; if so, update response_json
response_json["properties"]["result"][
"saved_to_local_path"
] = response_json["properties"]["result"].pop("base64_data")
if os.path.exists(file_path):
response_json["properties"]["result"][
"saved_to_local_path"
] = True
else:
response_json["properties"]["result"][
"saved_to_local_path"
] = False
else:
response_json["properties"]["result"]["base64_data"] = ""
return response_json
    def upload_file(
        self,
        data: Optional[Any] = None,
        remote_file_path: Optional[str] = None,
        local_file_path: Optional[str] = None,
    ) -> List[RemoteFileMetadata]:
        """
        Upload a file to the session under the path /mnt/data.
        Args:
            data: The data to upload.
            remote_file_path: The path to upload the file to, relative to /mnt/data.
                Derived from local_file_path when that is provided.
            local_file_path: The path to the local file to upload.
        Returns:
            List[RemoteFileMetadata]: The list of metadata for the uploaded files.
        """
if data and local_file_path:
raise ValueError("data and local_file_path cannot be provided together")
if local_file_path:
remote_file_path = f"/mnt/data/{os.path.basename(local_file_path)}"
data = open(local_file_path, "rb")
access_token = self.access_token_provider()
if not remote_file_path.startswith("/mnt/data"):
remote_file_path = f"/mnt/data/{remote_file_path}"
api_url = self._build_url("files/upload")
headers = {
"Authorization": f"Bearer {access_token}",
}
files = [("file", (remote_file_path, data, "application/octet-stream"))]
response = requests.request("POST", api_url, headers=headers, files=files)
response.raise_for_status()
response_json = response.json()
remote_files_metadatas = []
for entry in response_json["value"]:
if "properties" in entry:
remote_files_metadatas.append(
RemoteFileMetadata.from_dict(entry["properties"])
)
return remote_files_metadatas
def download_file_to_local(
self, remote_file_path: str, local_file_path: Optional[str] = None
) -> Optional[BufferedReader]:
"""
Download a file from the session back to your local environment.
Args:
remote_file_path: The path to download the file from, relative to `/mnt/data`.
local_file_path: The path to save the downloaded file to. If not provided, the file is returned as a BufferedReader.
Returns:
BufferedReader: The data of the downloaded file.
"""
access_token = self.access_token_provider()
# In case if the file path LLM provides is absolute, remove the /mnt/data/ prefix
remote_file_path = remote_file_path.replace("/mnt/data/", "")
api_url = self._build_url(f"files/content/{remote_file_path}")
headers = {
"Authorization": f"Bearer {access_token}",
}
response = requests.get(api_url, headers=headers)
response.raise_for_status()
if local_file_path:
with open(local_file_path, "wb") as f:
f.write(response.content)
return None
return BytesIO(response.content)
def list_files(self) -> List[RemoteFileMetadata]:
"""
List the files in the session.
Returns:
List[RemoteFileMetadata]: The metadata for the files in the session
"""
access_token = self.access_token_provider()
api_url = self._build_url("files")
headers = {
"Authorization": f"Bearer {access_token}",
}
response = requests.get(api_url, headers=headers)
response.raise_for_status()
response_json = response.json()
return [
RemoteFileMetadata.from_dict(entry["properties"])
for entry in response_json["value"]
]
|
AzureCodeInterpreterToolSpec
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 63639,
"end": 64053
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("alt", "image_url", "caption")
alt = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="alt")
image_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="imageUrl")
caption = sgqlc.types.Field(String, graphql_name="caption")
|
CheckRunOutputImage
|
python
|
sphinx-doc__sphinx
|
sphinx/environment/adapters/asset.py
|
{
"start": 231,
"end": 573
}
|
class ____:
def __init__(self, env: BuildEnvironment) -> None:
self.env = env
def get_original_image_uri(self, name: str) -> str:
"""Get the original image URI."""
while _StrPath(name) in self.env.original_image_uri:
name = self.env.original_image_uri[_StrPath(name)]
return name
|
ImageAdapter
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_structured_output_retry.py
|
{
"start": 2262,
"end": 11955
}
|
class ____(BaseModel):
"""Weather report schema for testing."""
temperature: float
conditions: str
@tool
def get_weather(city: str) -> str:
"""Get the weather for a given city.
Args:
city: The city to get weather for.
Returns:
Weather information for the city.
"""
return f"The weather in {city} is sunny and 72 degrees."
def test_structured_output_retry_first_attempt_invalid() -> None:
"""Test structured output retry when first two attempts have invalid output."""
# First two attempts have invalid tool arguments, third attempt succeeds
# The model will call the WeatherReport structured output tool
tool_calls = [
# First attempt - invalid: wrong type for temperature
[
{
"name": "WeatherReport",
"id": "1",
"args": {"temperature": "not-a-float", "conditions": "sunny"},
}
],
# Second attempt - invalid: missing required field
[{"name": "WeatherReport", "id": "2", "args": {"temperature": 72.5}}],
# Third attempt - valid
[
{
"name": "WeatherReport",
"id": "3",
"args": {"temperature": 72.5, "conditions": "sunny"},
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
retry_middleware = StructuredOutputRetryMiddleware(max_retries=2)
agent = create_agent(
model=model,
tools=[get_weather],
middleware=[retry_middleware],
response_format=ToolStrategy(schema=WeatherReport, handle_errors=False),
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("What's the weather in Tokyo?")]},
{"configurable": {"thread_id": "test"}},
)
# Verify we got a structured response
assert "structured_response" in result
structured = result["structured_response"]
assert isinstance(structured, WeatherReport)
assert structured.temperature == 72.5
assert structured.conditions == "sunny"
# Verify the model was called 3 times (initial + 2 retries)
assert model.index == 3
def test_structured_output_retry_exceeds_max_retries() -> None:
"""Test structured output retry raises error when max retries exceeded."""
# All three attempts return invalid arguments
tool_calls = [
[
{
"name": "WeatherReport",
"id": "1",
"args": {"temperature": "invalid", "conditions": "sunny"},
}
],
[
{
"name": "WeatherReport",
"id": "2",
"args": {"temperature": "also-invalid", "conditions": "cloudy"},
}
],
[
{
"name": "WeatherReport",
"id": "3",
"args": {"temperature": "still-invalid", "conditions": "rainy"},
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
retry_middleware = StructuredOutputRetryMiddleware(max_retries=2)
agent = create_agent(
model=model,
tools=[get_weather],
middleware=[retry_middleware],
response_format=ToolStrategy(schema=WeatherReport, handle_errors=False),
# No checkpointer - we expect this to fail
)
# Should raise StructuredOutputError after exhausting retries
with pytest.raises(StructuredOutputError):
agent.invoke(
{"messages": [HumanMessage("What's the weather in Tokyo?")]},
)
# Verify the model was called 3 times (initial + 2 retries)
assert model.index == 3
def test_structured_output_retry_succeeds_first_attempt() -> None:
"""Test structured output retry when first attempt succeeds (no retry needed)."""
# First attempt returns valid structured output
tool_calls = [
[
{
"name": "WeatherReport",
"id": "1",
"args": {"temperature": 68.0, "conditions": "cloudy"},
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
retry_middleware = StructuredOutputRetryMiddleware(max_retries=2)
agent = create_agent(
model=model,
tools=[get_weather],
middleware=[retry_middleware],
response_format=ToolStrategy(schema=WeatherReport, handle_errors=False),
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("What's the weather in Paris?")]},
{"configurable": {"thread_id": "test"}},
)
# Verify we got a structured response
assert "structured_response" in result
structured = result["structured_response"]
assert isinstance(structured, WeatherReport)
assert structured.temperature == 68.0
assert structured.conditions == "cloudy"
# Verify the model was called only once
assert model.index == 1
def test_structured_output_retry_validation_error() -> None:
"""Test structured output retry with schema validation errors."""
# First attempt has wrong type, second has missing field, third succeeds
tool_calls = [
[
{
"name": "WeatherReport",
"id": "1",
"args": {"temperature": "seventy-two", "conditions": "sunny"},
}
],
[{"name": "WeatherReport", "id": "2", "args": {"temperature": 72.5}}],
[
{
"name": "WeatherReport",
"id": "3",
"args": {"temperature": 72.5, "conditions": "partly cloudy"},
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
retry_middleware = StructuredOutputRetryMiddleware(max_retries=2)
agent = create_agent(
model=model,
tools=[get_weather],
middleware=[retry_middleware],
response_format=ToolStrategy(schema=WeatherReport, handle_errors=False),
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("What's the weather in London?")]},
{"configurable": {"thread_id": "test"}},
)
# Verify we got a structured response
assert "structured_response" in result
structured = result["structured_response"]
assert isinstance(structured, WeatherReport)
assert structured.temperature == 72.5
assert structured.conditions == "partly cloudy"
# Verify the model was called 3 times
assert model.index == 3
def test_structured_output_retry_zero_retries() -> None:
"""Test structured output retry with max_retries=0 (no retries allowed)."""
# First attempt returns invalid arguments
tool_calls = [
[
{
"name": "WeatherReport",
"id": "1",
"args": {"temperature": "invalid", "conditions": "sunny"},
}
],
[
{
"name": "WeatherReport",
"id": "2",
"args": {"temperature": 72.5, "conditions": "sunny"},
}
], # Would succeed if retried
]
model = FakeToolCallingModel(tool_calls=tool_calls)
retry_middleware = StructuredOutputRetryMiddleware(max_retries=0)
agent = create_agent(
model=model,
tools=[get_weather],
middleware=[retry_middleware],
response_format=ToolStrategy(schema=WeatherReport, handle_errors=False),
checkpointer=InMemorySaver(),
)
# Should fail immediately without retrying
with pytest.raises(StructuredOutputError):
agent.invoke(
{"messages": [HumanMessage("What's the weather in Berlin?")]},
{"configurable": {"thread_id": "test"}},
)
# Verify the model was called only once (no retries)
assert model.index == 1
def test_structured_output_retry_preserves_messages() -> None:
"""Test structured output retry preserves error feedback in messages."""
# First attempt invalid, second succeeds
tool_calls = [
[
{
"name": "WeatherReport",
"id": "1",
"args": {"temperature": "invalid", "conditions": "rainy"},
}
],
[
{
"name": "WeatherReport",
"id": "2",
"args": {"temperature": 75.0, "conditions": "rainy"},
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
retry_middleware = StructuredOutputRetryMiddleware(max_retries=1)
agent = create_agent(
model=model,
tools=[get_weather],
middleware=[retry_middleware],
response_format=ToolStrategy(schema=WeatherReport, handle_errors=False),
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("What's the weather in Seattle?")]},
{"configurable": {"thread_id": "test"}},
)
# Verify structured response is correct
assert "structured_response" in result
structured = result["structured_response"]
assert structured.temperature == 75.0
assert structured.conditions == "rainy"
# Verify messages include the retry feedback
messages = result["messages"]
human_messages = [m for m in messages if isinstance(m, HumanMessage)]
# Should have at least 2 human messages: initial + retry feedback
assert len(human_messages) >= 2
# The retry feedback message should contain error information
retry_message = human_messages[-1]
assert "Error:" in retry_message.content
assert "Please try again" in retry_message.content
|
WeatherReport
|
python
|
numba__numba
|
numba/tests/test_sysinfo.py
|
{
"start": 310,
"end": 2489
}
|
class ____(TestCase):
def setUp(self):
super(TestSysInfo, self).setUp()
self.info = nsi.get_sysinfo()
self.safe_contents = {
int: (
nsi._cpu_count,
),
float: (
nsi._runtime,
),
str: (
nsi._machine,
nsi._cpu_name,
nsi._platform_name,
nsi._os_name,
nsi._os_version,
nsi._python_comp,
nsi._python_impl,
nsi._python_version,
nsi._llvm_version,
nsi._numpy_version,
),
bool: (
nsi._cu_dev_init,
nsi._svml_state,
nsi._svml_loaded,
nsi._svml_operational,
nsi._llvm_svml_patched,
nsi._tbb_thread,
nsi._openmp_thread,
nsi._wkq_thread,
nsi._numpy_AVX512_SKX_detected,
),
list: (
nsi._errors,
nsi._warnings,
),
dict: (
nsi._numba_env_vars,
),
datetime: (
nsi._start,
nsi._start_utc,
),
}
self.safe_keys = chain(*self.safe_contents.values())
def tearDown(self):
super(TestSysInfo, self).tearDown()
# System info might contain long strings or lists so delete it.
del self.info
def test_has_safe_keys(self):
for k in self.safe_keys:
with self.subTest(k=k):
self.assertIn(k, self.info)
def test_safe_content_type(self):
for t, keys in self.safe_contents.items():
for k in keys:
with self.subTest(k=k):
self.assertIsInstance(self.info[k], t)
def test_has_no_error(self):
self.assertFalse(self.info[nsi._errors])
def test_display_empty_info(self):
output = StringIO()
with redirect_stdout(output):
res = nsi.display_sysinfo({})
self.assertIsNone(res)
output.close()
|
TestSysInfo
|
python
|
getsentry__sentry
|
tests/sentry/integrations/jira/test_uninstalled.py
|
{
"start": 627,
"end": 3989
}
|
class ____(APITestCase):
external_id = "it2may+cody"
kid = "cudi"
shared_secret = "garden"
path = "/extensions/jira/uninstalled/"
def jwt_token_secret(self):
jira_signing_algorithm = "HS256"
return jwt.encode(
{
"iss": self.external_id,
"aud": absolute_uri(),
"qsh": get_query_hash(self.path, method="POST", query_params={}),
},
self.shared_secret,
algorithm=jira_signing_algorithm,
headers={"alg": jira_signing_algorithm},
)
def jwt_token_cdn(self):
jira_signing_algorithm = "RS256"
return jwt.encode(
{
"iss": self.external_id,
"aud": absolute_uri(),
"qsh": get_query_hash(self.path, method="POST", query_params={}),
},
RS256_KEY,
algorithm=jira_signing_algorithm,
headers={"kid": self.kid, "alg": jira_signing_algorithm},
)
@patch("sentry_sdk.set_tag")
@patch("sentry.integrations.utils.scope.bind_organization_context")
def test_with_shared_secret(
self, mock_bind_org_context: MagicMock, mock_set_tag: MagicMock
) -> None:
org = self.organization
integration = self.create_provider_integration(
provider="jira",
status=ObjectStatus.ACTIVE,
external_id=self.external_id,
metadata={"shared_secret": self.shared_secret},
)
integration.add_organization(org, self.user)
resp = self.client.post(
self.path, data={}, HTTP_AUTHORIZATION="JWT " + self.jwt_token_secret()
)
# We have to pull this from the DB again to see the updated status
integration = Integration.objects.get(id=integration.id)
mock_set_tag.assert_any_call("integration_id", integration.id)
with assume_test_silo_mode(SiloMode.REGION):
mock_bind_org_context.assert_called_with(serialize_rpc_organization(org))
assert integration.status == ObjectStatus.DISABLED
assert resp.status_code == 200
@patch("sentry_sdk.set_tag")
@patch("sentry.integrations.utils.scope.bind_organization_context")
@responses.activate
def test_with_key_id(self, mock_bind_org_context: MagicMock, mock_set_tag: MagicMock) -> None:
org = self.organization
integration = self.create_provider_integration(
provider="jira", status=ObjectStatus.ACTIVE, external_id=self.external_id
)
integration.add_organization(org, self.user)
responses.add(
responses.GET,
f"https://connect-install-keys.atlassian.com/{self.kid}",
body=RS256_PUB_KEY,
)
resp = self.client.post(
self.path, data={}, HTTP_AUTHORIZATION="JWT " + self.jwt_token_cdn()
)
# We have to pull this from the DB again to see the updated status
integration = Integration.objects.get(id=integration.id)
mock_set_tag.assert_any_call("integration_id", integration.id)
with assume_test_silo_mode(SiloMode.REGION):
mock_bind_org_context.assert_called_with(serialize_rpc_organization(org))
assert integration.status == ObjectStatus.DISABLED
assert resp.status_code == 200
|
JiraUninstalledTest
|
python
|
google__pytype
|
pytype/overlays/future_overlay.py
|
{
"start": 140,
"end": 450
}
|
class ____(overlay.Overlay):
"""A custom overlay for the 'future' module."""
def __init__(self, ctx):
member_map = {
"with_metaclass": metaclass.WithMetaclass.make,
}
ast = ctx.loader.import_name("future.utils")
super().__init__(ctx, "future.utils", member_map, ast)
|
FutureUtilsOverlay
|
python
|
rq__rq
|
rq/executions.py
|
{
"start": 373,
"end": 4291
}
|
class ____:
"""Class to represent an execution of a job."""
def __init__(self, id: str, job_id: str, connection: Redis):
self.id = id
self.job_id = job_id
self.connection = connection
right_now = now()
self.created_at = right_now
self.last_heartbeat = right_now
self._job: Optional[Job] = None
def __eq__(self, other: object) -> bool:
if not isinstance(other, Execution):
return False
return self.id == other.id
@property
def key(self) -> str:
return f'rq:execution:{self.composite_key}'
@property
def job(self) -> Job:
if self._job:
return self._job
self._job = Job.fetch(id=self.job_id, connection=self.connection)
return self._job
@property
def composite_key(self):
return f'{self.job_id}:{self.id}'
@classmethod
def fetch(cls, id: str, job_id: str, connection: Redis) -> 'Execution':
"""Fetch an execution from Redis."""
execution = cls(id=id, job_id=job_id, connection=connection)
execution.refresh()
return execution
def refresh(self):
"""Refresh execution data from Redis."""
data = self.connection.hgetall(self.key)
if not data:
raise ValueError(f'Execution {self.id} not found in Redis')
self.created_at = datetime.fromtimestamp(float(data[b'created_at']), tz=timezone.utc)
self.last_heartbeat = datetime.fromtimestamp(float(data[b'last_heartbeat']), tz=timezone.utc)
@classmethod
def from_composite_key(cls, composite_key: str, connection: Redis) -> 'Execution':
"""A combination of job_id and execution_id separated by a colon."""
job_id, execution_id = parse_composite_key(composite_key)
return cls(id=execution_id, job_id=job_id, connection=connection)
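        # Illustrative (hedged): a composite key such as "my-job:6b2d8c" would be split back
        # into job_id="my-job" and id="6b2d8c" by parse_composite_key, mirroring the
        # `composite_key` property above.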
@classmethod
def create(cls, job: Job, ttl: int, pipeline: 'Pipeline') -> 'Execution':
"""Save execution data to Redis."""
id = uuid4().hex
execution = cls(id=id, job_id=job.id, connection=job.connection)
execution.save(ttl=ttl, pipeline=pipeline)
ExecutionRegistry(job_id=job.id, connection=pipeline).add(execution=execution, ttl=ttl, pipeline=pipeline)
job.started_job_registry.add_execution(execution, pipeline=pipeline, ttl=ttl, xx=False)
return execution
def save(self, ttl: int, pipeline: Optional['Pipeline'] = None):
"""Save execution data to Redis and JobExecutionRegistry."""
connection = pipeline if pipeline is not None else self.connection
connection.hset(self.key, mapping=self.serialize())
# Still unsure how to handle TTL, but this should be tied to heartbeat TTL
connection.expire(self.key, ttl)
def delete(self, job: Job, pipeline: 'Pipeline'):
"""Delete an execution from Redis."""
pipeline.delete(self.key)
job.started_job_registry.remove_execution(execution=self, pipeline=pipeline)
ExecutionRegistry(job_id=self.job_id, connection=self.connection).remove(execution=self, pipeline=pipeline)
def serialize(self) -> dict:
return {
'id': self.id,
'created_at': self.created_at.timestamp(),
'last_heartbeat': self.last_heartbeat.timestamp(),
}
def heartbeat(self, started_job_registry: StartedJobRegistry, ttl: int, pipeline: 'Pipeline'):
"""Update execution heartbeat."""
# TODO: worker heartbeat should be tied to execution heartbeat
self.last_heartbeat = now()
pipeline.hset(self.key, 'last_heartbeat', self.last_heartbeat.timestamp())
pipeline.expire(self.key, ttl)
started_job_registry.add_execution(self, ttl=ttl, pipeline=pipeline, xx=True)
ExecutionRegistry(job_id=self.job_id, connection=pipeline).add(execution=self, ttl=ttl, pipeline=pipeline)
|
Execution
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 116244,
"end": 116435
}
|
class ____:
xlSpanishTuteoAndVoseo = 1 # from enum XlSpanishModes
xlSpanishTuteoOnly = 0 # from enum XlSpanishModes
xlSpanishVoseoOnly = 2 # from enum XlSpanishModes
|
SpanishModes
|
python
|
google__jax
|
tests/pallas/tpu_sparsecore_pallas_test.py
|
{
"start": 61575,
"end": 61720
}
|
class ____(TCTilingMixin, PipelineTest):
pass
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
PipelineTestWithTCTiling
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/runtime/caching/storage/dummy_cache_storage_test.py
|
{
"start": 990,
"end": 2971
}
|
class ____(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.context = CacheStorageContext(
function_key="func-key",
function_display_name="func-display-name",
persist="disk",
)
self.dummy_cache_storage = DummyCacheStorage()
self.storage_manager = MemoryCacheStorageManager()
self.storage = self.storage_manager.create(self.context)
def test_in_memory_wrapped_dummy_cache_storage_get_not_found(self):
"""
        Test that storage.get() raises CacheStorageKeyNotFoundError when the key is
        not present.
"""
with pytest.raises(CacheStorageKeyNotFoundError):
self.storage.get("some-key")
def test_in_memory_wrapped_dummy_cache_storage_get_found(self):
"""
Test that storage.get() returns the value when key is present.
"""
self.storage.set("some-key", b"some-value")
assert self.storage.get("some-key") == b"some-value"
def test_in_memory_wrapped_dummy_cache_storage_storage_set(self):
"""
Test that storage.set() sets the value correctly.
"""
self.storage.set("new-key", b"new-value")
assert self.storage.get("new-key") == b"new-value"
def test_in_memory_wrapped_dummy_cache_storage_storage_set_override(self):
"""
Test that storage.set() overrides the value.
"""
self.storage.set("another_key", b"another_value")
self.storage.set("another_key", b"new_value")
assert self.storage.get("another_key") == b"new_value"
def test_in_memory_wrapped_dummy_cache_storage_storage_delete(self):
"""
Test that storage.delete() deletes the value correctly.
"""
self.storage.set("new-key", b"new-value")
self.storage.delete("new-key")
with pytest.raises(CacheStorageKeyNotFoundError):
self.storage.get("new-key")
|
DummyCacheStorageManagerTest
|
python
|
getsentry__sentry-python
|
sentry_sdk/profiler/transaction_profiler.py
|
{
"start": 17661,
"end": 21851
}
|
class ____(ABC):
mode = "unknown" # type: ProfilerMode
def __init__(self, frequency):
# type: (int) -> None
self.interval = 1.0 / frequency
self.sampler = self.make_sampler()
# cap the number of new profiles at any time so it does not grow infinitely
self.new_profiles = deque(maxlen=128) # type: Deque[Profile]
self.active_profiles = set() # type: Set[Profile]
def __enter__(self):
# type: () -> Scheduler
self.setup()
return self
def __exit__(self, ty, value, tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
self.teardown()
@abstractmethod
def setup(self):
# type: () -> None
pass
@abstractmethod
def teardown(self):
# type: () -> None
pass
def ensure_running(self):
# type: () -> None
"""
Ensure the scheduler is running. By default, this method is a no-op.
The method should be overridden by any implementation for which it is
relevant.
"""
return None
def start_profiling(self, profile):
# type: (Profile) -> None
self.ensure_running()
self.new_profiles.append(profile)
def make_sampler(self):
# type: () -> Callable[..., None]
cwd = os.getcwd()
cache = LRUCache(max_size=256)
def _sample_stack(*args, **kwargs):
# type: (*Any, **Any) -> None
"""
Take a sample of the stack on all the threads in the process.
This should be called at a regular interval to collect samples.
"""
# no profiles taking place, so we can stop early
if not self.new_profiles and not self.active_profiles:
                # make sure to clear the cache if we're not profiling so we don't
                # keep a reference to the last stack of frames around
return
# This is the number of profiles we want to pop off.
# It's possible another thread adds a new profile to
# the list and we spend longer than we want inside
# the loop below.
#
# Also make sure to set this value before extracting
# frames so we do not write to any new profiles that
# were started after this point.
new_profiles = len(self.new_profiles)
now = nanosecond_time()
try:
sample = [
(str(tid), extract_stack(frame, cache, cwd))
for tid, frame in sys._current_frames().items()
]
except AttributeError:
# For some reason, the frame we get doesn't have certain attributes.
# When this happens, we abandon the current sample as it's bad.
capture_internal_exception(sys.exc_info())
return
# Move the new profiles into the active_profiles set.
#
            # We cannot directly add to the active_profiles set
            # in `start_profiling` because it is called from other
            # threads, which can cause a RuntimeError when the set
            # size changes during iteration without a lock.
#
# We also want to avoid using a lock here so threads
# that are starting profiles are not blocked until it
# can acquire the lock.
for _ in range(new_profiles):
self.active_profiles.add(self.new_profiles.popleft())
inactive_profiles = []
for profile in self.active_profiles:
if profile.active:
profile.write(now, sample)
else:
# If a profile is marked inactive, we buffer it
# to `inactive_profiles` so it can be removed.
# We cannot remove it here as it would result
# in a RuntimeError.
inactive_profiles.append(profile)
for profile in inactive_profiles:
self.active_profiles.remove(profile)
return _sample_stack
|
Scheduler
|
python
|
has2k1__plotnine
|
plotnine/themes/themeable.py
|
{
"start": 55469,
"end": 55712
}
|
class ____(themeable):
"""
How to box up multiple legends
Parameters
----------
theme_element : Literal["vertical", "horizontal"]
Whether to stack up the legends vertically or
horizontally.
"""
|
legend_box
|
python
|
django__django
|
tests/migration_test_data_persistence/migrations/0001_initial.py
|
{
"start": 43,
"end": 647
}
|
class ____(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="Book",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
primary_key=True,
serialize=False,
auto_created=True,
),
),
("title", models.CharField(max_length=100)),
],
options={},
bases=(models.Model,),
),
]
|
Migration
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-dashvector/llama_index/readers/dashvector/base.py
|
{
"start": 178,
"end": 3145
}
|
class ____(BaseReader):
"""
DashVector reader.
Args:
api_key (str): DashVector API key.
endpoint (str): DashVector cluster endpoint.
"""
def __init__(self, api_key: str, endpoint: str):
"""Initialize with parameters."""
try:
import dashvector
except ImportError:
raise ImportError(
"`dashvector` package not found, please run `pip install dashvector`"
)
self._client: dashvector.Client = dashvector.Client(
api_key=api_key, endpoint=endpoint
)
def load_data(
self,
collection_name: str,
vector: Optional[List[float]],
topk: int,
filter: Optional[str] = None,
include_vector: bool = True,
partition: Optional[str] = None,
output_fields: Optional[List[str]] = None,
sparse_vector: Optional[Dict[int, float]] = None,
) -> List[Document]:
"""
Load data from DashVector.
Args:
collection_name (str): Name of the collection.
vector (List[float]): Query vector.
topk (int): Number of results to return.
            filter (Optional[str]): Doc fields filter conditions that meet the SQL
                where clause specification. See details at https://help.aliyun.com/document_detail/2513006.html?spm=a2c4g.2510250.0.0.40d25637QMI4eV
            include_vector (bool): Whether to include the embedding in the response.
                Defaults to True.
            partition (Optional[str]): The partition name to query. Defaults to None.
            output_fields (Optional[List[str]]): The fields to return. Defaults to
                None, meaning all fields.
            sparse_vector (Optional[Dict[int, float]]): The sparse vector to query.
                Defaults to None.
Returns:
List[Document]: A list of documents.
"""
collection = self._client.get(collection_name)
if not collection:
            raise ValueError(
                f"Failed to get collection: {collection_name}, Error: {collection}"
            )
ret = collection.query(
vector=vector,
topk=topk,
filter=filter,
include_vector=include_vector,
partition=partition,
output_fields=output_fields,
sparse_vector=sparse_vector,
)
if not ret:
raise Exception(f"Failed to query document,Error: {ret}")
doc_metas = ret.output
documents = []
for doc_meta in doc_metas:
node_content = json.loads(doc_meta.fields["_node_content"])
document = Document(
id_=doc_meta.id,
text=node_content["text"],
metadata=node_content["metadata"],
embedding=doc_meta.vector,
)
documents.append(document)
return documents
|
DashVectorReader
|
python
|
urllib3__urllib3
|
src/urllib3/util/url.py
|
{
"start": 2942,
"end": 15205
}
|
class ____(
typing.NamedTuple(
"Url",
[
("scheme", typing.Optional[str]),
("auth", typing.Optional[str]),
("host", typing.Optional[str]),
("port", typing.Optional[int]),
("path", typing.Optional[str]),
("query", typing.Optional[str]),
("fragment", typing.Optional[str]),
],
)
):
"""
Data structure for representing an HTTP URL. Used as a return value for
:func:`parse_url`. Both the scheme and host are normalized as they are
both case-insensitive according to RFC 3986.
"""
def __new__( # type: ignore[no-untyped-def]
cls,
scheme: str | None = None,
auth: str | None = None,
host: str | None = None,
port: int | None = None,
path: str | None = None,
query: str | None = None,
fragment: str | None = None,
):
if path and not path.startswith("/"):
path = "/" + path
if scheme is not None:
scheme = scheme.lower()
return super().__new__(cls, scheme, auth, host, port, path, query, fragment)
@property
def hostname(self) -> str | None:
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self) -> str:
"""Absolute path including the query string."""
uri = self.path or "/"
if self.query is not None:
uri += "?" + self.query
return uri
@property
def authority(self) -> str | None:
"""
Authority component as defined in RFC 3986 3.2.
This includes userinfo (auth), host and port.
i.e.
userinfo@host:port
"""
userinfo = self.auth
netloc = self.netloc
if netloc is None or userinfo is None:
return netloc
else:
return f"{userinfo}@{netloc}"
@property
def netloc(self) -> str | None:
"""
Network location including host and port.
If you need the equivalent of urllib.parse's ``netloc``,
use the ``authority`` property instead.
"""
if self.host is None:
return None
if self.port:
return f"{self.host}:{self.port}"
return self.host
@property
def url(self) -> str:
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example:
.. code-block:: python
import urllib3
U = urllib3.util.parse_url("https://google.com/mail/")
print(U.url)
# "https://google.com/mail/"
print( urllib3.util.Url("https", "username:password",
"host.com", 80, "/path", "query", "fragment"
).url
)
# "https://username:password@host.com:80/path?query#fragment"
"""
scheme, auth, host, port, path, query, fragment = self
url = ""
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + "://"
if auth is not None:
url += auth + "@"
if host is not None:
url += host
if port is not None:
url += ":" + str(port)
if path is not None:
url += path
if query is not None:
url += "?" + query
if fragment is not None:
url += "#" + fragment
return url
def __str__(self) -> str:
return self.url
@typing.overload
def _encode_invalid_chars(
component: str, allowed_chars: typing.Container[str]
) -> str: # Abstract
...
@typing.overload
def _encode_invalid_chars(
component: None, allowed_chars: typing.Container[str]
) -> None: # Abstract
...
def _encode_invalid_chars(
component: str | None, allowed_chars: typing.Container[str]
) -> str | None:
"""Percent-encodes a URI component without reapplying
onto an already percent-encoded component.
"""
if component is None:
return component
component = to_str(component)
# Normalize existing percent-encoded bytes.
# Try to see if the component we're encoding is already percent-encoded
# so we can skip all '%' characters but still encode all others.
component, percent_encodings = _PERCENT_RE.subn(
lambda match: match.group(0).upper(), component
)
uri_bytes = component.encode("utf-8", "surrogatepass")
is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
encoded_component = bytearray()
for i in range(0, len(uri_bytes)):
# Will return a single character bytestring
byte = uri_bytes[i : i + 1]
byte_ord = ord(byte)
if (is_percent_encoded and byte == b"%") or (
byte_ord < 128 and byte.decode() in allowed_chars
):
encoded_component += byte
continue
encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
return encoded_component.decode()
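# Worked example (illustrative): _encode_invalid_chars("a b", _UNRESERVED_CHARS) returns
# "a%20b" because the space is outside the allowed set, while an existing escape such as
# "%2f" is recognized as a percent-encoding and only has its hex digits uppercased to "%2F".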
def _remove_path_dot_segments(path: str) -> str:
# See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
segments = path.split("/") # Turn the path into a list of segments
output = [] # Initialize the variable to use to store output
for segment in segments:
# '.' is the current directory, so ignore it, it is superfluous
if segment == ".":
continue
# Anything other than '..', should be appended to the output
if segment != "..":
output.append(segment)
# In this case segment == '..', if we can, we should pop the last
# element
elif output:
output.pop()
# If the path starts with '/' and the output is empty or the first string
# is non-empty
if path.startswith("/") and (not output or output[0]):
output.insert(0, "")
# If the path starts with '/.' or '/..' ensure we add one more empty
# string to add a trailing '/'
if path.endswith(("/.", "/..")):
output.append("")
return "/".join(output)
@typing.overload
def _normalize_host(host: None, scheme: str | None) -> None: ...
@typing.overload
def _normalize_host(host: str, scheme: str | None) -> str: ...
def _normalize_host(host: str | None, scheme: str | None) -> str | None:
if host:
if scheme in _NORMALIZABLE_SCHEMES:
is_ipv6 = _IPV6_ADDRZ_RE.match(host)
if is_ipv6:
# IPv6 hosts of the form 'a::b%zone' are encoded in a URL as
# such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID
# separator as necessary to return a valid RFC 4007 scoped IP.
match = _ZONE_ID_RE.search(host)
if match:
start, end = match.span(1)
zone_id = host[start:end]
if zone_id.startswith("%25") and zone_id != "%25":
zone_id = zone_id[3:]
else:
zone_id = zone_id[1:]
zone_id = _encode_invalid_chars(zone_id, _UNRESERVED_CHARS)
return f"{host[:start].lower()}%{zone_id}{host[end:]}"
else:
return host.lower()
elif not _IPV4_RE.match(host):
return to_str(
b".".join([_idna_encode(label) for label in host.split(".")]),
"ascii",
)
return host
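# Worked example (illustrative): _normalize_host("EXAMPLE.com", "https") lowercases and
# IDNA-encodes each label, returning "example.com"; for schemes outside
# _NORMALIZABLE_SCHEMES the host is returned unchanged.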
def _idna_encode(name: str) -> bytes:
if not name.isascii():
try:
import idna
except ImportError:
raise LocationParseError(
"Unable to parse URL without the 'idna' module"
) from None
try:
return idna.encode(name.lower(), strict=True, std3_rules=True)
except idna.IDNAError:
raise LocationParseError(
f"Name '{name}' is not a valid IDNA label"
) from None
return name.lower().encode("ascii")
def _encode_target(target: str) -> str:
"""Percent-encodes a request target so that there are no invalid characters
Pre-condition for this function is that 'target' must start with '/'.
If that is the case then _TARGET_RE will always produce a match.
"""
match = _TARGET_RE.match(target)
if not match: # Defensive:
raise LocationParseError(f"{target!r} is not a valid request URI")
path, query = match.groups()
encoded_target = _encode_invalid_chars(path, _PATH_CHARS)
if query is not None:
query = _encode_invalid_chars(query, _QUERY_CHARS)
encoded_target += "?" + query
return encoded_target
def parse_url(url: str) -> Url:
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
This parser is RFC 3986 and RFC 6874 compliant.
The parser logic and helper functions are based heavily on
work done in the ``rfc3986`` module.
:param str url: URL to parse into a :class:`.Url` namedtuple.
Partly backwards-compatible with :mod:`urllib.parse`.
Example:
.. code-block:: python
import urllib3
print( urllib3.util.parse_url('http://google.com/mail/'))
# Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
print( urllib3.util.parse_url('google.com:80'))
# Url(scheme=None, host='google.com', port=80, path=None, ...)
print( urllib3.util.parse_url('/foo?bar'))
# Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
if not url:
# Empty
return Url()
source_url = url
if not _SCHEME_RE.search(url):
url = "//" + url
scheme: str | None
authority: str | None
auth: str | None
host: str | None
port: str | None
port_int: int | None
path: str | None
query: str | None
fragment: str | None
try:
scheme, authority, path, query, fragment = _URI_RE.match(url).groups() # type: ignore[union-attr]
normalize_uri = scheme is None or scheme.lower() in _NORMALIZABLE_SCHEMES
if scheme:
scheme = scheme.lower()
if authority:
auth, _, host_port = authority.rpartition("@")
auth = auth or None
host, port = _HOST_PORT_RE.match(host_port).groups() # type: ignore[union-attr]
if auth and normalize_uri:
auth = _encode_invalid_chars(auth, _USERINFO_CHARS)
if port == "":
port = None
else:
auth, host, port = None, None, None
if port is not None:
port_int = int(port)
if not (0 <= port_int <= 65535):
raise LocationParseError(url)
else:
port_int = None
host = _normalize_host(host, scheme)
if normalize_uri and path:
path = _remove_path_dot_segments(path)
path = _encode_invalid_chars(path, _PATH_CHARS)
if normalize_uri and query:
query = _encode_invalid_chars(query, _QUERY_CHARS)
if normalize_uri and fragment:
fragment = _encode_invalid_chars(fragment, _FRAGMENT_CHARS)
except (ValueError, AttributeError) as e:
raise LocationParseError(source_url) from e
# For the sake of backwards compatibility we put empty
# string values for path if there are any defined values
# beyond the path in the URL.
# TODO: Remove this when we break backwards compatibility.
if not path:
if query is not None or fragment is not None:
path = ""
else:
path = None
return Url(
scheme=scheme,
auth=auth,
host=host,
port=port_int,
path=path,
query=query,
fragment=fragment,
)
|
Url
|
python
|
pypa__warehouse
|
warehouse/accounts/interfaces.py
|
{
"start": 158,
"end": 218
}
|
class ____(RateLimiterException):
pass
|
TooManyFailedLogins
|
python
|
sympy__sympy
|
sympy/utilities/_compilation/runners.py
|
{
"start": 9068,
"end": 9630
}
|
class ____(CompilerRunner):
environ_key_compiler = 'CXX'
environ_key_flags = 'CXXFLAGS'
compiler_dict = OrderedDict([
('gnu', 'g++'),
('intel', 'icpc'),
('llvm', 'clang++'),
])
# First is the default, c++0x == c++11
standards = ('c++98', 'c++0x')
std_formater = {
'g++': '-std={}'.format,
'icpc': '-std={}'.format,
'clang++': '-std={}'.format,
}
compiler_name_vendor_mapping = {
'g++': 'gnu',
'icpc': 'intel',
'clang++': 'llvm'
}
|
CppCompilerRunner
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/guides/components/asset_factory/asset_factory_component.py
|
{
"start": 304,
"end": 2404
}
|
class ____(dg.Component, dg.Model, dg.Resolvable):
# highlight-start
access_key_id: str = dg.Field
secret_access_key: str = dg.Field
etl_job: list[EtlJob]
# highlight-end
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
_assets = []
for etl in self.etl_job:
asset_key = f"etl_{etl.bucket}_{etl.target_object}".replace(".", "_")
def create_etl_asset(etl_config):
@dg.asset(name=asset_key)
def _etl_asset(context):
s3_client = s3.get_client()
with tempfile.TemporaryDirectory() as root:
source_path = f"{root}/{etl_config.source_object}"
target_path = f"{root}/{etl_config.target_object}"
# these steps could be split into separate assets, but
# for brevity we will keep them together.
# 1. extract
s3_client.download_file(
etl_config.bucket, etl_config.source_object, source_path
)
# 2. transform
db = duckdb.connect(":memory:")
db.execute(
f"CREATE TABLE source AS SELECT * FROM read_csv('{source_path}');"
)
db.query(etl_config.sql).to_csv(target_path)
# 3. load
s3_client.upload_file(
etl_config.bucket, etl_config.target_object, target_path
)
return _etl_asset
_assets.append(create_etl_asset(etl))
_resources = {
"s3": s3.S3Resource(
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
)
}
# highlight-start
return dg.Definitions(assets=_assets, resources=_resources)
# highlight-end
# end_asset_factory_component
|
AssetFactory
|
python
|
PyCQA__pylint
|
tests/functional/p/protected_access.py
|
{
"start": 561,
"end": 1042
}
|
class ____:
@property
def _light_internal(self) -> None:
return None
@staticmethod
def func(light) -> None:
print(light._light_internal) # [protected-access]
def func(light: Light) -> None:
print(light._light_internal) # [protected-access]
# os._exit is excluded from the protected-access check by default
print(os._exit)
# BaseTomato._sauce is included in the `exclude-protected` list
# and does not emit a `protected-access` message:
|
Light
|
python
|
django__django
|
django/contrib/postgres/search.py
|
{
"start": 8663,
"end": 8962
}
|
class ____(SearchQueryCombinable, CombinedExpression):
def __init__(self, lhs, connector, rhs, config, output_field=None):
self.config = config
super().__init__(lhs, connector, rhs, output_field)
def __str__(self):
return "(%s)" % super().__str__()
|
CombinedSearchQuery
|
python
|
streamlit__streamlit
|
lib/streamlit/elements/bokeh_chart.py
|
{
"start": 940,
"end": 2746
}
|
class ____:
@gather_metrics("bokeh_chart")
def bokeh_chart(
self,
figure: object, # noqa: ARG002
use_container_width: bool = True, # noqa: ARG002
) -> DeltaGenerator:
"""Display an interactive Bokeh chart.
Bokeh is a charting library for Python. You can find
more about Bokeh at https://bokeh.pydata.org.
.. Important::
This command has been deprecated and removed. Please use our custom
component, |streamlit-bokeh|_, instead. Calling st.bokeh_chart will
do nothing.
.. |streamlit-bokeh| replace:: ``streamlit-bokeh``
.. _streamlit-bokeh: https://github.com/streamlit/streamlit-bokeh
Parameters
----------
figure : bokeh.plotting.figure.Figure
A Bokeh figure to plot.
use_container_width : bool
Whether to override the figure's native width with the width of
the parent container. If ``use_container_width`` is ``True`` (default),
Streamlit sets the width of the figure to match the width of the parent
container. If ``use_container_width`` is ``False``, Streamlit sets the
width of the chart to fit its contents according to the plotting library,
up to the width of the parent container.
"""
show_deprecation_warning(
"st.bokeh_chart has been deprecated and removed. "
"Please use our custom component, "
"[streamlit-bokeh](https://github.com/streamlit/streamlit-bokeh), "
"instead. Calling st.bokeh_chart will do nothing."
)
return self.dg
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
|
BokehMixin
|
python
|
django__django
|
tests/csrf_tests/tests.py
|
{
"start": 7653,
"end": 8036
}
|
class ____(TestingHttpRequest):
"""
TestingHttpRequest that can raise errors when accessing POST data.
"""
post_error = None
def _get_post(self):
if self.post_error is not None:
raise self.post_error
return self._post
def _set_post(self, post):
self._post = post
POST = property(_get_post, _set_post)
|
PostErrorRequest
|
python
|
getsentry__sentry
|
src/sentry/models/organizationonboardingtask.py
|
{
"start": 1544,
"end": 3344
}
|
class ____(BaseManager["OrganizationOnboardingTask"]):
def record(
self,
organization_id: int,
task: int,
status: OnboardingTaskStatus = OnboardingTaskStatus.COMPLETE,
**kwargs,
) -> bool:
"""Record the completion of an onboarding task. Caches the completion. Returns whether the task was created or not."""
if status != OnboardingTaskStatus.COMPLETE:
raise ValueError(
f"status={status} unsupported must be {OnboardingTaskStatus.COMPLETE}."
)
if options.get("sentry:skip-record-onboarding-tasks-if-complete"):
onboarding_complete_option = OrganizationOption.objects.get_value(
organization_id, "onboarding:complete", None
)
if onboarding_complete_option:
return False
cache_key = f"organizationonboardingtask:{organization_id}:{task}"
if cache.get(cache_key) is None:
if options.get("sentry.send_onboarding_task_metrics"):
metrics.incr(
"sentry.onboarding.task.cache_miss",
tags={
"organization_id": organization_id,
"task": task,
},
sample_rate=1.0,
)
defaults = {
**kwargs,
"status": status,
}
_, created = self.update_or_create(
organization_id=organization_id,
task=task,
defaults=defaults,
)
# Store marker to prevent running all the time
cache.set(cache_key, 1, 60 * 60 * 24 * 7) # 1 week
return created
return False
|
OrganizationOnboardingTaskManager
|
python
|
gevent__gevent
|
src/gevent/tests/test__subprocess.py
|
{
"start": 12371,
"end": 14977
}
|
class ____(unittest.TestCase):
@mock.patch('os.closerange')
@mock.patch('gevent.subprocess._set_inheritable')
@mock.patch('gevent.subprocess.os_close')
def test_close_fds_brute_force(self, close, set_inheritable, closerange):
keep = (
4, 5,
# Leave a hole
# 6,
7,
)
subprocess.Popen._close_fds_brute_force(keep, None)
closerange.assert_has_calls([
mock.call(3, 4),
mock.call(8, subprocess.MAXFD),
])
set_inheritable.assert_has_calls([
mock.call(4, True),
mock.call(5, True),
])
close.assert_called_once_with(6)
@mock.patch('gevent.subprocess.Popen._close_fds_brute_force')
@mock.patch('os.listdir')
def test_close_fds_from_path_bad_values(self, listdir, brute_force):
listdir.return_value = 'Not an Integer'
subprocess.Popen._close_fds_from_path('path', [], 42)
brute_force.assert_called_once_with([], 42)
@mock.patch('os.listdir')
@mock.patch('os.closerange')
@mock.patch('gevent.subprocess._set_inheritable')
@mock.patch('gevent.subprocess.os_close')
def test_close_fds_from_path(self, close, set_inheritable, closerange, listdir):
keep = (
4, 5,
# Leave a hole
# 6,
7,
)
listdir.return_value = ['1', '6', '37']
subprocess.Popen._close_fds_from_path('path', keep, 5)
self.assertEqual([], closerange.mock_calls)
set_inheritable.assert_has_calls([
mock.call(4, True),
mock.call(7, True),
])
close.assert_has_calls([
mock.call(6),
mock.call(37),
])
@mock.patch('gevent.subprocess.Popen._close_fds_brute_force')
@mock.patch('os.path.isdir')
def test_close_fds_no_dir(self, isdir, brute_force):
isdir.return_value = False
subprocess.Popen._close_fds([], 42)
brute_force.assert_called_once_with([], 42)
isdir.assert_has_calls([
mock.call('/proc/self/fd'),
mock.call('/dev/fd'),
])
@mock.patch('gevent.subprocess.Popen._close_fds_from_path')
@mock.patch('gevent.subprocess.Popen._close_fds_brute_force')
@mock.patch('os.path.isdir')
def test_close_fds_with_dir(self, isdir, brute_force, from_path):
isdir.return_value = True
subprocess.Popen._close_fds([7], 42)
self.assertEqual([], brute_force.mock_calls)
from_path.assert_called_once_with('/proc/self/fd', [7], 42)
|
TestFDs
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_joins.py
|
{
"start": 84092,
"end": 101887
}
|
class ____(fixtures.MappedTest, AssertsCompiledSQL):
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
__dialect__ = "default"
@classmethod
def define_tables(cls, metadata):
Table(
"nodes",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("parent_id", Integer, ForeignKey("nodes.id")),
Column("data", String(30)),
)
@classmethod
def setup_classes(cls):
class Node(cls.Comparable):
def append(self, node):
self.children.append(node)
@classmethod
def setup_mappers(cls):
Node, nodes = cls.classes.Node, cls.tables.nodes
cls.mapper_registry.map_imperatively(
Node,
nodes,
properties={
"children": relationship(
Node,
lazy="select",
join_depth=3,
backref=backref("parent", remote_side=[nodes.c.id]),
)
},
)
@classmethod
def insert_data(cls, connection):
Node = cls.classes.Node
sess = Session(connection)
n1 = Node(data="n1")
n1.append(Node(data="n11"))
n1.append(Node(data="n12"))
n1.append(Node(data="n13"))
n1.children[1].append(Node(data="n121"))
n1.children[1].append(Node(data="n122"))
n1.children[1].append(Node(data="n123"))
sess.add(n1)
sess.flush()
sess.close()
def test_join_4_explicit_join(self):
Node = self.classes.Node
sess = fixture_session()
na = aliased(Node)
na2 = aliased(Node)
# this one is a great example of how to show how the API changes;
# while it requires the explicitness of aliased(Node), the whole
# guesswork of joinpoint / aliased goes away and the whole thing
# is simpler
#
# .join("parent", aliased=True)
# .filter(Node.data == "n12")
# .join("parent", aliased=True, from_joinpoint=True)
# .filter(Node.data == "n1")
#
# becomes:
#
# na = aliased(Node)
# na2 = aliased(Node)
#
# ...
# .join(na, Node.parent)
# .filter(na.data == "n12")
# .join(na2, na.parent)
# .filter(na2.data == "n1")
#
q = (
sess.query(Node)
.filter(Node.data == "n122")
.join(na, Node.parent)
.filter(na.data == "n12")
.join(na2, na.parent)
.filter(na2.data == "n1")
)
self.assert_compile(
q,
"SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes JOIN nodes AS nodes_1 "
"ON nodes_1.id = nodes.parent_id JOIN nodes AS nodes_2 "
"ON nodes_2.id = nodes_1.parent_id WHERE nodes.data = :data_1 "
"AND nodes_1.data = :data_2 AND nodes_2.data = :data_3",
checkparams={"data_1": "n122", "data_2": "n12", "data_3": "n1"},
)
node = q.first()
eq_(node.data, "n122")
def test_from_self_inside_excludes_outside(self):
"""test the propagation of aliased() from inside to outside
on a from_self()..
"""
Node = self.classes.Node
sess = fixture_session()
n1 = aliased(Node)
# n1 is not inside the from_self(), so all cols must be maintained
# on the outside
subq = (
sess.query(Node)
.filter(Node.data == "n122")
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
na = aliased(Node, subq)
self.assert_compile(
sess.query(n1, na.id),
"SELECT nodes_1.id AS nodes_1_id, "
"nodes_1.parent_id AS nodes_1_parent_id, "
"nodes_1.data AS nodes_1_data, anon_1.nodes_id AS anon_1_nodes_id "
"FROM nodes AS nodes_1, (SELECT nodes.id AS nodes_id, "
"nodes.parent_id AS nodes_parent_id, "
"nodes.data AS nodes_data FROM "
"nodes WHERE nodes.data = :data_1) AS anon_1",
use_default_dialect=True,
)
parent = aliased(Node)
grandparent = aliased(Node)
subq = (
sess.query(Node, parent, grandparent)
.join(parent, Node.parent)
.join(grandparent, parent.parent)
.filter(Node.data == "n122")
.filter(parent.data == "n12")
.filter(grandparent.data == "n1")
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
na = aliased(Node, subq)
pa = aliased(parent, subq)
ga = aliased(grandparent, subq)
q = sess.query(na, pa, ga).limit(1)
# parent, grandparent *are* inside the from_self(), so they
# should get aliased to the outside.
self.assert_compile(
q,
"SELECT anon_1.nodes_id AS anon_1_nodes_id, "
"anon_1.nodes_parent_id AS anon_1_nodes_parent_id, "
"anon_1.nodes_data AS anon_1_nodes_data, "
"anon_1.nodes_1_id AS anon_1_nodes_1_id, "
"anon_1.nodes_1_parent_id AS anon_1_nodes_1_parent_id, "
"anon_1.nodes_1_data AS anon_1_nodes_1_data, "
"anon_1.nodes_2_id AS anon_1_nodes_2_id, "
"anon_1.nodes_2_parent_id AS anon_1_nodes_2_parent_id, "
"anon_1.nodes_2_data AS anon_1_nodes_2_data "
"FROM (SELECT nodes.id AS nodes_id, nodes.parent_id "
"AS nodes_parent_id, nodes.data AS nodes_data, "
"nodes_1.id AS nodes_1_id, "
"nodes_1.parent_id AS nodes_1_parent_id, "
"nodes_1.data AS nodes_1_data, nodes_2.id AS nodes_2_id, "
"nodes_2.parent_id AS nodes_2_parent_id, nodes_2.data AS "
"nodes_2_data FROM nodes JOIN nodes AS nodes_1 ON "
"nodes_1.id = nodes.parent_id JOIN nodes AS nodes_2 "
"ON nodes_2.id = nodes_1.parent_id "
"WHERE nodes.data = :data_1 AND nodes_1.data = :data_2 AND "
"nodes_2.data = :data_3) AS anon_1 LIMIT :param_1",
{"param_1": 1},
use_default_dialect=True,
)
def test_join_to_self_no_aliases_raises(self):
Node = self.classes.Node
s = fixture_session()
assert_raises_message(
sa.exc.InvalidRequestError,
r"Can't construct a join from Mapper\[Node\(nodes\)\] to "
r"Mapper\[Node\(nodes\)\], they are the same entity",
s.query(Node).join(Node.children)._compile_context,
)
def test_explicit_join_2(self):
Node = self.classes.Node
n1 = aliased(Node)
n2 = aliased(Node)
self.assert_compile(
join(Node, n1, Node.children).join(n2, n1.children),
"nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id "
"JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id",
use_default_dialect=True,
)
def test_explicit_join_3(self):
Node = self.classes.Node
n1 = aliased(Node)
n2 = aliased(Node)
self.assert_compile(
join(Node, n1, Node.children).join(n2, Node.children),
"nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id "
"JOIN nodes AS nodes_2 ON nodes.id = nodes_2.parent_id",
use_default_dialect=True,
)
def test_explicit_join_4(self):
Node = self.classes.Node
sess = fixture_session()
n1 = aliased(Node)
n2 = aliased(Node)
self.assert_compile(
sess.query(Node).join(n1, Node.children).join(n2, n1.children),
"SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes JOIN nodes AS nodes_1 "
"ON nodes.id = nodes_1.parent_id "
"JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id",
use_default_dialect=True,
)
def test_explicit_join_5(self):
Node = self.classes.Node
sess = fixture_session()
n1 = aliased(Node)
n2 = aliased(Node)
self.assert_compile(
sess.query(Node).join(n1, Node.children).join(n2, Node.children),
"SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes JOIN nodes AS nodes_1 "
"ON nodes.id = nodes_1.parent_id "
"JOIN nodes AS nodes_2 ON nodes.id = nodes_2.parent_id",
use_default_dialect=True,
)
def test_explicit_join_6(self):
Node = self.classes.Node
sess = fixture_session()
n1 = aliased(Node)
node = (
sess.query(Node)
.select_from(join(Node, n1, n1.children))
.filter(n1.data == "n122")
.first()
)
assert node.data == "n12"
def test_explicit_join_7(self):
Node = self.classes.Node
sess = fixture_session()
n1 = aliased(Node)
n2 = aliased(Node)
node = (
sess.query(Node)
.select_from(join(Node, n1, Node.children).join(n2, n1.children))
.filter(n2.data == "n122")
.first()
)
assert node.data == "n1"
def test_explicit_join_8(self):
Node = self.classes.Node
sess = fixture_session()
n1 = aliased(Node)
n2 = aliased(Node)
# mix explicit and named onclauses
node = (
sess.query(Node)
.select_from(
join(Node, n1, Node.id == n1.parent_id).join(n2, n1.children)
)
.filter(n2.data == "n122")
.first()
)
assert node.data == "n1"
def test_explicit_join_9(self):
Node = self.classes.Node
sess = fixture_session()
n1 = aliased(Node)
n2 = aliased(Node)
node = (
sess.query(Node)
.select_from(join(Node, n1, Node.parent).join(n2, n1.parent))
.filter(
and_(Node.data == "n122", n1.data == "n12", n2.data == "n1")
)
.first()
)
assert node.data == "n122"
def test_explicit_join_10(self):
Node = self.classes.Node
sess = fixture_session()
n1 = aliased(Node)
n2 = aliased(Node)
eq_(
list(
sess.query(Node)
.select_from(join(Node, n1, Node.parent).join(n2, n1.parent))
.filter(
and_(
Node.data == "n122", n1.data == "n12", n2.data == "n1"
)
)
.with_entities(Node.data, n1.data, n2.data)
),
[("n122", "n12", "n1")],
)
def test_join_to_nonaliased(self):
Node = self.classes.Node
sess = fixture_session()
n1 = aliased(Node)
# using 'n1.parent' implicitly joins to unaliased Node
eq_(
sess.query(n1).join(n1.parent).filter(Node.data == "n1").all(),
[
Node(parent_id=1, data="n11", id=2),
Node(parent_id=1, data="n12", id=3),
Node(parent_id=1, data="n13", id=4),
],
)
# explicit (new syntax)
eq_(
sess.query(n1)
.join(Node, n1.parent)
.filter(Node.data == "n1")
.all(),
[
Node(parent_id=1, data="n11", id=2),
Node(parent_id=1, data="n12", id=3),
Node(parent_id=1, data="n13", id=4),
],
)
def test_multiple_explicit_entities_one(self):
Node = self.classes.Node
sess = fixture_session()
parent = aliased(Node)
grandparent = aliased(Node)
eq_(
sess.query(Node, parent, grandparent)
.join(parent, Node.parent)
.join(grandparent, parent.parent)
.filter(Node.data == "n122")
.filter(parent.data == "n12")
.filter(grandparent.data == "n1")
.first(),
(Node(data="n122"), Node(data="n12"), Node(data="n1")),
)
def test_multiple_explicit_entities_two(self):
Node = self.classes.Node
sess = fixture_session()
parent = aliased(Node)
grandparent = aliased(Node)
subq = (
sess.query(Node, parent, grandparent)
.join(parent, Node.parent)
.join(grandparent, parent.parent)
.filter(Node.data == "n122")
.filter(parent.data == "n12")
.filter(grandparent.data == "n1")
.subquery()
)
na = aliased(Node, subq)
pa = aliased(parent, subq)
ga = aliased(grandparent, subq)
eq_(
sess.query(na, pa, ga).first(),
(Node(data="n122"), Node(data="n12"), Node(data="n1")),
)
def test_multiple_explicit_entities_three(self):
Node = self.classes.Node
sess = fixture_session()
parent = aliased(Node)
grandparent = aliased(Node)
# same, change order around
subq = (
sess.query(parent, grandparent, Node)
.join(parent, Node.parent)
.join(grandparent, parent.parent)
.filter(Node.data == "n122")
.filter(parent.data == "n12")
.filter(grandparent.data == "n1")
.subquery()
)
na = aliased(Node, subq)
pa = aliased(parent, subq)
ga = aliased(grandparent, subq)
eq_(
sess.query(pa, ga, na).first(),
(Node(data="n12"), Node(data="n1"), Node(data="n122")),
)
def test_multiple_explicit_entities_four(self):
Node = self.classes.Node
sess = fixture_session()
parent = aliased(Node)
grandparent = aliased(Node)
eq_(
sess.query(Node, parent, grandparent)
.join(parent, Node.parent)
.join(grandparent, parent.parent)
.filter(Node.data == "n122")
.filter(parent.data == "n12")
.filter(grandparent.data == "n1")
.options(joinedload(Node.children))
.first(),
(Node(data="n122"), Node(data="n12"), Node(data="n1")),
)
def test_multiple_explicit_entities_five(self):
Node = self.classes.Node
sess = fixture_session()
parent = aliased(Node)
grandparent = aliased(Node)
subq = (
sess.query(Node, parent, grandparent)
.join(parent, Node.parent)
.join(grandparent, parent.parent)
.filter(Node.data == "n122")
.filter(parent.data == "n12")
.filter(grandparent.data == "n1")
.subquery()
)
na = aliased(Node, subq)
pa = aliased(parent, subq)
ga = aliased(grandparent, subq)
eq_(
sess.query(na, pa, ga).options(joinedload(na.children)).first(),
(Node(data="n122"), Node(data="n12"), Node(data="n1")),
)
def test_any(self):
Node = self.classes.Node
sess = fixture_session()
eq_(
sess.query(Node)
.filter(Node.children.any(Node.data == "n1"))
.all(),
[],
)
eq_(
sess.query(Node)
.filter(Node.children.any(Node.data == "n12"))
.all(),
[Node(data="n1")],
)
eq_(
sess.query(Node)
.filter(~Node.children.any())
.order_by(Node.id)
.all(),
[
Node(data="n11"),
Node(data="n13"),
Node(data="n121"),
Node(data="n122"),
Node(data="n123"),
],
)
def test_has(self):
Node = self.classes.Node
sess = fixture_session()
eq_(
sess.query(Node)
.filter(Node.parent.has(Node.data == "n12"))
.order_by(Node.id)
.all(),
[Node(data="n121"), Node(data="n122"), Node(data="n123")],
)
eq_(
sess.query(Node)
.filter(Node.parent.has(Node.data == "n122"))
.all(),
[],
)
eq_(
sess.query(Node).filter(~Node.parent.has()).all(),
[Node(data="n1")],
)
def test_contains(self):
Node = self.classes.Node
sess = fixture_session()
n122 = sess.query(Node).filter(Node.data == "n122").one()
eq_(
sess.query(Node).filter(Node.children.contains(n122)).all(),
[Node(data="n12")],
)
n13 = sess.query(Node).filter(Node.data == "n13").one()
eq_(
sess.query(Node).filter(Node.children.contains(n13)).all(),
[Node(data="n1")],
)
def test_eq_ne(self):
Node = self.classes.Node
sess = fixture_session()
n12 = sess.query(Node).filter(Node.data == "n12").one()
eq_(
sess.query(Node).filter(Node.parent == n12).all(),
[Node(data="n121"), Node(data="n122"), Node(data="n123")],
)
eq_(
sess.query(Node).filter(Node.parent != n12).all(),
[
Node(data="n1"),
Node(data="n11"),
Node(data="n12"),
Node(data="n13"),
],
)
|
SelfReferentialTest
|
python
|
huggingface__transformers
|
src/transformers/trainer_utils.py
|
{
"start": 7494,
"end": 7645
}
|
class ____(ExplicitEnum):
END = "end"
EVERY_SAVE = "every_save"
CHECKPOINT = "checkpoint"
ALL_CHECKPOINTS = "all_checkpoints"
|
HubStrategy
|
python
|
jazzband__django-waffle
|
waffle/tests/test_mixin.py
|
{
"start": 2726,
"end": 3722
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
self.request = get()
def test_switch_must_be_active(self):
view = views.SwitchView
self.assertRaises(Http404, process_request, self.request, view)
Switch.objects.create(name='foo', active=True)
response = process_request(self.request, view)
self.assertEqual(b'foo', response.content)
def test_switch_must_be_inactive(self):
view = views.SwitchOffView
response = process_request(self.request, view)
self.assertEqual(b'foo', response.content)
Switch.objects.create(name='foo', active=True)
self.assertRaises(Http404, process_request, self.request, view)
def test_no_override_with_cookie(self):
Switch.objects.create(name='foo', active=False)
self.request.COOKIES['dwf_foo'] = 'True'
self.assertRaises(Http404, process_request, self.request,
views.SwitchView)
|
WaffleSwitchMixinTest
|
python
|
pyparsing__pyparsing
|
pyparsing/util.py
|
{
"start": 5001,
"end": 5367
}
|
class ____(dict):
"""
A memoizing mapping that retains all deleted items
"""
def __delitem__(self, key):
pass
def _escape_regex_range_chars(s: str) -> str:
# escape these chars: ^-[]
for c in r"\^-[]":
s = s.replace(c, _bslash + c)
s = s.replace("\n", r"\n")
s = s.replace("\t", r"\t")
return str(s)
|
UnboundedMemo
|
python
|
apache__airflow
|
airflow-core/tests/unit/cli/commands/test_connection_command.py
|
{
"start": 27075,
"end": 28402
}
|
class ____:
parser = cli_parser.get_parser()
def setup_method(self):
clear_db_connections(add_default_connections_back=False)
def test_cli_delete_connections(self, session, stdout_capture):
merge_conn(
Connection(
conn_id="new1",
conn_type="mysql",
description="mysql description",
host="mysql",
login="root",
password="",
schema="airflow",
),
session=session,
)
# Delete connections
with stdout_capture as stdout:
connection_command.connections_delete(self.parser.parse_args(["connections", "delete", "new1"]))
# Check deletion stdout
assert "Successfully deleted connection with `conn_id`=new1" in stdout.getvalue()
# Check deletions
result = session.query(Connection).filter(Connection.conn_id == "new1").first()
assert result is None
def test_cli_delete_invalid_connection(self):
# Attempt to delete a non-existing connection
with pytest.raises(SystemExit, match=r"Did not find a connection with `conn_id`=fake"):
connection_command.connections_delete(self.parser.parse_args(["connections", "delete", "fake"]))
|
TestCliDeleteConnections
|
python
|
RaRe-Technologies__gensim
|
gensim/test/test_similarities.py
|
{
"start": 82082,
"end": 83218
}
|
class ____(unittest.TestCase):
def test_editdist_same_unicode_kind_latin1(self):
"""Test editdist returns the expected result with two Latin-1 strings."""
expected = 2
actual = editdist('Zizka', 'siska')
assert expected == actual
def test_editdist_same_unicode_kind_ucs2(self):
"""Test editdist returns the expected result with two UCS-2 strings."""
expected = 2
actual = editdist('Žižka', 'šiška')
assert expected == actual
def test_editdist_same_unicode_kind_ucs4(self):
"""Test editdist returns the expected result with two UCS-4 strings."""
expected = 2
actual = editdist('Žižka 😀', 'šiška 😀')
assert expected == actual
def test_editdist_different_unicode_kinds(self):
"""Test editdist returns the expected result with strings of different Unicode kinds."""
expected = 2
actual = editdist('Žižka', 'siska')
assert expected == actual
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
TestFastSS
|
python
|
huggingface__transformers
|
src/transformers/models/swin2sr/modeling_swin2sr.py
|
{
"start": 30260,
"end": 33714
}
|
class ____(Swin2SRPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
if config.num_channels == 3 and config.num_channels_out == 3:
mean = torch.tensor([0.4488, 0.4371, 0.4040]).view(1, 3, 1, 1)
else:
mean = torch.zeros(1, 1, 1, 1)
self.register_buffer("mean", mean, persistent=False)
self.img_range = config.img_range
self.first_convolution = nn.Conv2d(config.num_channels, config.embed_dim, 3, 1, 1)
self.embeddings = Swin2SREmbeddings(config)
self.encoder = Swin2SREncoder(config, grid_size=self.embeddings.patch_embeddings.patches_resolution)
self.layernorm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
self.patch_unembed = Swin2SRPatchUnEmbeddings(config)
self.conv_after_body = nn.Conv2d(config.embed_dim, config.embed_dim, 3, 1, 1)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def pad_and_normalize(self, pixel_values):
_, _, height, width = pixel_values.size()
# 1. pad
window_size = self.config.window_size
modulo_pad_height = (window_size - height % window_size) % window_size
modulo_pad_width = (window_size - width % window_size) % window_size
pixel_values = nn.functional.pad(pixel_values, (0, modulo_pad_width, 0, modulo_pad_height), "reflect")
# 2. normalize
mean = self.mean.type_as(pixel_values)
pixel_values = (pixel_values - mean) * self.img_range
return pixel_values
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
_, _, height, width = pixel_values.shape
# some preprocessing: padding + normalization
pixel_values = self.pad_and_normalize(pixel_values)
embeddings = self.first_convolution(pixel_values)
embedding_output, input_dimensions = self.embeddings(embeddings)
encoder_outputs = self.encoder(
embedding_output,
input_dimensions,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
sequence_output = self.patch_unembed(sequence_output, (height, width))
sequence_output = self.conv_after_body(sequence_output) + embeddings
if not return_dict:
output = (sequence_output,) + encoder_outputs[1:]
return output
return BaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
|
Swin2SRModel
|
python
|
walkccc__LeetCode
|
solutions/952. Largest Component Size by Common Factor/952.py
|
{
"start": 514,
"end": 964
}
|
class ____:
def largestComponentSize(self, nums: list[int]) -> int:
ans = 0
uf = UnionFind(max(nums) + 1)
count = collections.Counter()
for num in nums:
for x in range(2, math.isqrt(num) + 1):
if num % x == 0:
uf.unionByRank(num, x)
uf.unionByRank(num, num // x)
for num in nums:
numRoot = uf.find(num)
count[numRoot] += 1
ans = max(ans, count[numRoot])
return ans
|
Solution
|
python
|
bokeh__bokeh
|
src/bokeh/util/compiler.py
|
{
"start": 3723,
"end": 4195
}
|
class ____(Implementation):
''' Base class for representing Bokeh custom model implementations that may
be given as inline code in some language.
Args:
code (str) :
The source code for the implementation
        file (str, optional) :
A file path to a file containing the source text (default: None)
'''
def __init__(self, code: str, file: str|None = None) -> None:
self.code = code
self.file = file
|
Inline
|
python
|
psf__requests
|
tests/test_utils.py
|
{
"start": 5046,
"end": 5531
}
|
class ____:
@pytest.mark.parametrize(
"value, expected",
(
([("key", "val")], [("key", "val")]),
((("key", "val"),), [("key", "val")]),
({"key": "val"}, [("key", "val")]),
(None, None),
),
)
def test_valid(self, value, expected):
assert to_key_val_list(value) == expected
def test_invalid(self):
with pytest.raises(ValueError):
to_key_val_list("string")
|
TestToKeyValList
|
python
|
PyCQA__pylint
|
tests/functional/u/unexpected_special_method_signature.py
|
{
"start": 2945,
"end": 3096
}
|
class ____:
def __enter__(self):
return self
def __exit__(self, exc_type=None, value=None, tb=None):
pass
|
SecondGoodContextManager
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/test_client_server.py
|
{
"start": 2270,
"end": 2326
}
|
class ____(Model):
values = Dict(String, Any)
|
DictModel
|
python
|
realpython__materials
|
python-unittest/vehicles.py
|
{
"start": 249,
"end": 490
}
|
class ____(Vehicle):
def __init__(self, make, model, loading_capacity):
super().__init__(make, model)
self.loading_capacity = loading_capacity
def vehicle_factory(cls, *args, **kwargs):
return cls(*args, **kwargs)
|
Truck
|
python
|
walkccc__LeetCode
|
solutions/227. Basic Calculator II/227.py
|
{
"start": 0,
"end": 658
}
|
class ____:
def calculate(self, s: str) -> int:
ans = 0
prevNum = 0
currNum = 0
op = '+'
for i, c in enumerate(s):
if c.isdigit():
currNum = currNum * 10 + int(c)
if not c.isdigit() and c != ' ' or i == len(s) - 1:
if op == '+' or op == '-':
ans += prevNum
prevNum = currNum if op == '+' else -currNum
elif op == '*':
prevNum = prevNum * currNum
elif op == '/':
if prevNum < 0:
prevNum = math.ceil(prevNum / currNum)
else:
prevNum = prevNum // currNum
op = c
currNum = 0
return ans + prevNum
|
Solution
|
python
|
agronholm__apscheduler
|
src/apscheduler/datastores/base.py
|
{
"start": 281,
"end": 678
}
|
class ____(DataStore):
"""Base class for data stores."""
_event_broker: EventBroker = attrs.field(init=False)
_logger: Logger = attrs.field(init=False)
async def start(
self, exit_stack: AsyncExitStack, event_broker: EventBroker, logger: Logger
) -> None:
self._event_broker = event_broker
self._logger = logger
@attrs.define(kw_only=True)
|
BaseDataStore
|
python
|
numba__numba
|
numba/testing/main.py
|
{
"start": 6085,
"end": 19688
}
|
class ____(unittest.main):
"""
A TestProgram subclass adding the following options:
* a -R option to enable reference leak detection
* a --profile option to enable profiling of the test run
* a -m option for parallel execution
* a -l option to (only) list tests
Currently the options are only added in 3.4+.
"""
refleak = False
profile = False
multiprocess = False
useslice = None
list = False
tags = None
exclude_tags = None
random_select = None
random_seed = 42
def __init__(self, *args, **kwargs):
topleveldir = kwargs.pop('topleveldir', None)
kwargs['testLoader'] = TestLoader(topleveldir)
# HACK to force unittest not to change warning display options
# (so that NumbaWarnings don't appear all over the place)
sys.warnoptions.append(':x')
self.nomultiproc = kwargs.pop('nomultiproc', False)
super(NumbaTestProgram, self).__init__(*args, **kwargs)
def _getParentArgParser(self):
# NOTE: this hook only exists on Python 3.4+. The options won't be
# added in earlier versions (which use optparse - 3.3 - or getopt()
# - 2.x).
parser = super(NumbaTestProgram, self)._getParentArgParser()
if self.testRunner is None:
parser.add_argument('-R', '--refleak', dest='refleak',
action='store_true',
help='Detect reference / memory leaks')
parser.add_argument('-m', '--multiprocess', dest='multiprocess',
nargs='?',
type=int,
const=multiprocessing.cpu_count(),
help='Parallelize tests')
parser.add_argument('-l', '--list', dest='list',
action='store_true',
help='List tests without running them')
parser.add_argument('--tags', dest='tags', type=str,
help='Comma-separated list of tags to select '
'a subset of the test suite')
parser.add_argument('--exclude-tags', dest='exclude_tags', type=str,
help='Comma-separated list of tags to de-select '
'a subset of the test suite')
parser.add_argument('--random', dest='random_select', type=float,
help='Random proportion of tests to select')
parser.add_argument('--profile', dest='profile',
action='store_true',
help='Profile the test run')
parser.add_argument('-j', '--slice', dest='useslice', nargs='?',
type=str, const="None",
help='Shard the test sequence')
def git_diff_str(x):
if x != 'ancestor':
raise ValueError("invalid option for --gitdiff")
return x
parser.add_argument('-g', '--gitdiff', dest='gitdiff', type=git_diff_str,
default=False, nargs='?',
help=('Run tests from changes made against '
'origin/main as identified by `git diff`. '
'If set to "ancestor", the diff compares '
'against the common ancestor.'))
return parser
def _handle_tags(self, argv, tagstr):
found = None
for x in argv:
if tagstr in x:
if found is None:
found = x
else:
raise ValueError("argument %s supplied repeatedly" % tagstr)
if found is not None:
posn = argv.index(found)
try:
if found == tagstr: # --tagstr <arg>
tag_args = argv[posn + 1].strip()
argv.remove(tag_args)
else: # --tagstr=<arg>
if '=' in found:
tag_args = found.split('=')[1].strip()
else:
raise AssertionError('unreachable')
except IndexError:
# at end of arg list, raise
msg = "%s requires at least one tag to be specified"
raise ValueError(msg % tagstr)
# see if next arg is "end options" or some other flag
if tag_args.startswith('-'):
raise ValueError("tag starts with '-', probably a syntax error")
# see if tag is something like "=<tagname>" which is likely a syntax
# error of form `--tags =<tagname>`, note the space prior to `=`.
if '=' in tag_args:
msg = "%s argument contains '=', probably a syntax error"
raise ValueError(msg % tagstr)
attr = tagstr[2:].replace('-', '_')
setattr(self, attr, tag_args)
argv.remove(found)
def parseArgs(self, argv):
if '-l' in argv:
argv.remove('-l')
self.list = True
super(NumbaTestProgram, self).parseArgs(argv)
# If at this point self.test doesn't exist, it is because
# no test ID was given in argv. Use the default instead.
if not hasattr(self, 'test') or not self.test.countTestCases():
self.testNames = (self.defaultTest,)
self.createTests()
if self.tags:
tags = [s.strip() for s in self.tags.split(',')]
self.test = _choose_tagged_tests(self.test, tags, mode='include')
if self.exclude_tags:
tags = [s.strip() for s in self.exclude_tags.split(',')]
self.test = _choose_tagged_tests(self.test, tags, mode='exclude')
if self.random_select:
self.test = _choose_random_tests(self.test, self.random_select,
self.random_seed)
if self.gitdiff is not False:
self.test = _choose_gitdiff_tests(
self.test,
use_common_ancestor=(self.gitdiff == 'ancestor'),
)
if self.verbosity <= 0:
# We aren't interested in informational messages / warnings when
# running with '-q'.
self.buffer = True
def _do_discovery(self, argv, Loader=None):
# Disable unittest's implicit test discovery when parsing
# CLI arguments, as it can select other tests than Numba's
# (e.g. some test_xxx module that may happen to be directly
# reachable from sys.path)
return
def runTests(self):
if self.refleak:
self.testRunner = RefleakTestRunner
if not hasattr(sys, "gettotalrefcount"):
warnings.warn("detecting reference leaks requires a debug build "
"of Python, only memory leaks will be detected")
elif self.list:
self.testRunner = TestLister(self.useslice)
elif self.testRunner is None:
self.testRunner = BasicTestRunner(self.useslice,
verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer)
if self.multiprocess and not self.nomultiproc:
if self.multiprocess < 1:
msg = ("Value specified for the number of processes to use in "
"running the suite must be > 0")
raise ValueError(msg)
self.testRunner = ParallelTestRunner(runner.TextTestRunner,
self.multiprocess,
self.useslice,
verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer)
def run_tests_real():
super(NumbaTestProgram, self).runTests()
if self.profile:
filename = os.path.splitext(
os.path.basename(sys.modules['__main__'].__file__)
)[0] + '.prof'
p = cProfile.Profile(timer=time.perf_counter) # 3.3+
p.enable()
try:
p.runcall(run_tests_real)
finally:
p.disable()
print("Writing test profile data into %r" % (filename,))
p.dump_stats(filename)
else:
run_tests_real()
# These are tests which are generated and injected into the test suite; what
# gets injected depends on features of the test environment, e.g. TBB presence.
# It's important for doing the CI "slice tests" that these are run at the end.
# See notes in `_flatten_suite` for why. Simple substring matching is used to
# determine a match.
_GENERATED = (
"numba.cuda.tests.cudapy.test_libdevice.TestLibdeviceCompilation",
"numba.tests.test_num_threads",
"numba.tests.test_parallel_backend",
"numba.tests.test_svml",
"numba.tests.test_ufuncs",
)
def _flatten_suite_inner(test):
"""
Workhorse for _flatten_suite
"""
tests = []
if isinstance(test, (unittest.TestSuite, list, tuple)):
for x in test:
tests.extend(_flatten_suite_inner(x))
else:
tests.append(test)
return tests
def _flatten_suite(test):
"""
Expand nested suite into list of test cases.
"""
tests = _flatten_suite_inner(test)
    # Strip out generated tests and stick them at the end; this is to make sure
    # that tests appear in a consistent order regardless of features available.
# This is so that a slice through the test suite e.g. (1::N) would likely be
# consistent up to the point of the generated tests, which rely on specific
# features.
generated = set()
for t in tests:
for g in _GENERATED:
if g in str(t):
generated.add(t)
normal = set(tests) - generated
def key(x):
return x.__module__, type(x).__name__, x._testMethodName
tests = sorted(normal, key=key)
tests.extend(sorted(list(generated), key=key))
return tests
def _choose_gitdiff_tests(tests, *, use_common_ancestor=False):
try:
from git import Repo
except ImportError:
raise ValueError("gitpython needed for git functionality")
repo = Repo('.')
path = os.path.join('numba', 'tests')
if use_common_ancestor:
print(f"Git diff by common ancestor")
target = 'origin/main...HEAD'
else:
target = 'origin/main..HEAD'
gdiff_paths = repo.git.diff(target, path, name_only=True).split()
# normalise the paths as they are unix style from repo.git.diff
gdiff_paths = [os.path.normpath(x) for x in gdiff_paths]
selected = []
gdiff_paths = [os.path.join(repo.working_dir, x) for x in gdiff_paths]
for test in _flatten_suite(tests):
assert isinstance(test, unittest.TestCase)
fname = inspect.getsourcefile(test.__class__)
if fname in gdiff_paths:
selected.append(test)
print("Git diff identified %s tests" % len(selected))
return unittest.TestSuite(selected)
def _choose_tagged_tests(tests, tags, mode='include'):
"""
Select tests that are tagged/not tagged with at least one of the given tags.
Set mode to 'include' to include the tests with tags, or 'exclude' to
exclude the tests with the tags.
"""
selected = []
tags = set(tags)
for test in _flatten_suite(tests):
assert isinstance(test, unittest.TestCase)
func = getattr(test, test._testMethodName)
try:
# Look up the method's underlying function (Python 2)
func = func.im_func
except AttributeError:
pass
found_tags = getattr(func, 'tags', None)
# only include the test if the tags *are* present
if mode == 'include':
if found_tags is not None and found_tags & tags:
selected.append(test)
elif mode == 'exclude':
# only include the test if the tags *are not* present
if found_tags is None or not (found_tags & tags):
selected.append(test)
else:
raise ValueError("Invalid 'mode' supplied: %s." % mode)
return unittest.TestSuite(selected)
def _choose_random_tests(tests, ratio, seed):
"""
Choose a given proportion of tests at random.
"""
rnd = random.Random()
rnd.seed(seed)
if isinstance(tests, unittest.TestSuite):
tests = _flatten_suite(tests)
tests = rnd.sample(tests, int(len(tests) * ratio))
tests = sorted(tests, key=lambda case: case.id())
return unittest.TestSuite(tests)
# The reference leak detection code is liberally taken and adapted from
# Python's own Lib/test/regrtest.py.
def _refleak_cleanup():
# Collect cyclic trash and read memory statistics immediately after.
func1 = sys.getallocatedblocks
try:
func2 = sys.gettotalrefcount
except AttributeError:
func2 = lambda: 42
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
sys._clear_type_cache()
# This also clears the various internal CPython freelists.
gc.collect()
return func1(), func2()
|
NumbaTestProgram
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_query.py
|
{
"start": 216628,
"end": 226598
}
|
class ____(_fixtures.FixtureTest, AssertsCompiledSQL):
run_inserts = None
__dialect__ = "default"
def _fixture1(self):
User, Address, Dingaling, HasDingaling = (
self.classes.User,
self.classes.Address,
self.classes.Dingaling,
self.classes.HasDingaling,
)
users, addresses, dingalings, has_dingaling = (
self.tables.users,
self.tables.addresses,
self.tables.dingalings,
self.tables.has_dingaling,
)
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(User),
"special_user": relationship(
User,
primaryjoin=and_(
users.c.id == addresses.c.user_id,
users.c.name == addresses.c.email_address,
),
viewonly=True,
),
},
)
self.mapper_registry.map_imperatively(Dingaling, dingalings)
self.mapper_registry.map_imperatively(
HasDingaling,
has_dingaling,
properties={
"dingaling": relationship(
Dingaling,
primaryjoin=and_(
dingalings.c.id == has_dingaling.c.dingaling_id,
dingalings.c.data == "hi",
),
)
},
)
def test_filter_with_transient_dont_assume_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
q = sess.query(Address).filter(Address.user == User())
assert_raises_message(
sa_exc.StatementError,
"Can't resolve value for column users.id on object "
".User at .*; no value has been set for this column",
q.all,
)
def test_filter_with_transient_given_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
q = sess.query(Address).filter(Address.user == User(id=None))
with expect_warnings("Got None for value of column "):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id",
checkparams={"param_1": None},
)
def test_filter_with_transient_given_pk_but_only_later(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = User()
# id is not set, so evaluates to NEVER_SET
q = sess.query(Address).filter(Address.user == u1)
# but we set it, so we should get the warning
u1.id = None
with expect_warnings("Got None for value of column "):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id",
checkparams={"param_1": None},
)
def test_filter_with_transient_warn_for_none_against_non_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
q = s.query(Address).filter(
Address.special_user == User(id=None, name=None)
)
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND :param_2 = addresses.email_address",
checkparams={"param_1": None, "param_2": None},
)
def test_filter_with_persistent_non_pk_col_is_default_null(self):
# test #4676 - comparison to a persistent column that is
# NULL in the database, but is not fetched
self._fixture1()
Dingaling, HasDingaling = (
self.classes.Dingaling,
self.classes.HasDingaling,
)
s = fixture_session()
d = Dingaling(id=1)
s.add(d)
s.flush()
assert "data" not in d.__dict__
q = s.query(HasDingaling).filter_by(dingaling=d)
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT has_dingaling.id AS has_dingaling_id, "
"has_dingaling.dingaling_id AS has_dingaling_dingaling_id "
"FROM has_dingaling WHERE :param_1 = "
"has_dingaling.dingaling_id AND :param_2 = :data_1",
checkparams={"param_1": 1, "param_2": None, "data_1": "hi"},
)
def test_filter_with_detached_non_pk_col_is_default_null(self):
self._fixture1()
Dingaling, HasDingaling = (
self.classes.Dingaling,
self.classes.HasDingaling,
)
s = fixture_session()
d = Dingaling()
s.add(d)
s.flush()
s.commit()
d.id
s.expire(d, ["data"])
s.expunge(d)
assert "data" not in d.__dict__
assert "id" in d.__dict__
q = s.query(HasDingaling).filter_by(dingaling=d)
# this case we still can't handle, object is detached so we assume
# nothing
assert_raises_message(
sa_exc.StatementError,
r"Can't resolve value for column dingalings.data on "
r"object .*Dingaling.* the object is detached and "
r"the value was expired",
q.all,
)
def test_filter_with_detached_non_pk_col_has_value(self):
self._fixture1()
Dingaling, HasDingaling = (
self.classes.Dingaling,
self.classes.HasDingaling,
)
s = fixture_session()
d = Dingaling(data="some data")
s.add(d)
s.commit()
s.expire(d)
assert "data" not in d.__dict__
q = s.query(HasDingaling).filter_by(dingaling=d)
self.assert_compile(
q,
"SELECT has_dingaling.id AS has_dingaling_id, "
"has_dingaling.dingaling_id AS has_dingaling_dingaling_id "
"FROM has_dingaling WHERE :param_1 = "
"has_dingaling.dingaling_id AND :param_2 = :data_1",
checkparams={"param_1": 1, "param_2": "some data", "data_1": "hi"},
)
def test_with_parent_with_transient_assume_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
q = sess.query(User).filter(
with_parent(Address(user_id=None), Address.user)
)
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
checkparams={"param_1": None},
)
def test_with_parent_with_transient_warn_for_none_against_non_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
q = s.query(User).filter(
with_parent(
Address(user_id=None, email_address=None), Address.special_user
)
)
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1 "
"AND users.name = :param_2",
checkparams={"param_1": None, "param_2": None},
)
def test_negated_contains_or_equals_plain_m2o(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
q = s.query(Address).filter(Address.user != User(id=None))
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses "
"WHERE addresses.user_id != :user_id_1 "
"OR addresses.user_id IS NULL",
checkparams={"user_id_1": None},
)
def test_negated_contains_or_equals_complex_rel(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
# this one does *not* warn because we do the criteria
# without deferral
q = s.query(Address).filter(Address.special_user != User(id=None))
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses "
"WHERE NOT (EXISTS (SELECT 1 "
"FROM users "
"WHERE users.id = addresses.user_id AND "
"users.name = addresses.email_address AND users.id IS NULL))",
checkparams={},
)
|
WithTransientOnNone
|
python
|
sympy__sympy
|
sympy/physics/quantum/hilbert.py
|
{
"start": 6725,
"end": 7707
}
|
class ____(HilbertSpace):
"""The Hilbert space for second quantization.
    Technically, this Hilbert space is an infinite direct sum of direct
products of single particle Hilbert spaces [1]_. This is a mess, so we have
a class to represent it directly.
Examples
========
>>> from sympy.physics.quantum.hilbert import FockSpace
>>> hs = FockSpace()
>>> hs
F
>>> hs.dimension
oo
References
==========
.. [1] https://en.wikipedia.org/wiki/Fock_space
"""
def __new__(cls):
obj = Basic.__new__(cls)
return obj
@property
def dimension(self):
return S.Infinity
def _sympyrepr(self, printer, *args):
return "FockSpace()"
def _sympystr(self, printer, *args):
return "F"
def _pretty(self, printer, *args):
ustr = '\N{LATIN CAPITAL LETTER F}'
return prettyForm(ustr)
def _latex(self, printer, *args):
return r'\mathcal{F}'
|
FockSpace
|
python
|
numpy__numpy
|
numpy/_core/tests/test_unicode.py
|
{
"start": 5146,
"end": 5301
}
|
class ____(CreateValues):
"""Check the creation of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
|
TestCreateValues_1_UCS4
|
python
|
pypa__pip
|
src/pip/_internal/exceptions.py
|
{
"start": 27114,
"end": 28366
}
|
class ____(DiagnosticPipError):
"""Raised when the downloader receives fewer bytes than advertised
in the Content-Length header."""
reference = "incomplete-download"
def __init__(self, download: _FileDownload) -> None:
# Dodge circular import.
from pip._internal.utils.misc import format_size
assert download.size is not None
download_status = (
f"{format_size(download.bytes_received)}/{format_size(download.size)}"
)
if download.reattempts:
retry_status = f"after {download.reattempts + 1} attempts "
hint = "Use --resume-retries to configure resume attempt limit."
else:
# Download retrying is not enabled.
retry_status = ""
hint = "Consider using --resume-retries to enable download resumption."
message = Text(
f"Download failed {retry_status}because not enough bytes "
f"were received ({download_status})"
)
super().__init__(
message=message,
context=f"URL: {download.link.redacted_url}",
hint_stmt=hint,
note_stmt="This is an issue with network connectivity, not pip.",
)
|
IncompleteDownloadError
|
python
|
wandb__wandb
|
tests/fixtures/wandb_backend_spy/spy.py
|
{
"start": 7269,
"end": 14840
}
|
class ____:
"""A snapshot of the W&B backend state."""
_spy: WandbBackendSpy | None
def run_ids(self) -> set[str]:
"""Returns the IDs of all runs."""
spy = self._assert_valid()
return set(spy._runs.keys())
def uploaded_files(self, *, run_id: str) -> set[str]:
"""Returns the set of files uploaded for the run.
This is based on the values reported in the "uploaded" field of
FileStream requests, and doesn't track actual file uploads.
"""
spy = self._assert_valid()
return spy._runs[run_id]._uploaded_files
def history(self, *, run_id: str) -> dict[int, Any]:
"""Returns the history file for the run.
The file is represented as a dict that maps integer offsets to
JSON objects.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
"""
spy = self._assert_valid()
try:
run = spy._runs[run_id]
except KeyError as e:
raise KeyError(f"No run with ID {run_id}") from e
history_file = run._file_stream_files.get("wandb-history.jsonl", {})
history_parsed: dict[int, Any] = {}
for offset, line in history_file.items():
history_parsed[offset] = json.loads(line)
return history_parsed
def output(self, *, run_id: str) -> dict[int, str]:
"""Returns the run's console logs uploaded via FileStream.
The file is represented as a dict that maps integer offsets to
the printed output string.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
"""
spy = self._assert_valid()
try:
run = spy._runs[run_id]
except KeyError as e:
raise KeyError(f"No run with ID {run_id}") from e
return dict(run._file_stream_files.get("output.log", {}))
def summary(self, *, run_id: str) -> Any:
"""Returns the summary for the run as a JSON object.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
"""
spy = self._assert_valid()
try:
run = spy._runs[run_id]
except KeyError as e:
raise KeyError(f"No run with ID {run_id}") from e
summary_file = run._file_stream_files.get("wandb-summary.json", {})
last_line_offset = max(summary_file.keys(), default=None)
if last_line_offset is None:
return {}
return json.loads(summary_file[last_line_offset])
def system_metrics(self, *, run_id: str) -> dict[int, Any]:
"""Returns the system metrics file for the run.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
"""
spy = self._assert_valid()
try:
run = spy._runs[run_id]
except KeyError as e:
raise KeyError(f"No run with ID {run_id}") from e
events_file = run._file_stream_files.get("wandb-events.jsonl", {})
events_parsed: dict[int, Any] = {}
for offset, line in events_file.items():
events_parsed[offset] = json.loads(line)
return events_parsed
def config(self, *, run_id: str) -> dict[str, Any]:
"""Returns the config for the run as a JSON object.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
AssertionError: if no config was uploaded for the run.
"""
spy = self._assert_valid()
try:
config = spy._runs[run_id]._config_json_string
except KeyError as e:
raise KeyError(f"No run with ID {run_id}") from e
if config is None:
raise AssertionError(f"No config for run {run_id}")
return json.loads(config)
def telemetry(self, *, run_id: str) -> dict[str, Any]:
"""Returns the telemetry for the run as a JSON object.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
AssertionError: if no telemetry was uploaded for the run.
"""
config = self.config(run_id=run_id)
try:
return config["_wandb"]["value"]["t"]
except KeyError as e:
raise AssertionError(f"No telemetry for run {run_id}") from e
def metrics(self, *, run_id: str) -> dict[str, Any]:
"""Returns the metrics for the run as a JSON object.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
AssertionError: if no metrics were uploaded for the run.
"""
config = self.config(run_id=run_id)
try:
return config["_wandb"]["value"]["m"]
except KeyError as e:
raise AssertionError(f"No metrics for run {run_id}") from e
def tags(self, *, run_id: str) -> list[str]:
"""Returns the run's tags.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
"""
spy = self._assert_valid()
try:
return spy._runs[run_id]._tags
except KeyError as e:
raise KeyError(f"No run with ID {run_id}") from e
def remote(self, *, run_id: str) -> str | None:
"""Returns the run's remote repository, if any.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
"""
spy = self._assert_valid()
try:
return spy._runs[run_id]._remote
except KeyError as e:
raise KeyError(f"No run with ID {run_id}") from e
def commit(self, *, run_id: str) -> str | None:
"""Returns the run's commit, if any.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
"""
spy = self._assert_valid()
try:
return spy._runs[run_id]._commit
except KeyError as e:
raise KeyError(f"No run with ID {run_id}") from e
def sweep_name(self, *, run_id: str) -> str | None:
"""Returns the sweep to which the run belongs, if any.
Args:
run_id: The ID of the run.
Raises:
KeyError: if the run does not exist.
"""
spy = self._assert_valid()
try:
return spy._runs[run_id]._sweep_name
except KeyError as e:
raise KeyError(f"No run with ID {run_id}") from e
def was_ever_preempting(self, *, run_id: str) -> bool:
"""Returns whether the run was ever marked 'preempting'."""
spy = self._assert_valid()
return spy._runs[run_id]._was_ever_preempting
def completed(self, *, run_id: str) -> bool:
"""Returns whether the run was marked as completed."""
spy = self._assert_valid()
return spy._runs[run_id]._completed
def exit_code(self, *, run_id: str) -> int | None:
"""Returns the exit code of the run."""
spy = self._assert_valid()
return spy._runs[run_id]._exit_code
def _assert_valid(self) -> WandbBackendSpy:
"""Raise an error if we're not inside freeze()."""
if not self._spy:
raise AssertionError("Snapshot cannot be used outside of freeze().")
return self._spy
|
WandbBackendSnapshot
|
python
|
pytorch__pytorch
|
torch/__init__.py
|
{
"start": 73233,
"end": 73457
}
|
class ____(_LegacyStorage):
@classproperty
def dtype(self):
_warn_typed_storage_removal(stacklevel=3)
return self._dtype
@classproperty
def _dtype(self):
return torch.qint8
|
QInt8Storage
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-azure/dagster_azure/fakes/fake_adls2_resource.py
|
{
"start": 1975,
"end": 2950
}
|
class ____:
"""Stateful mock of an ADLS2 service client for testing.
Wraps a ``mock.MagicMock``. Containers are implemented using an in-memory dict.
"""
def __init__(self, account_name, credential="fake-creds"):
self._account_name = account_name
self._credential = mock.MagicMock()
self._credential.account_key = credential
self._file_systems = {}
@property
def account_name(self):
return self._account_name
@property
def credential(self):
return self._credential
@property
def file_systems(self):
return self._file_systems
def get_file_system_client(self, file_system):
return self._file_systems.setdefault(
file_system, FakeADLS2FilesystemClient(self.account_name, file_system)
)
def get_file_client(self, file_system, file_path):
return self.get_file_system_client(file_system).get_file_client(file_path)
|
FakeADLS2ServiceClient
|
python
|
facelessuser__soupsieve
|
tests/test_level4/test_scope.py
|
{
"start": 73,
"end": 2758
}
|
class ____(util.TestCase):
"""Test scope selectors."""
MARKUP = """
<html id="root">
<head>
</head>
<body>
<div id="div">
<p id="0" class="somewordshere">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3" class="herewords">Direct child</span>
<pre id="pre" class="wordshere">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
</body>
</html>
"""
def test_scope_is_root(self):
"""Test scope is the root when the a specific element is not the target of the select call."""
# Scope is root when applied to a document node
self.assert_selector(
self.MARKUP,
":scope",
["root"],
flags=util.HTML
)
self.assert_selector(
self.MARKUP,
":scope > body > div",
["div"],
flags=util.HTML
)
def test_scope_cannot_select_target(self):
"""Test that scope, the element which scope is called on, cannot be selected."""
for parser in util.available_parsers(
'html.parser', 'lxml', 'html5lib', 'xml'):
soup = self.soup(self.MARKUP, parser)
el = soup.html
# Scope is the element we are applying the select to, and that element is never returned
self.assertTrue(len(sv.select(':scope', el, flags=sv.DEBUG)) == 0)
def test_scope_is_select_target(self):
"""Test that scope is the element which scope is called on."""
for parser in util.available_parsers(
'html.parser', 'lxml', 'html5lib', 'xml'):
soup = self.soup(self.MARKUP, parser)
el = soup.html
# Scope here means the current element under select
ids = [el.attrs['id'] for el in sv.select(':scope div', el, flags=sv.DEBUG)]
self.assertEqual(sorted(ids), sorted(['div']))
el = soup.body
ids = [el.attrs['id'] for el in sv.select(':scope div', el, flags=sv.DEBUG)]
self.assertEqual(sorted(ids), sorted(['div']))
# `div` is the current element under select, and it has no `div` elements.
el = soup.div
ids = [el.attrs['id'] for el in sv.select(':scope div', el, flags=sv.DEBUG)]
self.assertEqual(sorted(ids), sorted([]))
# `div` does have an element with the class `.wordshere`
ids = [el.attrs['id'] for el in sv.select(':scope .wordshere', el, flags=sv.DEBUG)]
self.assertEqual(sorted(ids), sorted(['pre']))
|
TestScope
|
python
|
django__django
|
tests/custom_managers/models.py
|
{
"start": 5486,
"end": 5597
}
|
class ____(Car):
class Meta:
proxy = True
default_manager_name = "fast_cars"
|
FastCarAsDefault
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_T.py
|
{
"start": 9839,
"end": 11344
}
|
class ____(Benchmark):
r"""
Trigonometric 2 objective function.
This class defines the Trigonometric 2 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Trigonometric2}}(x) = 1 + \sum_{i=1}^{n} 8 \sin^2
\left[7(x_i - 0.9)^2 \right]
+ 6 \sin^2 \left[14(x_i - 0.9)^2 \right]
+ (x_i - 0.9)^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-500, 500]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 1` for :math:`x_i = 0.9` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-500.0] * self.N,
[500.0] * self.N))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [[0.9 for _ in range(self.N)]]
self.fglob = 1.0
def fun(self, x, *args):
self.nfev += 1
vec = (8 * sin(7 * (x - 0.9) ** 2) ** 2
+ 6 * sin(14 * (x - 0.9) ** 2) ** 2
+ (x - 0.9) ** 2)
return 1.0 + sum(vec)
|
Trigonometric02
|
python
|
huggingface__transformers
|
tests/models/marian/test_modeling_marian.py
|
{
"start": 17742,
"end": 19632
}
|
class ____(MarianIntegrationTest):
@slow
def test_forward(self):
src, tgt = ["I am a small frog"], ["Ich bin ein kleiner Frosch."]
expected_ids = [38, 121, 14, 697, 38848, 0]
model_inputs = self.tokenizer(src, text_target=tgt, return_tensors="pt").to(torch_device)
self.assertListEqual(expected_ids, model_inputs.input_ids[0].tolist())
desired_keys = {
"input_ids",
"attention_mask",
"labels",
}
self.assertSetEqual(desired_keys, set(model_inputs.keys()))
model_inputs["decoder_input_ids"] = shift_tokens_right(
model_inputs.labels, self.tokenizer.pad_token_id, self.model.config.decoder_start_token_id
)
model_inputs["return_dict"] = True
model_inputs["use_cache"] = False
with torch.no_grad():
outputs = self.model(**model_inputs)
max_indices = outputs.logits.argmax(-1)
self.tokenizer.batch_decode(max_indices)
def test_unk_support(self):
t = self.tokenizer
ids = t(["||"], return_tensors="pt").to(torch_device).input_ids[0].tolist()
self.assertEqual(ids[-1], t.eos_token_id)
self.assertTrue(all(token_id == t.unk_token_id for token_id in ids[:-1]))
def test_pad_not_split(self):
input_ids_w_pad = self.tokenizer(["I am a small frog <pad>"], return_tensors="pt").input_ids[0].tolist()
expected_w_pad = [38, 121, 14, 697, 38848, self.tokenizer.pad_token_id, 0] # pad
self.assertListEqual(expected_w_pad, input_ids_w_pad)
@slow
def test_batch_generation_en_de(self):
self._assert_generated_batch_equal_expected()
def test_auto_config(self):
config = AutoConfig.from_pretrained(self.model_name)
self.assertIsInstance(config, MarianConfig)
@require_sentencepiece
@require_tokenizers
|
TestMarian_EN_DE_More
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_device_type.py
|
{
"start": 50872,
"end": 50993
}
|
class ____(skipIf):
def __init__(self, dep, reason):
super().__init__(dep, reason, device_type="mps")
|
skipMPSIf
|
python
|
lepture__authlib
|
authlib/jose/rfc7518/rsa_key.py
|
{
"start": 863,
"end": 4581
}
|
class ____(AsymmetricKey):
"""Key class of the ``RSA`` key type."""
kty = "RSA"
PUBLIC_KEY_CLS = RSAPublicKey
PRIVATE_KEY_CLS = RSAPrivateKeyWithSerialization
PUBLIC_KEY_FIELDS = ["e", "n"]
PRIVATE_KEY_FIELDS = ["d", "dp", "dq", "e", "n", "p", "q", "qi"]
REQUIRED_JSON_FIELDS = ["e", "n"]
SSH_PUBLIC_PREFIX = b"ssh-rsa"
def dumps_private_key(self):
numbers = self.private_key.private_numbers()
return {
"n": int_to_base64(numbers.public_numbers.n),
"e": int_to_base64(numbers.public_numbers.e),
"d": int_to_base64(numbers.d),
"p": int_to_base64(numbers.p),
"q": int_to_base64(numbers.q),
"dp": int_to_base64(numbers.dmp1),
"dq": int_to_base64(numbers.dmq1),
"qi": int_to_base64(numbers.iqmp),
}
def dumps_public_key(self):
numbers = self.public_key.public_numbers()
return {"n": int_to_base64(numbers.n), "e": int_to_base64(numbers.e)}
def load_private_key(self):
obj = self._dict_data
if "oth" in obj: # pragma: no cover
# https://tools.ietf.org/html/rfc7518#section-6.3.2.7
raise ValueError('"oth" is not supported yet')
public_numbers = RSAPublicNumbers(
base64_to_int(obj["e"]), base64_to_int(obj["n"])
)
if has_all_prime_factors(obj):
numbers = RSAPrivateNumbers(
d=base64_to_int(obj["d"]),
p=base64_to_int(obj["p"]),
q=base64_to_int(obj["q"]),
dmp1=base64_to_int(obj["dp"]),
dmq1=base64_to_int(obj["dq"]),
iqmp=base64_to_int(obj["qi"]),
public_numbers=public_numbers,
)
else:
d = base64_to_int(obj["d"])
p, q = rsa_recover_prime_factors(public_numbers.n, d, public_numbers.e)
numbers = RSAPrivateNumbers(
d=d,
p=p,
q=q,
dmp1=rsa_crt_dmp1(d, p),
dmq1=rsa_crt_dmq1(d, q),
iqmp=rsa_crt_iqmp(p, q),
public_numbers=public_numbers,
)
return numbers.private_key(default_backend())
def load_public_key(self):
numbers = RSAPublicNumbers(
base64_to_int(self._dict_data["e"]), base64_to_int(self._dict_data["n"])
)
return numbers.public_key(default_backend())
@classmethod
def generate_key(cls, key_size=2048, options=None, is_private=False) -> "RSAKey":
if key_size < 512:
raise ValueError("key_size must not be less than 512")
if key_size % 8 != 0:
raise ValueError("Invalid key_size for RSAKey")
raw_key = rsa.generate_private_key(
public_exponent=65537,
key_size=key_size,
backend=default_backend(),
)
if not is_private:
raw_key = raw_key.public_key()
return cls.import_key(raw_key, options=options)
@classmethod
def import_dict_key(cls, raw, options=None):
cls.check_required_fields(raw)
key = cls(options=options)
key._dict_data = raw
if "d" in raw and not has_all_prime_factors(raw):
# reload dict key
key.load_raw_key()
key.load_dict_key()
return key
def has_all_prime_factors(obj):
props = ["p", "q", "dp", "dq", "qi"]
props_found = [prop in obj for prop in props]
if all(props_found):
return True
if any(props_found):
raise ValueError(
"RSA key must include all parameters if any are present besides d"
)
return False
|
RSAKey
|
python
|
doocs__leetcode
|
solution/0400-0499/0488.Zuma Game/Solution.py
|
{
"start": 0,
"end": 853
}
|
class ____:
def findMinStep(self, board: str, hand: str) -> int:
def remove(s):
while len(s):
next = re.sub(r'B{3,}|G{3,}|R{3,}|W{3,}|Y{3,}', '', s)
if len(next) == len(s):
break
s = next
return s
visited = set()
q = deque([(board, hand)])
while q:
state, balls = q.popleft()
if not state:
return len(hand) - len(balls)
for ball in set(balls):
b = balls.replace(ball, '', 1)
for i in range(1, len(state) + 1):
s = state[:i] + ball + state[i:]
s = remove(s)
if s not in visited:
visited.add(s)
q.append((s, b))
return -1
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py
|
{
"start": 56685,
"end": 60918
}
|
class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Qwen3OmniMoeTextConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
self.mrope_section = config.rope_parameters.get("mrope_section", [24, 20, 20])
@staticmethod
def compute_default_rope_parameters(
config: Optional[Qwen3OmniMoeTextConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
# In contrast to other models, Qwen3OmniMoeThinker has different position ids for the grids
# So we expand the inv_freq to shape (3, ...)
if position_ids.ndim == 2:
position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions)
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
freqs = self.apply_interleaved_mrope(freqs, self.mrope_section)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def apply_interleaved_mrope(self, freqs, mrope_section):
"""Apply interleaved MRoPE to 3D rotary embeddings.
Reorganizes frequency layout from chunked [TTT...HHH...WWW] to
interleaved [THWTHWTHW...TT], preserving frequency continuity.
args:
            freqs: (3, bs, seq_len, head_dim // 2)
mrope_section: (3,)
returns:
            freqs_t: (bs, seq_len, head_dim // 2)
"""
freqs_t = freqs[0] # just overwrite the first dimension T
for dim, offset in enumerate((1, 2), start=1): # H, W
length = mrope_section[dim] * 3
idx = slice(offset, length, 3)
freqs_t[..., idx] = freqs[dim, ..., idx]
return freqs_t
|
Qwen3OmniMoeThinkerTextRotaryEmbedding
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 143963,
"end": 144610
}
|
class ____(sgqlc.types.Input):
"""Autogenerated input type of ArchiveProjectV2Item"""
__schema__ = github_schema
__field_names__ = ("project_id", "item_id", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
"""The ID of the Project to archive the item from."""
item_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="itemId")
"""The ID of the ProjectV2Item to archive."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
ArchiveProjectV2ItemInput
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/initsubclass1.py
|
{
"start": 542,
"end": 610
}
|
class ____(ClassA, param1="0", param3=datetime.now()):
pass
|
ClassC
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-safest-path-in-a-grid.py
|
{
"start": 33,
"end": 765
}
|
class ____(object): # Time: O(n * alpha(n)), Space: O(n)
def __init__(self, n):
self.set = range(n)
self.rank = [0]*n
def find_set(self, x):
stk = []
while self.set[x] != x: # path compression
stk.append(x)
x = self.set[x]
while stk:
self.set[stk.pop()] = x
return x
def union_set(self, x, y):
x, y = self.find_set(x), self.find_set(y)
if x == y:
return False
if self.rank[x] > self.rank[y]: # union by rank
x, y = y, x
self.set[x] = self.set[y]
if self.rank[x] == self.rank[y]:
self.rank[y] += 1
return True
# bfs, bucket sort, union find
|
UnionFind
|
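A quick usage sketch for the union-find sample above. The snippet targets Python 2, where range(n) returns a mutable list; the port below is hypothetical and only swaps in list(range(n)) so it runs on Python 3, with a few assertions showing path compression and union by rank in action.

class UnionFind:  # Python 3 port of the snippet above; logic unchanged
    def __init__(self, n):
        self.set = list(range(n))
        self.rank = [0] * n

    def find_set(self, x):
        stk = []
        while self.set[x] != x:  # path compression
            stk.append(x)
            x = self.set[x]
        while stk:
            self.set[stk.pop()] = x
        return x

    def union_set(self, x, y):
        x, y = self.find_set(x), self.find_set(y)
        if x == y:
            return False
        if self.rank[x] > self.rank[y]:  # union by rank
            x, y = y, x
        self.set[x] = self.set[y]
        if self.rank[x] == self.rank[y]:
            self.rank[y] += 1
        return True

uf = UnionFind(5)
assert uf.union_set(0, 1) is True
assert uf.union_set(1, 2) is True
assert uf.union_set(0, 2) is False           # 0 and 2 were already connected
assert uf.find_set(0) == uf.find_set(2)      # same representative after compression
assert uf.find_set(3) != uf.find_set(0)      # 3 is still in its own set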
python
|
django__django
|
tests/serializers/test_xml.py
|
{
"start": 3670,
"end": 4700
}
|
class ____(
SerializersTransactionTestBase, TransactionTestCase
):
serializer_name = "xml"
fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
<field to="serializers.categorymetadata" name="meta_data" rel="ManyToManyRel"></field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>""" # NOQA
|
XmlSerializerTransactionTestCase
|
python
|
charliermarsh__ruff
|
crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring.py
|
{
"start": 845,
"end": 1011
}
|
class ____():
# don't lose this class comment ...
"""Empty class.
But it has comments
""" # ... neither lose this class comment
|
CommentBeforeDocstring
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/util.py
|
{
"start": 20347,
"end": 20809
}
|
class ____(WrappedThread, metaclass=abc.ABCMeta):
"""Thread to read stdout from a subprocess."""
def __init__(self, handle: t.IO[bytes], buffer: t.BinaryIO, name: str) -> None:
super().__init__(self._run, f'{self.__class__.__name__}: {name}')
self.handle = handle
self.buffer = buffer
self.lines: list[bytes] = []
@abc.abstractmethod
def _run(self) -> None:
"""Workload to run on a thread."""
|
ReaderThread
|
python
|
keras-team__keras
|
integration_tests/torch_workflow_test.py
|
{
"start": 310,
"end": 996
}
|
class ____(testing.TestCase):
def test_keras_layer_in_nn_module(self):
net = Net()
# Test using Keras layer in a nn.Module.
# Test forward pass
self.assertAllEqual(list(net(torch.empty(100, 10)).shape), [100, 1])
# Test KerasVariables are added as nn.Parameter.
self.assertLen(list(net.parameters()), 2)
# Test using KerasVariable as a torch tensor for torch ops.
kernel = net.fc1.kernel
transposed_kernel = torch.transpose(kernel, 0, 1)
self.assertIsInstance(kernel, KerasVariable)
self.assertIsInstance(
torch.mul(kernel, transposed_kernel), torch.Tensor
)
|
TorchWorkflowTest
|
python
|
getsentry__sentry
|
src/sentry/models/grouplink.py
|
{
"start": 570,
"end": 1080
}
|
class ____(BaseManager["GroupLink"]):
def get_group_issues(self, group: Group, external_issue_id: str | None = None) -> QuerySet:
kwargs = dict(
group=group,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
relationship=GroupLink.Relationship.references,
)
if external_issue_id is not None:
kwargs["linked_id"] = external_issue_id
return self.filter(**kwargs)
@region_silo_model
|
GroupLinkManager
|
python
|
yandexdataschool__Practical_RL
|
week06_policy_based/atari_wrappers.py
|
{
"start": 1359,
"end": 2591
}
|
class ____(Wrapper):
"""Makes fire action when reseting environment.
Some environments are fixed until the agent makes the fire action,
this wrapper makes this action so that the epsiode starts automatically.
"""
def __init__(self, env):
super().__init__(env)
action_meanings = env.unwrapped.get_action_meanings()
if len(action_meanings) < 3:
raise ValueError(
"env.unwrapped.get_action_meanings() must be of length >= 3"
f"but is of length {len(action_meanings)}"
)
if env.unwrapped.get_action_meanings()[1] != "FIRE":
raise ValueError(
"env.unwrapped.get_action_meanings() must have 'FIRE' "
f"under index 1, but is {action_meanings}"
)
def step(self, action):
return self.env.step(action)
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, terminated, truncated, _ = self.env.step(1)
if terminated or truncated:
self.env.reset(**kwargs)
obs, _, terminated, truncated, _ = self.env.step(2)
if terminated or truncated:
self.env.reset(**kwargs)
return obs, {}
|
FireReset
|
python
|
Pylons__pyramid
|
tests/test_urldispatch.py
|
{
"start": 25119,
"end": 25206
}
|
class ____:
def __init__(self, generator):
self.generate = generator
|
DummyRoute
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_table10.py
|
{
"start": 315,
"end": 2492
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table10.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
xformat = workbook.add_format({"num_format": 2})
worksheet.set_column("B:K", 10.288)
worksheet.write_string("A1", "Column1")
worksheet.write_string("B1", "Column2")
worksheet.write_string("C1", "Column3")
worksheet.write_string("D1", "Column4")
worksheet.write_string("E1", "Column5")
worksheet.write_string("F1", "Column6")
worksheet.write_string("G1", "Column7")
worksheet.write_string("H1", "Column8")
worksheet.write_string("I1", "Column9")
worksheet.write_string("J1", "Column10")
worksheet.write_string("K1", "Total")
data = [0, 0, 0, None, None, 0, 0, 0, 0, 0]
worksheet.write_row("B4", data)
worksheet.write_row("B5", data)
worksheet.add_table(
"B3:K6",
{
"total_row": 1,
"columns": [
{"total_string": "Total"},
{},
{"total_function": "average"},
{"total_function": "count"},
{"total_function": "count_nums"},
{"total_function": "max"},
{"total_function": "min"},
{"total_function": "sum"},
{"total_function": "stdDev"},
{
"total_function": "var",
"formula": "SUM(Table1[[#This Row],[Column1]:[Column3]])",
"format": xformat,
},
],
},
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
walkccc__LeetCode
|
solutions/2441. Largest Positive Integer That Exists With Its Negative/2441.py
|
{
"start": 0,
"end": 220
}
|
class ____:
def findMaxK(self, nums: list[int]) -> int:
ans = -1
seen = set()
for num in nums:
if -num in seen:
ans = max(ans, abs(num))
else:
seen.add(num)
return ans
|
Solution
|
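A couple of worked inputs for the sample above, illustrative only and assuming the snippet is available locally under its target name Solution:

print(Solution().findMaxK([-1, 2, -3, 3]))          # 3  (both 3 and -3 occur)
print(Solution().findMaxK([-1, 10, 6, 7, -7, 1]))   # 7
print(Solution().findMaxK([-10, 8, 6, 7, -2, -3]))  # -1 (no value has its negative present)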
python
|
bokeh__bokeh
|
tests/unit/bokeh/core/property/test_primitive.py
|
{
"start": 8155,
"end": 12580
}
|
class ____:
def test_eq(self) -> None:
assert bcpp.Int() is not int
assert (bcpp.Int() == bcpp.Int()) is True
assert (bcpp.Int(default=0) == bcpp.Int()) is True
assert (bcpp.Int(default=1) == bcpp.Int()) is False
assert (bcpp.Int() == bcpp.Int(default=1)) is False
assert (bcpp.Int(default=1) == bcpp.Int(default=1)) is True
assert (bcpp.Int(help="heplful") == bcpp.Int()) is False
assert (bcpp.Int() == bcpp.Int(help="heplful")) is False
assert (bcpp.Int(help="heplful") == bcpp.Int(help="heplful")) is True
def f(s: str) -> int:
return int(s)
assert (bcpp.Int().accepts(bcpp.String, f) == bcpp.Int()) is False
assert (bcpp.Int() == bcpp.Int().accepts(bcpp.String, f)) is False
assert (bcpp.Int().accepts(bcpp.String, f) == bcpp.Int().accepts(bcpp.String, f)) is True
def g(_o: HasProps, v: int) -> bool:
return v >= 0
assert (bcpp.Int().asserts(g, ">= 0") == bcpp.Int()) is False
assert (bcpp.Int() == bcpp.Int().asserts(g, ">= 0")) is False
assert (bcpp.Int().asserts(g, ">= 0") == bcpp.Int().asserts(g, ">= 0")) is True
def test_clone(self) -> None:
p0 = bcpp.Int()
c0 = p0()
assert c0.default == 0
assert c0.help is None
assert c0.alternatives == []
assert c0.assertions == []
assert p0 is not c0
assert p0 == c0
p1 = bcpp.Int(default=10, help="helpful")
c1 = p1()
assert c1.default == 10
assert c1.help == "helpful"
assert c1.alternatives == []
assert c1.assertions == []
assert p1 is not c1
assert p1 == c1
p2 = bcpp.Int()
c2 = p2(default=20, help="helpful")
assert c2.default == 20
assert c2.help == "helpful"
assert c2.alternatives == []
assert c2.assertions == []
assert p2 is not c2
assert p2 != c2
p3 = bcpp.Int(default=10, help="helpful")
c3 = p3(default=20, help="unhelpful")
assert c3.default == 20
assert c3.help == "unhelpful"
assert c3.alternatives == []
assert c3.assertions == []
assert p3 is not c3
assert p3 != c3
def test_valid(self) -> None:
prop = bcpp.Int()
assert prop.is_valid(0)
assert prop.is_valid(1)
assert prop.is_valid(np.int8(0))
assert prop.is_valid(np.int8(1))
assert prop.is_valid(np.int16(0))
assert prop.is_valid(np.int16(1))
assert prop.is_valid(np.int32(0))
assert prop.is_valid(np.int32(1))
assert prop.is_valid(np.int64(0))
assert prop.is_valid(np.int64(1))
assert prop.is_valid(np.uint8(0))
assert prop.is_valid(np.uint8(1))
assert prop.is_valid(np.uint16(0))
assert prop.is_valid(np.uint16(1))
assert prop.is_valid(np.uint32(0))
assert prop.is_valid(np.uint32(1))
assert prop.is_valid(np.uint64(0))
assert prop.is_valid(np.uint64(1))
def test_invalid(self) -> None:
prop = bcpp.Int()
assert not prop.is_valid(None)
assert not prop.is_valid(False)
assert not prop.is_valid(True)
assert not prop.is_valid(0.0)
assert not prop.is_valid(1.0)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid("")
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
assert not prop.is_valid(np.bool_(False))
assert not prop.is_valid(np.bool_(True))
assert not prop.is_valid(np.float16(0))
assert not prop.is_valid(np.float16(1))
assert not prop.is_valid(np.float32(0))
assert not prop.is_valid(np.float32(1))
assert not prop.is_valid(np.float64(0))
assert not prop.is_valid(np.float64(1))
assert not prop.is_valid(np.complex64(1.0+1.0j))
assert not prop.is_valid(np.complex128(1.0+1.0j))
if hasattr(np, "complex256"):
assert not prop.is_valid(np.complex256(1.0+1.0j))
def test_has_ref(self) -> None:
prop = bcpp.Int()
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpp.Int()
assert str(prop) == "Int"
|
Test_Int
|
python
|
Textualize__textual
|
src/textual/widgets/_tabbed_content.py
|
{
"start": 6939,
"end": 23958
}
|
class ____(Widget):
"""A container with associated tabs to toggle content visibility."""
ALLOW_MAXIMIZE = True
DEFAULT_CSS = """
TabbedContent {
height: auto;
&> ContentTabs {
dock: top;
}
}
"""
active: reactive[str] = reactive("", init=False)
"""The ID of the active tab, or empty string if none are active."""
class TabActivated(Message):
"""Posted when the active tab changes."""
ALLOW_SELECTOR_MATCH = {"pane"}
"""Additional message attributes that can be used with the [`on` decorator][textual.on]."""
def __init__(self, tabbed_content: TabbedContent, tab: ContentTab) -> None:
"""Initialize message.
Args:
tabbed_content: The TabbedContent widget.
tab: The Tab widget that was selected (contains the tab label).
"""
self.tabbed_content = tabbed_content
"""The `TabbedContent` widget that contains the tab activated."""
self.tab = tab
"""The `Tab` widget that was selected (contains the tab label)."""
self.pane = tabbed_content.get_pane(tab)
"""The `TabPane` widget that was activated by selecting the tab."""
super().__init__()
@property
def control(self) -> TabbedContent:
"""The `TabbedContent` widget that contains the tab activated.
This is an alias for [`TabActivated.tabbed_content`][textual.widgets.TabbedContent.TabActivated.tabbed_content]
and is used by the [`on`][textual.on] decorator.
"""
return self.tabbed_content
def __rich_repr__(self) -> Result:
yield self.tabbed_content
yield self.tab
yield self.pane
class Cleared(Message):
"""Posted when no tab pane is active.
This can happen if all tab panes are removed or if the currently active tab
pane is unset.
"""
def __init__(self, tabbed_content: TabbedContent) -> None:
"""Initialize message.
Args:
tabbed_content: The TabbedContent widget.
"""
self.tabbed_content = tabbed_content
"""The `TabbedContent` widget that contains the tab activated."""
super().__init__()
@property
def control(self) -> TabbedContent:
"""The `TabbedContent` widget that was cleared of all tab panes.
This is an alias for [`Cleared.tabbed_content`][textual.widgets.TabbedContent.Cleared.tabbed_content]
and is used by the [`on`][textual.on] decorator.
"""
return self.tabbed_content
def __init__(
self,
*titles: ContentType,
initial: str = "",
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
):
"""Initialize a TabbedContent widgets.
Args:
*titles: Positional argument will be used as title.
initial: The id of the initial tab, or empty string to select the first tab.
name: The name of the tabbed content.
id: The ID of the tabbed content in the DOM.
classes: The CSS classes of the tabbed content.
disabled: Whether the tabbed content is disabled or not.
"""
self.titles = [self.render_str(title) for title in titles]
self._tab_content: list[Widget] = []
self._initial = initial
self._tab_counter = 0
super().__init__(name=name, id=id, classes=classes, disabled=disabled)
@property
def active_pane(self) -> TabPane | None:
"""The currently active pane, or `None` if no pane is active."""
active = self.active
if not active:
return None
return self.get_pane(self.active)
@staticmethod
def _set_id(content: TabPane, new_id: int) -> TabPane:
"""Set an id on the content, if not already present.
Args:
content: a TabPane.
new_id: Numeric ID to make the pane ID from.
Returns:
The same TabPane.
"""
if content.id is None:
content.id = f"tab-{new_id}"
return content
def _generate_tab_id(self) -> int:
"""Auto generate a new tab id.
Returns:
An auto-incrementing integer.
"""
self._tab_counter += 1
return self._tab_counter
def compose(self) -> ComposeResult:
"""Compose the tabbed content."""
# Wrap content in a `TabPane` if required.
pane_content = [
self._set_id(
(
content
if isinstance(content, TabPane)
else TabPane(title or self.render_str(f"Tab {index}"), content)
),
self._generate_tab_id(),
)
for index, (title, content) in enumerate(
zip_longest(self.titles, self._tab_content), 1
)
]
# Get a tab for each pane
tabs = [
ContentTab(
content._title,
content.id or "",
disabled=content.disabled,
)
for content in pane_content
]
# Yield the tabs, and ensure they're linked to this TabbedContent.
# It's important to associate the Tabs with the TabbedContent, so that this
# TabbedContent can determine whether a message received from a Tabs instance
# has been sent from this Tabs, or from a Tabs that may exist as a descendant
# deeper in the DOM.
yield ContentTabs(*tabs, active=self._initial or None, tabbed_content=self)
# Yield the content switcher and panes
with ContentSwitcher(initial=self._initial or None):
yield from pane_content
def add_pane(
self,
pane: TabPane,
*,
before: TabPane | str | None = None,
after: TabPane | str | None = None,
) -> AwaitComplete:
"""Add a new pane to the tabbed content.
Args:
pane: The pane to add.
before: Optional pane or pane ID to add the pane before.
after: Optional pane or pane ID to add the pane after.
Returns:
An optionally awaitable object that waits for the pane to be added.
Raises:
Tabs.TabError: If there is a problem with the addition request.
Note:
Only one of `before` or `after` can be provided. If both are
provided an exception is raised.
"""
if isinstance(before, TabPane):
before = before.id
if isinstance(after, TabPane):
after = after.id
tabs = self.get_child_by_type(ContentTabs)
pane = self._set_id(pane, self._generate_tab_id())
assert pane.id is not None
pane.display = False
return AwaitComplete(
tabs.add_tab(
ContentTab(pane._title, pane.id),
before=before if before is None else ContentTab.add_prefix(before),
after=after if after is None else ContentTab.add_prefix(after),
),
self.get_child_by_type(ContentSwitcher).mount(pane),
)
def remove_pane(self, pane_id: str) -> AwaitComplete:
"""Remove a given pane from the tabbed content.
Args:
pane_id: The ID of the pane to remove.
Returns:
An optionally awaitable object that waits for the pane to be removed
and the Cleared message to be posted.
"""
removal_awaitables: list[Awaitable] = [
self.get_child_by_type(ContentTabs).remove_tab(
ContentTab.add_prefix(pane_id)
)
]
try:
removal_awaitables.append(
self.get_child_by_type(ContentSwitcher)
.get_child_by_id(pane_id)
.remove()
)
except NoMatches:
# It's possible that the content itself may have gone away via
# other means; so allow that to be a no-op.
pass
return AwaitComplete(*removal_awaitables)
def clear_panes(self) -> AwaitComplete:
"""Remove all the panes in the tabbed content.
Returns:
An optionally awaitable object which waits for all panes to be removed
and the Cleared message to be posted.
"""
await_clear = gather(
self.get_child_by_type(ContentTabs).clear(),
self.get_child_by_type(ContentSwitcher).remove_children(),
)
async def _clear_content() -> None:
await await_clear
return AwaitComplete(_clear_content())
def compose_add_child(self, widget: Widget) -> None:
"""When using the context manager compose syntax, we want to attach nodes to the switcher.
Args:
widget: A Widget to add.
"""
self._tab_content.append(widget)
def _on_tabs_tab_activated(self, event: Tabs.TabActivated) -> None:
"""User clicked a tab."""
if self._is_associated_tabs(event.tabs):
# The message is relevant, so consume it and update state accordingly.
event.stop()
assert event.tab.id is not None
switcher = self.get_child_by_type(ContentSwitcher)
switcher.current = ContentTab.sans_prefix(event.tab.id)
with self.prevent(self.TabActivated):
# We prevent TabbedContent.TabActivated because it is also
# posted from the watcher for active, we're also about to
# post it below too, which is valid as here we're reacting
# to what the Tabs are doing. This ensures we don't get
# doubled-up messages.
self.active = ContentTab.sans_prefix(event.tab.id)
self.post_message(
TabbedContent.TabActivated(
tabbed_content=self,
tab=self.get_child_by_type(ContentTabs).get_content_tab(
self.active
),
)
)
def _on_tab_pane_focused(self, event: TabPane.Focused) -> None:
"""One of the panes contains a widget that was programmatically focused."""
event.stop()
if event.tab_pane.id is not None:
self.active = event.tab_pane.id
def _on_tabs_cleared(self, event: Tabs.Cleared) -> None:
"""Called when there are no active tabs. The tabs may have been cleared,
or they may all be hidden."""
if self._is_associated_tabs(event.tabs):
event.stop()
self.get_child_by_type(ContentSwitcher).current = None
self.active = ""
def _is_associated_tabs(self, tabs: Tabs) -> bool:
"""Determine whether a tab is associated with this TabbedContent or not.
A tab is "associated" with a `TabbedContent`, if it's one of the tabs that can
be used to control it. These have a special type: `ContentTab`, and are linked
back to this `TabbedContent` instance via a `tabbed_content` attribute.
Args:
tabs: The Tabs instance to check.
Returns:
True if the tab is associated with this `TabbedContent`.
"""
return isinstance(tabs, ContentTabs) and tabs.tabbed_content is self
def _watch_active(self, active: str) -> None:
"""Switch tabs when the active attributes changes."""
with self.prevent(Tabs.TabActivated, Tabs.Cleared):
self.get_child_by_type(ContentTabs).active = ContentTab.add_prefix(active)
self.get_child_by_type(ContentSwitcher).current = active
if active:
self.post_message(
TabbedContent.TabActivated(
tabbed_content=self,
tab=self.get_child_by_type(ContentTabs).get_content_tab(active),
)
)
else:
self.post_message(
TabbedContent.Cleared(tabbed_content=self).set_sender(self)
)
@property
def tab_count(self) -> int:
"""Total number of tabs."""
return self.get_child_by_type(ContentTabs).tab_count
def get_tab(self, pane_id: str | TabPane) -> Tab:
"""Get the `Tab` associated with the given ID or `TabPane`.
Args:
pane_id: The ID of the pane, or the pane itself.
Returns:
The Tab associated with the ID.
Raises:
ValueError: Raised if no ID was available.
"""
if target_id := (pane_id if isinstance(pane_id, str) else pane_id.id):
return self.get_child_by_type(ContentTabs).get_content_tab(target_id)
raise ValueError(
"'pane_id' must be a non-empty string or a TabPane with an id."
)
def get_pane(self, pane_id: str | ContentTab) -> TabPane:
"""Get the `TabPane` associated with the given ID or tab.
Args:
pane_id: The ID of the pane to get, or the Tab it is associated with.
Returns:
The `TabPane` associated with the ID or the given tab.
Raises:
ValueError: Raised if no ID was available.
"""
target_id: str | None = None
if isinstance(pane_id, ContentTab):
target_id = (
pane_id.id if pane_id.id is None else ContentTab.sans_prefix(pane_id.id)
)
else:
target_id = pane_id
if target_id:
pane = self.get_child_by_type(ContentSwitcher).get_child_by_id(target_id)
assert isinstance(pane, TabPane)
return pane
raise ValueError(
"'pane_id' must be a non-empty string or a ContentTab with an id."
)
def _on_tabs_tab_disabled(self, event: Tabs.TabDisabled) -> None:
"""Disable the corresponding tab pane."""
if event.tabs.parent is not self:
return
event.stop()
tab_id = event.tab.id or ""
try:
with self.prevent(TabPane.Disabled):
self.get_child_by_type(ContentSwitcher).get_child_by_id(
ContentTab.sans_prefix(tab_id), expect_type=TabPane
).disabled = True
except NoMatches:
return
def _on_tab_pane_disabled(self, event: TabPane.Disabled) -> None:
"""Disable the corresponding tab."""
event.stop()
try:
with self.prevent(Tab.Disabled):
self.get_tab(event.tab_pane).disabled = True
except NoMatches:
return
def _on_tabs_tab_enabled(self, event: Tabs.TabEnabled) -> None:
"""Enable the corresponding tab pane."""
if event.tabs.parent is not self:
return
event.stop()
tab_id = event.tab.id or ""
try:
with self.prevent(TabPane.Enabled):
self.get_child_by_type(ContentSwitcher).get_child_by_id(
ContentTab.sans_prefix(tab_id), expect_type=TabPane
).disabled = False
except NoMatches:
return
def _on_tab_pane_enabled(self, event: TabPane.Enabled) -> None:
"""Enable the corresponding tab."""
event.stop()
try:
with self.prevent(Tab.Disabled):
self.get_tab(event.tab_pane).disabled = False
except NoMatches:
return
def disable_tab(self, tab_id: str) -> None:
"""Disables the tab with the given ID.
Args:
tab_id: The ID of the [`TabPane`][textual.widgets.TabPane] to disable.
Raises:
Tabs.TabError: If there are any issues with the request.
"""
self.get_child_by_type(ContentTabs).disable(tab_id)
def enable_tab(self, tab_id: str) -> None:
"""Enables the tab with the given ID.
Args:
tab_id: The ID of the [`TabPane`][textual.widgets.TabPane] to enable.
Raises:
Tabs.TabError: If there are any issues with the request.
"""
self.get_child_by_type(ContentTabs).enable(tab_id)
def hide_tab(self, tab_id: str) -> None:
"""Hides the tab with the given ID.
Args:
tab_id: The ID of the [`TabPane`][textual.widgets.TabPane] to hide.
Raises:
Tabs.TabError: If there are any issues with the request.
"""
self.get_child_by_type(ContentTabs).hide(tab_id)
def show_tab(self, tab_id: str) -> None:
"""Shows the tab with the given ID.
Args:
tab_id: The ID of the [`TabPane`][textual.widgets.TabPane] to show.
Raises:
Tabs.TabError: If there are any issues with the request.
"""
self.get_child_by_type(ContentTabs).show(tab_id)
|
TabbedContent
|
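For orientation, a minimal usage sketch of the widget above. It assumes the textual package is installed and follows the documented compose-with-context-manager pattern; the app, labels and ids are made up for illustration.

from textual.app import App, ComposeResult
from textual.widgets import Label, TabbedContent, TabPane

class TwoTabsApp(App):
    def compose(self) -> ComposeResult:
        # Each TabPane becomes a tab; `initial` selects the active pane by id.
        with TabbedContent(initial="first"):
            with TabPane("First", id="first"):
                yield Label("Pane one")
            with TabPane("Second", id="second"):
                yield Label("Pane two")

if __name__ == "__main__":
    TwoTabsApp().run()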
python
|
sqlalchemy__sqlalchemy
|
test/dialect/oracle/test_dialect.py
|
{
"start": 30562,
"end": 33353
}
|
class ____:
@property
def name(self):
raise NotImplementedError
@property
def dbapi(self):
raise NotImplementedError
@property
def dialect_cls(self):
raise NotImplementedError
def test_cx_oracle_service_name(self):
url_string = f"oracle+{self.name}://scott:tiger@host/?service_name=hr"
eng = create_engine(url_string, _initialize=False)
cargs, cparams = eng.dialect.create_connect_args(eng.url)
assert "SERVICE_NAME=hr" in cparams["dsn"]
assert "SID=hr" not in cparams["dsn"]
def test_cx_oracle_service_name_bad(self):
url_string = (
f"oracle+{self.name}://scott:tiger@host/hr1?service_name=hr2"
)
assert_raises(
exc.InvalidRequestError,
create_engine,
url_string,
_initialize=False,
)
def _test_db_opt(self, url_string, key, value):
url_obj = url.make_url(url_string)
dialect = self.dialect_cls(dbapi=self.dbapi)
arg, kw = dialect.create_connect_args(url_obj)
eq_(kw[key], value)
def _test_db_opt_unpresent(self, url_string, key):
url_obj = url.make_url(url_string)
dialect = self.dialect_cls(dbapi=self.dbapi)
arg, kw = dialect.create_connect_args(url_obj)
assert key not in kw
def test_mode(self):
self._test_db_opt(
f"oracle+{self.name}://scott:tiger@host/?mode=sYsDBA",
"mode",
self.dbapi.SYSDBA,
)
self._test_db_opt(
f"oracle+{self.name}://scott:tiger@host/?mode=SYSOPER",
"mode",
self.dbapi.SYSOPER,
)
def test_int_mode(self):
self._test_db_opt(
f"oracle+{self.name}://scott:tiger@host/?mode=32767", "mode", 32767
)
@testing.requires.cxoracle6_or_greater
def test_purity(self):
self._test_db_opt(
f"oracle+{self.name}://scott:tiger@host/?purity=attr_purity_new",
"purity",
self.dbapi.ATTR_PURITY_NEW,
)
def test_encoding(self):
self._test_db_opt(
f"oracle+{self.name}://scott:tiger@host/"
"?encoding=AMERICAN_AMERICA.UTF8",
"encoding",
"AMERICAN_AMERICA.UTF8",
)
def test_threaded(self):
self._test_db_opt(
f"oracle+{self.name}://scott:tiger@host/?threaded=true",
"threaded",
True,
)
self._test_db_opt_unpresent(
f"oracle+{self.name}://scott:tiger@host/", "threaded"
)
def test_events(self):
self._test_db_opt(
f"oracle+{self.name}://scott:tiger@host/?events=true",
"events",
True,
)
|
BaseConnectArgsTest
|
python
|
realpython__materials
|
python-class/square.py
|
{
"start": 0,
"end": 380
}
|
class ____:
def __init__(self, side):
self.side = side
@property
def side(self):
return self._side
@side.setter
def side(self, value):
if not isinstance(value, int | float) or value <= 0:
raise ValueError("positive number expected")
self._side = value
def calculate_area(self):
return self._side**2
|
Square
|
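A short usage sketch for the Square sample above; it is illustrative and assumes the class is importable from the listed module as square.Square.

from square import Square  # assumed import path, per python-class/square.py

square = Square(5)
print(square.calculate_area())  # 25

square.side = 2.5               # goes through the validating setter
print(square.calculate_area())  # 6.25

try:
    square.side = -3            # rejected by the setter
except ValueError as error:
    print(error)                # positive number expected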
python
|
pypa__pipenv
|
pipenv/installers.py
|
{
"start": 7167,
"end": 8161
}
|
class ____(Installer):
def _find_installer(self):
return self._find_python_installer_by_name_and_env("asdf", "ASDF_DIR")
def iter_installable_versions(self):
"""Iterate through CPython versions available for asdf to install."""
for name in self._run("list-all", "python").stdout.splitlines():
try:
version = Version.parse(name.strip())
except ValueError:
continue
yield version
def install(self, version):
"""Install the given version with asdf.
The version must be a ``Version`` instance representing a version
found in asdf.
A ValueError is raised if the given version does not have a match in
asdf. An InstallerError is raised if the asdf command fails.
"""
c = self._run(
"install",
"python",
str(version),
timeout=self.project.s.PIPENV_INSTALL_TIMEOUT,
)
return c
|
Asdf
|
python
|
realpython__materials
|
duck-typing-python/vehicles_abc.py
|
{
"start": 38,
"end": 515
}
|
class ____(ABC):
def __init__(self, make, model, color):
self.make = make
self.model = model
self.color = color
@abstractmethod
def start(self):
raise NotImplementedError("This method must be implemented")
@abstractmethod
def stop(self):
raise NotImplementedError("This method must be implemented")
@abstractmethod
def drive(self):
raise NotImplementedError("This method must be implemented")
|
Vehicle
|
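An illustrative concrete subclass of the Vehicle ABC above, assuming the module is importable as vehicles_abc; any class that implements all three abstract methods can be instantiated, while Vehicle itself cannot.

from vehicles_abc import Vehicle  # assumed import path, per duck-typing-python/vehicles_abc.py

class Car(Vehicle):
    def start(self):
        print(f"Starting the {self.color} {self.make} {self.model}...")

    def stop(self):
        print("Stopping...")

    def drive(self):
        print("Driving...")

car = Car("Toyota", "Corolla", "blue")   # works: all abstract methods implemented
car.start()
# Vehicle("Ford", "F-150", "red")        # would raise TypeError: abstract methods not implemented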
python
|
aio-libs__aiohttp
|
aiohttp/web_urldispatcher.py
|
{
"start": 22803,
"end": 24435
}
|
class ____(PrefixResource):
def __init__(self, prefix: str, app: "Application") -> None:
super().__init__(prefix)
self._app = app
self._add_prefix_to_resources(prefix)
def add_prefix(self, prefix: str) -> None:
super().add_prefix(prefix)
self._add_prefix_to_resources(prefix)
def _add_prefix_to_resources(self, prefix: str) -> None:
router = self._app.router
for resource in router.resources():
# Since the canonical path of a resource is about
# to change, we need to unindex it and then reindex
router.unindex_resource(resource)
resource.add_prefix(prefix)
router.index_resource(resource)
def url_for(self, *args: str, **kwargs: str) -> URL:
raise RuntimeError(".url_for() is not supported by sub-application root")
def get_info(self) -> _InfoDict:
return {"app": self._app, "prefix": self._prefix}
async def resolve(self, request: Request) -> _Resolve:
match_info = await self._app.router.resolve(request)
match_info.add_app(self._app)
if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
methods = match_info.http_exception.allowed_methods
else:
methods = set()
return match_info, methods
def __len__(self) -> int:
return len(self._app.router.routes())
def __iter__(self) -> Iterator[AbstractRoute]:
return iter(self._app.router.routes())
def __repr__(self) -> str:
return f"<PrefixedSubAppResource {self._prefix} -> {self._app!r}>"
|
PrefixedSubAppResource
|
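The resource above is normally created indirectly rather than instantiated by hand; the sketch below shows the usual route, mounting a sub-application under a prefix. It assumes aiohttp is installed, and the /admin prefix and handler are made up for illustration.

from aiohttp import web

async def hello(request: web.Request) -> web.Response:
    return web.Response(text="hello from the sub-app")

admin = web.Application()
admin.router.add_get("/hello", hello)

app = web.Application()
app.add_subapp("/admin", admin)  # mounts `admin` behind a prefixed sub-app resource

if __name__ == "__main__":
    web.run_app(app)  # GET /admin/hello -> "hello from the sub-app"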
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/dep_diamond_patch_mid2/package.py
|
{
"start": 217,
"end": 807
}
|
class ____(Package):
r"""Package that requires a patch on a dependency
  W
 / \
X   Y
 \ /
  Z
This is package Y
"""
homepage = "http://www.example.com"
url = "http://www.example.com/patch-a-dependency-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
# single patch file in repo
depends_on(
"patch",
patches=[
patch(
"http://example.com/urlpatch.patch",
sha256="mid21234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234",
)
],
)
|
DepDiamondPatchMid2
|
python
|
wandb__wandb
|
wandb/sdk/backend/backend.py
|
{
"start": 368,
"end": 1327
}
|
class ____:
interface: InterfaceBase | None
_settings: Settings
_done: bool
_service: service_connection.ServiceConnection | None
def __init__(
self,
settings: Settings,
service: service_connection.ServiceConnection | None = None,
) -> None:
self._done = False
self.interface = None
self._settings = settings
self._service = service
def ensure_launched(self) -> None:
"""Launch backend worker if not running."""
assert self._settings.run_id
assert self._service
self.interface = self._service.make_interface(
stream_id=self._settings.run_id,
)
def server_status(self) -> None:
"""Report server status."""
def cleanup(self) -> None:
# TODO: make _done atomic
if self._done:
return
self._done = True
if self.interface:
self.interface.join()
|
Backend
|
python
|
pikepdf__pikepdf
|
src/pikepdf/canvas.py
|
{
"start": 1659,
"end": 2607
}
|
class ____(ABC):
"""Base class for fonts."""
@abstractmethod
def text_width(
self, text: str | bytes, fontsize: float | int | Decimal
) -> float | int | Decimal:
"""Estimate the width of a text string when rendered with the given font."""
@abstractmethod
def register(self, pdf: Pdf) -> Dictionary:
"""Register the font.
Create several data structures in the Pdf to describe the font.
After registering the font, the returned object should be added to the
/Resources dictionary of any page or Form XObject that uses the font. For
example one might write:
```python
page.Resources.Font[Name.Arial] = font.register(pdf)
```
The same object can be used for multiple pages or Form XObjects, since it is
an indirect object.
Returns a Dictionary suitable for insertion into a /Resources /Font dictionary.
"""
|
Font
|