| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
python
|
run-llama__llama_index
|
llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/llama_index/embeddings/mixedbreadai/base.py
|
{
"start": 422,
"end": 8042
}
|
class ____(BaseEmbedding):
"""
Class to get embeddings using the mixedbread ai embedding API with models such as 'mixedbread-ai/mxbai-embed-large-v1'.
Args:
api_key (Optional[str]): mixedbread ai API key. Defaults to None.
model_name (str): Model for embedding. Defaults to "mixedbread-ai/mxbai-embed-large-v1".
encoding_format (EncodingFormat): Encoding format for embeddings. Defaults to "float".
normalized (bool): Whether to normalize the embeddings. Defaults to True.
dimensions (Optional[int]): Number of dimensions for embeddings. Only applicable for models with matryoshka support.
prompt (Optional[str]): An optional prompt to provide context to the model.
embed_batch_size (Optional[int]): The batch size for embedding calls. Defaults to 128.
callback_manager (Optional[CallbackManager]): Manager for handling callbacks.
timeout (Optional[float]): Timeout for API calls.
max_retries (Optional[int]): Maximum number of retries for API calls.
httpx_client (Optional[httpx.Client]): Custom HTTPX client.
httpx_async_client (Optional[httpx.AsyncClient]): Custom asynchronous HTTPX client.
"""
api_key: str = Field(description="The mixedbread ai API key.", min_length=1)
model_name: str = Field(
default="mixedbread-ai/mxbai-embed-large-v1",
description="Model to use for embeddings.",
min_length=1,
)
encoding_format: EncodingFormat = Field(
default="float", description="Encoding format for the embeddings."
)
normalized: bool = Field(
default=True, description="Whether to normalize the embeddings."
)
dimensions: Optional[int] = Field(
default=None,
description="Number of dimensions for embeddings. Only applicable for models with matryoshka support.",
gt=0,
)
prompt: Optional[str] = Field(
default=None,
description="An optional prompt to provide context to the model.",
min_length=1,
)
embed_batch_size: int = Field(
default=128, description="The batch size for embedding calls.", gt=0, le=256
)
_client: Mixedbread = PrivateAttr()
_async_client: AsyncMixedbread = PrivateAttr()
def __init__(
self,
api_key: Optional[str] = None,
model_name: str = "mixedbread-ai/mxbai-embed-large-v1",
encoding_format: EncodingFormat = "float",
normalized: bool = True,
dimensions: Optional[int] = None,
prompt: Optional[str] = None,
embed_batch_size: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
timeout: Optional[float] = None,
max_retries: Optional[int] = None,
httpx_client: Optional[httpx.Client] = None,
httpx_async_client: Optional[httpx.AsyncClient] = None,
**kwargs: Any,
):
if embed_batch_size is None:
embed_batch_size = 128 # Default batch size for mixedbread ai
try:
api_key = api_key or os.environ["MXBAI_API_KEY"]
except KeyError:
raise ValueError(
"Must pass in mixedbread ai API key or "
"specify via MXBAI_API_KEY environment variable "
)
super().__init__(
api_key=api_key,
model_name=model_name,
encoding_format=encoding_format,
normalized=normalized,
dimensions=dimensions,
prompt=prompt,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
self._client = Mixedbread(
api_key=api_key,
timeout=timeout,
http_client=httpx_client,
max_retries=max_retries if max_retries is not None else DEFAULT_MAX_RETRIES,
)
self._async_client = AsyncMixedbread(
api_key=api_key,
timeout=timeout,
http_client=httpx_async_client,
max_retries=max_retries if max_retries is not None else DEFAULT_MAX_RETRIES,
)
@classmethod
def class_name(cls) -> str:
return "MixedbreadAIEmbedding"
def _get_embedding(self, texts: List[str]) -> List[List[float]]:
"""
Get embeddings for a list of texts using the mixedbread ai API.
Args:
texts (List[str]): List of texts to embed.
Returns:
List[List[float]]: List of embeddings.
"""
response = self._client.embed(
model=self.model_name,
input=texts,
encoding_format=self.encoding_format,
normalized=self.normalized,
dimensions=self.dimensions,
prompt=self.prompt,
)
return [item.embedding for item in response.data]
async def _aget_embedding(self, texts: List[str]) -> List[List[float]]:
"""
Asynchronously get embeddings for a list of texts using the mixedbread ai API.
Args:
texts (List[str]): List of texts to embed.
Returns:
List[List[float]]: List of embeddings.
"""
response = await self._async_client.embed(
model=self.model_name,
input=texts,
encoding_format=self.encoding_format,
normalized=self.normalized,
dimensions=self.dimensions,
prompt=self.prompt,
)
return [item.embedding for item in response.data]
def _get_query_embedding(self, query: str) -> List[float]:
"""
Get embedding for a query using the mixedbread ai API.
Args:
query (str): Query text.
Returns:
List[float]: Embedding for the query.
"""
return self._get_embedding([query])[0]
async def _aget_query_embedding(self, query: str) -> List[float]:
"""
Asynchronously get embedding for a query using the mixedbread ai API.
Args:
query (str): Query text.
Returns:
List[float]: Embedding for the query.
"""
r = await self._aget_embedding([query])
return r[0]
def _get_text_embedding(self, text: str) -> List[float]:
"""
Get embedding for a text using the mixedbread ai API.
Args:
text (str): Text to embed.
Returns:
List[float]: Embedding for the text.
"""
return self._get_embedding([text])[0]
async def _aget_text_embedding(self, text: str) -> List[float]:
"""
Asynchronously get embedding for a text using the mixedbread ai API.
Args:
text (str): Text to embed.
Returns:
List[float]: Embedding for the text.
"""
r = await self._aget_embedding([text])
return r[0]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""
Get embeddings for multiple texts using the mixedbread ai API.
Args:
texts (List[str]): List of texts to embed.
Returns:
List[List[float]]: List of embeddings.
"""
return self._get_embedding(texts)
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""
Asynchronously get embeddings for multiple texts using the mixedbread ai API.
Args:
texts (List[str]): List of texts to embed.
Returns:
List[List[float]]: List of embeddings.
"""
return await self._aget_embedding(texts)
|
MixedbreadAIEmbedding
|
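A minimal usage sketch for the row above (the class is `MixedbreadAIEmbedding` per the target column). The import path follows the row's file path; the key, model, and query are placeholders:

```python
# Assumes llama-index-embeddings-mixedbreadai is installed and an API key is
# available; get_query_embedding is inherited from BaseEmbedding.
from llama_index.embeddings.mixedbreadai import MixedbreadAIEmbedding

embed_model = MixedbreadAIEmbedding(
    api_key="sk-...",  # placeholder; the MXBAI_API_KEY env var also works
    model_name="mixedbread-ai/mxbai-embed-large-v1",
)
vector = embed_model.get_query_embedding("hello world")
print(len(vector))  # dimensionality of the returned embedding
```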
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_instance.py
|
{
"start": 10280,
"end": 11656
}
|
class ____(ConcurrencyTestSuite):
def test_default_concurrency(self, graphql_context):
# no limits
all_limits = fetch_all_concurrency_limits(graphql_context)
assert len(all_limits) == 0
# default limits are empty
limit = fetch_concurrency_limit(graphql_context, "foo")
assert limit is not None
assert limit["slotCount"] == 0
assert limit["limit"] == 1
assert limit["usingDefaultLimit"]
# set a limit
set_concurrency_limit(graphql_context, "foo", 0)
limit = fetch_concurrency_limit(graphql_context, "foo")
assert limit is not None
assert limit["slotCount"] == 0
assert limit["limit"] == 0
assert not limit["usingDefaultLimit"]
# instance settings
results = execute_dagster_graphql(graphql_context, INSTANCE_QUERY)
assert results.data == {
"instance": {
"runQueuingSupported": True,
"hasInfo": graphql_context.show_instance_config,
"useAutoMaterializeSensors": graphql_context.instance.auto_materialize_use_sensors,
"poolConfig": {
"poolGranularity": None,
"defaultPoolLimit": 1,
"opGranularityRunBuffer": None,
},
}
}
|
TestConcurrencyInstanceSettings
|
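The test above asserts that a pool reports the default limit (`limit == 1` with `usingDefaultLimit` set) until an explicit limit is stored. A standalone sketch of that semantics, independent of dagster's API:

```python
# Plain-Python model of default-vs-explicit pool limits (illustrative only).
DEFAULT_POOL_LIMIT = 1

class PoolLimits:
    def __init__(self) -> None:
        self._explicit: dict[str, int] = {}

    def set_limit(self, key: str, value: int) -> None:
        self._explicit[key] = value

    def get_limit(self, key: str) -> dict:
        if key in self._explicit:
            return {"limit": self._explicit[key], "usingDefaultLimit": False}
        return {"limit": DEFAULT_POOL_LIMIT, "usingDefaultLimit": True}

pools = PoolLimits()
assert pools.get_limit("foo") == {"limit": 1, "usingDefaultLimit": True}
pools.set_limit("foo", 0)
assert pools.get_limit("foo") == {"limit": 0, "usingDefaultLimit": False}
```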
python
|
google__pytype
|
pytype/pretty_printer.py
|
{
"start": 341,
"end": 3474
}
|
class ____(pretty_printer_base.PrettyPrinterBase):
"""Pretty print types for errors."""
def print_generic_type(self, t) -> str:
convert = self.ctx.pytd_convert
generic = pytd_utils.MakeClassOrContainerType(
t.to_pytd_type_of_instance().base_type,
t.formal_type_parameters.keys(),
False,
)
with convert.set_output_mode(convert.OutputMode.DETAILED):
return self.print_pytd(generic)
def print_type_of_instance(self, t: types.BaseValue, instance=None) -> str:
"""Print abstract value t as a pytd type."""
assert isinstance(t, abstract.BaseValue)
convert = self.ctx.pytd_convert
if (
isinstance(t, (abstract.Unknown, abstract.Unsolvable, abstract.Class))
or t.is_late_annotation()
):
with convert.set_output_mode(convert.OutputMode.DETAILED):
return self.print_pytd(t.to_pytd_type_of_instance(instance=instance))
elif isinstance(t, abstract.Union):
return self.join_printed_types(
self.print_type_of_instance(o) for o in t.options
)
elif t.is_concrete:
typ = typing.cast(abstract.PythonConstant, t)
return re.sub(
r"(\\n|\s)+", " ", typ.str_of_constant(self.print_type_of_instance)
)
elif (
isinstance(t, (abstract.AnnotationClass, abstract.Singleton))
or t.cls == t
):
return t.name
else:
return f"<instance of {self.print_type_of_instance(t.cls, t)}>"
def print_type(self, t, literal=False) -> str:
convert = self.ctx.pytd_convert
if literal:
output_mode = convert.OutputMode.LITERAL
else:
output_mode = convert.OutputMode.DETAILED
with convert.set_output_mode(output_mode):
return self.print_pytd(t.to_pytd_type())
def print_function_def(self, fn: types.Function) -> str:
convert = self.ctx.pytd_convert
name = fn.name.rsplit(".", 1)[-1] # We want `def bar()` not `def Foo.bar()`
with convert.set_output_mode(convert.OutputMode.DETAILED):
pytd_def = convert.value_to_pytd_def(self.ctx.root_node, fn, name)
return pytd_utils.Print(pytd_def)
def print_var_type(self, var: cfg.Variable, node: cfg.CFGNode) -> str:
"""Print a pytype variable as a type."""
if not var.bindings:
return "nothing"
convert = self.ctx.pytd_convert
with convert.set_output_mode(convert.OutputMode.DETAILED):
typ = pytd_utils.JoinTypes(
b.data.to_pytd_type()
for b in abstract_utils.expand_type_parameter_instances(var.bindings)
if node.HasCombination([b])
)
return self.print_pytd(typ)
def show_variable(self, var: cfg.Variable) -> str:
"""Show variable as 'name: typ' or 'pyval: typ' if available."""
if not var.data:
return self.print_pytd(pytd.NothingType())
val = var.data[0]
name = self.ctx.vm.get_var_name(var)
typ = self.join_printed_types(self.print_type(t) for t in var.data)
if name:
return f"'{name}: {typ}'"
elif len(var.data) == 1 and hasattr(val, "pyval"):
name = self.show_constant(val)
return f"'{name}: {typ}'"
else:
return f"'{typ}'"
|
PrettyPrinter
|
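The concrete-constant branch above flattens multi-line constant renderings with `re.sub(r"(\\n|\s)+", " ", ...)`, collapsing escaped newlines and whitespace runs into single spaces. A standalone illustration (the input string is made up):

```python
import re

# A constant rendered with an escaped newline plus indentation, roughly what
# str_of_constant might produce (illustrative input).
s = "Dict[str,\\n    int]"
print(re.sub(r"(\\n|\s)+", " ", s))  # -> Dict[str, int]
```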
python
|
getsentry__sentry
|
tests/sentry/feedback/endpoints/test_error_page_embed.py
|
{
"start": 7644,
"end": 11156
}
|
class ____(TestCase):
def setUp(self) -> None:
self.project = self.create_project()
self.project.update_option("sentry:origins", ["example.com"])
self.key = self.create_project_key(self.project)
self.event_id = uuid4().hex
self.path = "{}?eventId={}&dsn={}".format(
reverse("sentry-error-page-embed"),
quote(self.event_id),
quote(self.key.dsn_public),
)
self.environment = Environment.objects.create(
organization_id=self.project.organization_id,
name="production",
)
self.environment.add_project(self.project)
def make_event(self, **kwargs):
min_ago = before_now(minutes=1).isoformat()
result = {
"event_id": "a" * 32,
"message": "foo",
"timestamp": min_ago,
"level": logging.ERROR,
"logger": "default",
"tags": [],
}
result.update(kwargs)
return self.store_event(data=result, project_id=self.project.id, assert_no_errors=False)
def test_environment_gets_user_report(self) -> None:
self.make_event(environment=self.environment.name, event_id=self.event_id)
self.login_as(user=self.user)
response = self.client.post(
self.path,
{"name": "Jane Bloggs", "email": "jane@example.com", "comments": "This is an example!"},
HTTP_REFERER="http://example.com",
)
assert response.status_code == 200, response.content
assert UserReport.objects.get(event_id=self.event_id).environment_id == self.environment.id
@mock.patch("sentry.feedback.usecases.ingest.create_feedback.produce_occurrence_to_kafka")
def test_calls_feedback_shim_if_ff_enabled(
self, mock_produce_occurrence_to_kafka: mock.MagicMock
) -> None:
self.make_event(environment=self.environment.name, event_id=self.event_id)
self.client.post(
self.path,
{
"name": "Jane Bloggs",
"email": "jane@example.com",
"comments": "This is an example!",
},
HTTP_REFERER="http://example.com",
HTTP_ACCEPT="application/json",
)
assert len(mock_produce_occurrence_to_kafka.mock_calls) == 1
mock_event_data = mock_produce_occurrence_to_kafka.call_args_list[0][1]["event_data"]
assert mock_event_data["contexts"]["feedback"]["contact_email"] == "jane@example.com"
assert mock_event_data["contexts"]["feedback"]["message"] == "This is an example!"
assert mock_event_data["contexts"]["feedback"]["name"] == "Jane Bloggs"
assert mock_event_data["platform"] == "other"
assert mock_event_data["contexts"]["feedback"]["associated_event_id"] == self.event_id
assert mock_event_data["level"] == "error"
@mock.patch("sentry.feedback.usecases.ingest.create_feedback.produce_occurrence_to_kafka")
def test_does_not_call_feedback_shim_no_event_if_ff_enabled(
self, mock_produce_occurrence_to_kafka
):
self.client.post(
self.path,
{
"name": "Jane Bloggs",
"email": "jane@example.com",
"comments": "This is an example!",
},
HTTP_REFERER="http://example.com",
HTTP_ACCEPT="application/json",
)
assert len(mock_produce_occurrence_to_kafka.mock_calls) == 0
|
ErrorPageEmbedEnvironmentTest
|
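The shim test above uses `mock.patch` plus `call_args_list` indexing to pull keyword arguments back out of the mocked Kafka producer. A self-contained sketch of the same pattern (the `ingest` function here is a stand-in, not sentry's):

```python
from unittest import mock

def ingest(report: dict, produce) -> None:
    # Stand-in for the feedback shim; the real code produces to Kafka.
    produce(event_data={"contexts": {"feedback": {"message": report["comments"]}}})

producer = mock.MagicMock()
ingest({"comments": "This is an example!"}, producer)

assert len(producer.mock_calls) == 1
# call_args_list[0][1] is the kwargs dict of the first call
event_data = producer.call_args_list[0][1]["event_data"]
assert event_data["contexts"]["feedback"]["message"] == "This is an example!"
```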
python
|
doocs__leetcode
|
solution/0100-0199/0167.Two Sum II - Input Array Is Sorted/Solution.py
|
{
"start": 0,
"end": 306
}
|
class ____:
def twoSum(self, numbers: List[int], target: int) -> List[int]:
n = len(numbers)
for i in range(n - 1):
x = target - numbers[i]
j = bisect_left(numbers, x, lo=i + 1)
if j < n and numbers[j] == x:
return [i + 1, j + 1]
|
Solution
|
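The solution above binary-searches for each element's complement; because the input is sorted, the classic two-pointer scan finds the same answer in O(n). A self-contained sketch for comparison:

```python
from typing import List

def two_sum_sorted(numbers: List[int], target: int) -> List[int]:
    i, j = 0, len(numbers) - 1
    while i < j:
        s = numbers[i] + numbers[j]
        if s == target:
            return [i + 1, j + 1]  # 1-based indices, as the problem requires
        if s < target:
            i += 1
        else:
            j -= 1
    return []

print(two_sum_sorted([2, 7, 11, 15], 9))  # [1, 2]
```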
python
|
psf__black
|
src/black/__init__.py
|
{
"start": 2347,
"end": 53972
}
|
class ____(Enum):
NO = 0
YES = 1
DIFF = 2
CHECK = 3
COLOR_DIFF = 4
@classmethod
def from_configuration(
cls, *, check: bool, diff: bool, color: bool = False
) -> "WriteBack":
if check and not diff:
return cls.CHECK
if diff and color:
return cls.COLOR_DIFF
return cls.DIFF if diff else cls.YES
# Legacy name, left for integrations.
FileMode = Mode
def read_pyproject_toml(
ctx: click.Context, param: click.Parameter, value: str | None
) -> str | None:
"""Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
Returns the path to a successfully found and read configuration file, None
otherwise.
"""
if not value:
value = find_pyproject_toml(
ctx.params.get("src", ()), ctx.params.get("stdin_filename", None)
)
if value is None:
return None
try:
config = parse_pyproject_toml(value)
except (OSError, ValueError) as e:
raise click.FileError(
filename=value, hint=f"Error reading configuration file: {e}"
) from None
if not config:
return None
else:
spellcheck_pyproject_toml_keys(ctx, list(config), value)
# Sanitize the values to be Click friendly. For more information please see:
# https://github.com/psf/black/issues/1458
# https://github.com/pallets/click/issues/1567
config = {
k: str(v) if not isinstance(v, (list, dict)) else v
for k, v in config.items()
}
target_version = config.get("target_version")
if target_version is not None and not isinstance(target_version, list):
raise click.BadOptionUsage(
"target-version", "Config key target-version must be a list"
)
exclude = config.get("exclude")
if exclude is not None and not isinstance(exclude, str):
raise click.BadOptionUsage("exclude", "Config key exclude must be a string")
extend_exclude = config.get("extend_exclude")
if extend_exclude is not None and not isinstance(extend_exclude, str):
raise click.BadOptionUsage(
"extend-exclude", "Config key extend-exclude must be a string"
)
line_ranges = config.get("line_ranges")
if line_ranges is not None:
raise click.BadOptionUsage(
"line-ranges", "Cannot use line-ranges in the pyproject.toml file."
)
default_map: dict[str, Any] = {}
if ctx.default_map:
default_map.update(ctx.default_map)
default_map.update(config)
ctx.default_map = default_map
return value
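# Illustration (comment only, not part of the original module): a pyproject.toml
# this callback accepts -- target-version must be a list, while exclude and
# extend-exclude must be strings, per the checks above:
#   [tool.black]
#   line-length = 100
#   target-version = ["py310"]
#   extend-exclude = "migrations"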
def spellcheck_pyproject_toml_keys(
ctx: click.Context, config_keys: list[str], config_file_path: str
) -> None:
invalid_keys: list[str] = []
available_config_options = {param.name for param in ctx.command.params}
invalid_keys = [key for key in config_keys if key not in available_config_options]
if invalid_keys:
keys_str = ", ".join(map(repr, invalid_keys))
out(
f"Invalid config keys detected: {keys_str} (in {config_file_path})",
fg="red",
)
def target_version_option_callback(
c: click.Context, p: click.Option | click.Parameter, v: tuple[str, ...]
) -> list[TargetVersion]:
"""Compute the target versions from a --target-version flag.
This is its own function because mypy couldn't infer the type correctly
when it was a lambda, causing mypyc trouble.
"""
return [TargetVersion[val.upper()] for val in v]
def enable_unstable_feature_callback(
c: click.Context, p: click.Option | click.Parameter, v: tuple[str, ...]
) -> list[Preview]:
"""Compute the features from an --enable-unstable-feature flag."""
return [Preview[val] for val in v]
def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
"""Compile a regular expression string in `regex`.
If it contains newlines, use verbose mode.
"""
if "\n" in regex:
regex = "(?x)" + regex
compiled: Pattern[str] = re.compile(regex)
return compiled
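# Illustration (comment only, not part of the original module): a pattern
# containing a newline is compiled in verbose mode, so its insignificant
# whitespace is ignored -- re_compile_maybe_verbose("foo\n|bar").match("foo")
# succeeds just as re.compile("foo|bar") would.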
def validate_regex(
ctx: click.Context,
param: click.Parameter,
value: str | None,
) -> Pattern[str] | None:
try:
return re_compile_maybe_verbose(value) if value is not None else None
except re.error as e:
raise click.BadParameter(f"Not a valid regular expression: {e}") from None
@click.command(
context_settings={"help_option_names": ["-h", "--help"]},
# While Click does set this field automatically using the docstring, mypyc
# (annoyingly) strips 'em so we need to set it here too.
help="The uncompromising code formatter.",
)
@click.option("-c", "--code", type=str, help="Format the code passed in as a string.")
@click.option(
"-l",
"--line-length",
type=int,
default=DEFAULT_LINE_LENGTH,
help="How many characters per line to allow.",
show_default=True,
)
@click.option(
"-t",
"--target-version",
type=click.Choice([v.name.lower() for v in TargetVersion]),
callback=target_version_option_callback,
multiple=True,
help=(
"Python versions that should be supported by Black's output. You should"
" include all versions that your code supports. By default, Black will infer"
" target versions from the project metadata in pyproject.toml. If this does"
" not yield conclusive results, Black will use per-file auto-detection."
),
)
@click.option(
"--pyi",
is_flag=True,
help=(
"Format all input files like typing stubs regardless of file extension. This"
" is useful when piping source on standard input."
),
)
@click.option(
"--ipynb",
is_flag=True,
help=(
"Format all input files like Jupyter Notebooks regardless of file extension."
" This is useful when piping source on standard input."
),
)
@click.option(
"--python-cell-magics",
multiple=True,
help=(
"When processing Jupyter Notebooks, add the given magic to the list"
f" of known python-magics ({', '.join(sorted(PYTHON_CELL_MAGICS))})."
" Useful for formatting cells with custom python magics."
),
default=[],
)
@click.option(
"-x",
"--skip-source-first-line",
is_flag=True,
help="Skip the first line of the source code.",
)
@click.option(
"-S",
"--skip-string-normalization",
is_flag=True,
help="Don't normalize string quotes or prefixes.",
)
@click.option(
"-C",
"--skip-magic-trailing-comma",
is_flag=True,
help="Don't use trailing commas as a reason to split lines.",
)
@click.option(
"--preview",
is_flag=True,
help=(
"Enable potentially disruptive style changes that may be added to Black's main"
" functionality in the next major release."
),
)
@click.option(
"--unstable",
is_flag=True,
help=(
"Enable potentially disruptive style changes that have known bugs or are not"
" currently expected to make it into the stable style Black's next major"
" release. Implies --preview."
),
)
@click.option(
"--enable-unstable-feature",
type=click.Choice([v.name for v in Preview]),
callback=enable_unstable_feature_callback,
multiple=True,
help=(
"Enable specific features included in the `--unstable` style. Requires"
" `--preview`. No compatibility guarantees are provided on the behavior"
" or existence of any unstable features."
),
)
@click.option(
"--check",
is_flag=True,
help=(
"Don't write the files back, just return the status. Return code 0 means"
" nothing would change. Return code 1 means some files would be reformatted."
" Return code 123 means there was an internal error."
),
)
@click.option(
"--diff",
is_flag=True,
help=(
"Don't write the files back, just output a diff to indicate what changes"
" Black would've made. They are printed to stdout so capturing them is simple."
),
)
@click.option(
"--color/--no-color",
is_flag=True,
help="Show (or do not show) colored diff. Only applies when --diff is given.",
)
@click.option(
"--line-ranges",
multiple=True,
metavar="START-END",
help=(
"When specified, Black will try its best to only format these lines. This"
" option can be specified multiple times, and a union of the lines will be"
" formatted. Each range must be specified as two integers connected by a `-`:"
" `<START>-<END>`. The `<START>` and `<END>` integer indices are 1-based and"
" inclusive on both ends."
),
default=(),
)
@click.option(
"--fast/--safe",
is_flag=True,
help=(
"By default, Black performs an AST safety check after formatting your code."
" The --fast flag turns off this check and the --safe flag explicitly enables"
" it. [default: --safe]"
),
)
@click.option(
"--required-version",
type=str,
help=(
"Require a specific version of Black to be running. This is useful for"
" ensuring that all contributors to your project are using the same"
" version, because different versions of Black may format code a little"
" differently. This option can be set in a configuration file for consistent"
" results across environments."
),
)
@click.option(
"--exclude",
type=str,
callback=validate_regex,
help=(
"A regular expression that matches files and directories that should be"
" excluded on recursive searches. An empty value means no paths are excluded."
" Use forward slashes for directories on all platforms (Windows, too)."
" By default, Black also ignores all paths listed in .gitignore. Changing this"
f" value will override all default exclusions. [default: {DEFAULT_EXCLUDES}]"
),
show_default=False,
)
@click.option(
"--extend-exclude",
type=str,
callback=validate_regex,
help=(
"Like --exclude, but adds additional files and directories on top of the"
" default values instead of overriding them."
),
)
@click.option(
"--force-exclude",
type=str,
callback=validate_regex,
help=(
"Like --exclude, but files and directories matching this regex will be excluded"
" even when they are passed explicitly as arguments. This is useful when"
" invoking Black programmatically on changed files, such as in a pre-commit"
" hook or editor plugin."
),
)
@click.option(
"--stdin-filename",
type=str,
is_eager=True,
help=(
"The name of the file when passing it through stdin. Useful to make sure Black"
" will respect the --force-exclude option on some editors that rely on using"
" stdin."
),
)
@click.option(
"--include",
type=str,
default=DEFAULT_INCLUDES,
callback=validate_regex,
help=(
"A regular expression that matches files and directories that should be"
" included on recursive searches. An empty value means all files are included"
" regardless of the name. Use forward slashes for directories on all platforms"
" (Windows, too). Overrides all exclusions, including from .gitignore and"
" command line options."
),
show_default=True,
)
@click.option(
"-W",
"--workers",
type=click.IntRange(min=1),
default=None,
help=(
"When Black formats multiple files, it may use a process pool to speed up"
" formatting. This option controls the number of parallel workers. This can"
" also be specified via the BLACK_NUM_WORKERS environment variable. Defaults"
" to the number of CPUs in the system."
),
)
@click.option(
"-q",
"--quiet",
is_flag=True,
help=(
"Stop emitting all non-critical output. Error messages will still be emitted"
" (which can silenced by 2>/dev/null)."
),
)
@click.option(
"-v",
"--verbose",
is_flag=True,
help=(
"Emit messages about files that were not changed or were ignored due to"
" exclusion patterns. If Black is using a configuration file, a message"
" detailing which one it is using will be emitted."
),
)
@click.version_option(
version=__version__,
message=(
f"%(prog)s, %(version)s (compiled: {'yes' if COMPILED else 'no'})\n"
f"Python ({platform.python_implementation()}) {platform.python_version()}"
),
)
@click.argument(
"src",
nargs=-1,
type=click.Path(
exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
),
is_eager=True,
metavar="SRC ...",
)
@click.option(
"--config",
type=click.Path(
exists=True,
file_okay=True,
dir_okay=False,
readable=True,
allow_dash=False,
path_type=str,
),
is_eager=True,
callback=read_pyproject_toml,
help="Read configuration options from a configuration file.",
)
@click.option(
"--no-cache",
is_flag=True,
help=(
"Skip reading and writing the cache, forcing Black to reformat all"
" included files."
),
)
@click.pass_context
def main(
ctx: click.Context,
code: str | None,
line_length: int,
target_version: list[TargetVersion],
check: bool,
diff: bool,
line_ranges: Sequence[str],
color: bool,
fast: bool,
pyi: bool,
ipynb: bool,
python_cell_magics: Sequence[str],
skip_source_first_line: bool,
skip_string_normalization: bool,
skip_magic_trailing_comma: bool,
preview: bool,
unstable: bool,
enable_unstable_feature: list[Preview],
quiet: bool,
verbose: bool,
required_version: str | None,
include: Pattern[str],
exclude: Pattern[str] | None,
extend_exclude: Pattern[str] | None,
force_exclude: Pattern[str] | None,
stdin_filename: str | None,
workers: int | None,
src: tuple[str, ...],
config: str | None,
no_cache: bool,
) -> None:
"""The uncompromising code formatter."""
ctx.ensure_object(dict)
assert sys.version_info >= (3, 10), "Black requires Python 3.10+"
if sys.version_info[:3] == (3, 12, 5):
out(
"Python 3.12.5 has a memory safety issue that can cause Black's "
"AST safety checks to fail. "
"Please upgrade to Python 3.12.6 or downgrade to Python 3.12.4"
)
ctx.exit(1)
if src and code is not None:
out(
main.get_usage(ctx)
+ "\n\n'SRC' and 'code' cannot be passed simultaneously."
)
ctx.exit(1)
if not src and code is None:
out(main.get_usage(ctx) + "\n\nOne of 'SRC' or 'code' is required.")
ctx.exit(1)
# It doesn't do anything if --unstable is also passed, so just allow it.
if enable_unstable_feature and not (preview or unstable):
out(
main.get_usage(ctx)
+ "\n\n'--enable-unstable-feature' requires '--preview'."
)
ctx.exit(1)
root, method = (
find_project_root(src, stdin_filename) if code is None else (None, None)
)
ctx.obj["root"] = root
if verbose:
if root:
out(
f"Identified `{root}` as project root containing a {method}.",
fg="blue",
)
if config:
config_source = ctx.get_parameter_source("config")
user_level_config = str(find_user_pyproject_toml())
if config == user_level_config:
out(
"Using configuration from user-level config at "
f"'{user_level_config}'.",
fg="blue",
)
elif config_source in (
ParameterSource.DEFAULT,
ParameterSource.DEFAULT_MAP,
):
out("Using configuration from project root.", fg="blue")
else:
out(f"Using configuration in '{config}'.", fg="blue")
if ctx.default_map:
for param, value in ctx.default_map.items():
out(f"{param}: {value}")
error_msg = "Oh no! 💥 💔 💥"
if (
required_version
and required_version != __version__
and required_version != __version__.split(".")[0]
):
err(
f"{error_msg} The required version `{required_version}` does not match"
f" the running version `{__version__}`!"
)
ctx.exit(1)
if ipynb and pyi:
err("Cannot pass both `pyi` and `ipynb` flags!")
ctx.exit(1)
write_back = WriteBack.from_configuration(check=check, diff=diff, color=color)
if target_version:
versions = set(target_version)
else:
# We'll autodetect later.
versions = set()
mode = Mode(
target_versions=versions,
line_length=line_length,
is_pyi=pyi,
is_ipynb=ipynb,
skip_source_first_line=skip_source_first_line,
string_normalization=not skip_string_normalization,
magic_trailing_comma=not skip_magic_trailing_comma,
preview=preview,
unstable=unstable,
python_cell_magics=set(python_cell_magics),
enabled_features=set(enable_unstable_feature),
)
lines: list[tuple[int, int]] = []
if line_ranges:
if ipynb:
err("Cannot use --line-ranges with ipynb files.")
ctx.exit(1)
try:
lines = parse_line_ranges(line_ranges)
except ValueError as e:
err(str(e))
ctx.exit(1)
if code is not None:
# Run in quiet mode by default with -c; the extra output isn't useful.
# You can still pass -v to get verbose output.
quiet = True
report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose)
if code is not None:
reformat_code(
content=code,
fast=fast,
write_back=write_back,
mode=mode,
report=report,
lines=lines,
)
else:
assert root is not None # root is only None if code is not None
try:
sources = get_sources(
root=root,
src=src,
quiet=quiet,
verbose=verbose,
include=include,
exclude=exclude,
extend_exclude=extend_exclude,
force_exclude=force_exclude,
report=report,
stdin_filename=stdin_filename,
)
except GitWildMatchPatternError:
ctx.exit(1)
if not sources:
if verbose or not quiet:
out("No Python files are present to be formatted. Nothing to do 😴")
if "-" in src:
sys.stdout.write(sys.stdin.read())
ctx.exit(0)
if len(sources) == 1:
reformat_one(
src=sources.pop(),
fast=fast,
write_back=write_back,
mode=mode,
report=report,
lines=lines,
no_cache=no_cache,
)
else:
from black.concurrency import reformat_many
if lines:
err("Cannot use --line-ranges to format multiple files.")
ctx.exit(1)
reformat_many(
sources=sources,
fast=fast,
write_back=write_back,
mode=mode,
report=report,
workers=workers,
no_cache=no_cache,
)
if verbose or not quiet:
if code is None and (verbose or report.change_count or report.failure_count):
out()
out(error_msg if report.return_code else "All done! ✨ 🍰 ✨")
if code is None:
click.echo(str(report), err=True)
ctx.exit(report.return_code)
def get_sources(
*,
root: Path,
src: tuple[str, ...],
quiet: bool,
verbose: bool,
include: Pattern[str],
exclude: Pattern[str] | None,
extend_exclude: Pattern[str] | None,
force_exclude: Pattern[str] | None,
report: "Report",
stdin_filename: str | None,
) -> set[Path]:
"""Compute the set of files to be formatted."""
sources: set[Path] = set()
assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
using_default_exclude = exclude is None
exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) if exclude is None else exclude
gitignore: dict[Path, PathSpec] | None = None
root_gitignore = get_gitignore(root)
for s in src:
if s == "-" and stdin_filename:
path = Path(stdin_filename)
if path_is_excluded(stdin_filename, force_exclude):
report.path_ignored(
path,
"--stdin-filename matches the --force-exclude regular expression",
)
continue
is_stdin = True
else:
path = Path(s)
is_stdin = False
# Compare the logic here to the logic in `gen_python_files`.
if is_stdin or path.is_file():
if resolves_outside_root_or_cannot_stat(path, root, report):
if verbose:
out(f'Skipping invalid source: "{path}"', fg="red")
continue
root_relative_path = best_effort_relative_path(path, root).as_posix()
root_relative_path = "/" + root_relative_path
# Hard-exclude any files that matches the `--force-exclude` regex.
if path_is_excluded(root_relative_path, force_exclude):
report.path_ignored(
path, "matches the --force-exclude regular expression"
)
continue
if is_stdin:
path = Path(f"{STDIN_PLACEHOLDER}{path}")
if path.suffix == ".ipynb" and not jupyter_dependencies_are_installed(
warn=verbose or not quiet
):
continue
if verbose:
out(f'Found input source: "{path}"', fg="blue")
sources.add(path)
elif path.is_dir():
path = root / (path.resolve().relative_to(root))
if verbose:
out(f'Found input source directory: "{path}"', fg="blue")
if using_default_exclude:
gitignore = {
root: root_gitignore,
path: get_gitignore(path),
}
sources.update(
gen_python_files(
path.iterdir(),
root,
include,
exclude,
extend_exclude,
force_exclude,
report,
gitignore,
verbose=verbose,
quiet=quiet,
)
)
elif s == "-":
if verbose:
out("Found input source stdin", fg="blue")
sources.add(path)
else:
err(f"invalid path: {s}")
return sources
def reformat_code(
content: str,
fast: bool,
write_back: WriteBack,
mode: Mode,
report: Report,
*,
lines: Collection[tuple[int, int]] = (),
) -> None:
"""
Reformat and print out `content` without spawning child processes.
Similar to `reformat_one`, but for string content.
`fast`, `write_back`, and `mode` options are passed to
:func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
"""
path = Path("<string>")
try:
changed = Changed.NO
if format_stdin_to_stdout(
content=content, fast=fast, write_back=write_back, mode=mode, lines=lines
):
changed = Changed.YES
report.done(path, changed)
except Exception as exc:
if report.verbose:
traceback.print_exc()
report.failed(path, str(exc))
# diff-shades depends on being able to monkeypatch this function to operate. I know it's
# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
@mypyc_attr(patchable=True)
def reformat_one(
src: Path,
fast: bool,
write_back: WriteBack,
mode: Mode,
report: "Report",
*,
lines: Collection[tuple[int, int]] = (),
no_cache: bool = False,
) -> None:
"""Reformat a single file under `src` without spawning child processes.
`fast`, `write_back`, and `mode` options are passed to
:func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
"""
try:
changed = Changed.NO
if str(src) == "-":
is_stdin = True
elif str(src).startswith(STDIN_PLACEHOLDER):
is_stdin = True
# Use the original name again in case we want to print something
# to the user
src = Path(str(src)[len(STDIN_PLACEHOLDER) :])
else:
is_stdin = False
if is_stdin:
if src.suffix == ".pyi":
mode = replace(mode, is_pyi=True)
elif src.suffix == ".ipynb":
mode = replace(mode, is_ipynb=True)
if format_stdin_to_stdout(
fast=fast, write_back=write_back, mode=mode, lines=lines
):
changed = Changed.YES
else:
cache = None if no_cache else Cache.read(mode)
if cache is not None and write_back not in (
WriteBack.DIFF,
WriteBack.COLOR_DIFF,
):
if not cache.is_changed(src):
changed = Changed.CACHED
if changed is not Changed.CACHED and format_file_in_place(
src, fast=fast, write_back=write_back, mode=mode, lines=lines
):
changed = Changed.YES
if cache is not None and (
(write_back is WriteBack.YES and changed is not Changed.CACHED)
or (write_back is WriteBack.CHECK and changed is Changed.NO)
):
cache.write([src])
report.done(src, changed)
except Exception as exc:
if report.verbose:
traceback.print_exc()
report.failed(src, str(exc))
def format_file_in_place(
src: Path,
fast: bool,
mode: Mode,
write_back: WriteBack = WriteBack.NO,
lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy
*,
lines: Collection[tuple[int, int]] = (),
) -> bool:
"""Format file under `src` path. Return True if changed.
If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
code to the file.
`mode` and `fast` options are passed to :func:`format_file_contents`.
"""
if src.suffix == ".pyi":
mode = replace(mode, is_pyi=True)
elif src.suffix == ".ipynb":
mode = replace(mode, is_ipynb=True)
then = datetime.fromtimestamp(src.stat().st_mtime, timezone.utc)
header = b""
with open(src, "rb") as buf:
if mode.skip_source_first_line:
header = buf.readline()
src_contents, encoding, newline = decode_bytes(buf.read(), mode)
try:
dst_contents = format_file_contents(
src_contents, fast=fast, mode=mode, lines=lines
)
except NothingChanged:
return False
except JSONDecodeError:
raise ValueError(
f"File '{src}' cannot be parsed as valid Jupyter notebook."
) from None
src_contents = header.decode(encoding) + src_contents
dst_contents = header.decode(encoding) + dst_contents
if write_back == WriteBack.YES:
with open(src, "w", encoding=encoding, newline=newline) as f:
f.write(dst_contents)
elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
now = datetime.now(timezone.utc)
src_name = f"{src}\t{then}"
dst_name = f"{src}\t{now}"
if mode.is_ipynb:
diff_contents = ipynb_diff(src_contents, dst_contents, src_name, dst_name)
else:
diff_contents = diff(src_contents, dst_contents, src_name, dst_name)
if write_back == WriteBack.COLOR_DIFF:
diff_contents = color_diff(diff_contents)
with lock or nullcontext():
f = io.TextIOWrapper(
sys.stdout.buffer,
encoding=encoding,
newline=newline,
write_through=True,
)
f = wrap_stream_for_windows(f)
f.write(diff_contents)
f.detach()
return True
def format_stdin_to_stdout(
fast: bool,
*,
content: str | None = None,
write_back: WriteBack = WriteBack.NO,
mode: Mode,
lines: Collection[tuple[int, int]] = (),
) -> bool:
"""Format file on stdin. Return True if changed.
If content is None, it's read from sys.stdin.
If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
write a diff to stdout. The `mode` argument is passed to
:func:`format_file_contents`.
"""
then = datetime.now(timezone.utc)
if content is None:
src, encoding, newline = decode_bytes(sys.stdin.buffer.read(), mode)
elif Preview.normalize_cr_newlines in mode:
src, encoding, newline = content, "utf-8", "\n"
else:
src, encoding, newline = content, "utf-8", ""
dst = src
try:
dst = format_file_contents(src, fast=fast, mode=mode, lines=lines)
return True
except NothingChanged:
return False
finally:
f = io.TextIOWrapper(
sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
)
if write_back == WriteBack.YES:
# Make sure there's a newline after the content
if Preview.normalize_cr_newlines in mode:
if dst and dst[-1] != "\n" and dst[-1] != "\r":
dst += newline
else:
if dst and dst[-1] != "\n":
dst += "\n"
f.write(dst)
elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
now = datetime.now(timezone.utc)
src_name = f"STDIN\t{then}"
dst_name = f"STDOUT\t{now}"
d = diff(src, dst, src_name, dst_name)
if write_back == WriteBack.COLOR_DIFF:
d = color_diff(d)
f = wrap_stream_for_windows(f)
f.write(d)
f.detach()
def check_stability_and_equivalence(
src_contents: str,
dst_contents: str,
*,
mode: Mode,
lines: Collection[tuple[int, int]] = (),
) -> None:
"""Perform stability and equivalence checks.
Raise AssertionError if source and destination contents are not
equivalent, or if a second pass of the formatter would format the
content differently.
"""
assert_equivalent(src_contents, dst_contents)
assert_stable(src_contents, dst_contents, mode=mode, lines=lines)
def format_file_contents(
src_contents: str,
*,
fast: bool,
mode: Mode,
lines: Collection[tuple[int, int]] = (),
) -> FileContent:
"""Reformat contents of a file and return new contents.
If `fast` is False, additionally confirm that the reformatted code is
valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
`mode` is passed to :func:`format_str`.
"""
if mode.is_ipynb:
dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode)
else:
dst_contents = format_str(src_contents, mode=mode, lines=lines)
if src_contents == dst_contents:
raise NothingChanged
if not fast and not mode.is_ipynb:
# Jupyter notebooks will already have been checked above.
check_stability_and_equivalence(
src_contents, dst_contents, mode=mode, lines=lines
)
return dst_contents
def format_cell(src: str, *, fast: bool, mode: Mode) -> str:
"""Format code in given cell of Jupyter notebook.
General idea is:
- if cell has trailing semicolon, remove it;
- if cell has IPython magics, mask them;
- format cell;
- reinstate IPython magics;
- reinstate trailing semicolon (if originally present);
- strip trailing newlines.
Cells with syntax errors will not be processed, as they
could potentially be automagics or multi-line magics, which
are currently not supported.
"""
validate_cell(src, mode)
src_without_trailing_semicolon, has_trailing_semicolon = remove_trailing_semicolon(
src
)
try:
masked_src, replacements = mask_cell(src_without_trailing_semicolon)
except SyntaxError:
raise NothingChanged from None
masked_dst = format_str(masked_src, mode=mode)
if not fast:
check_stability_and_equivalence(masked_src, masked_dst, mode=mode)
dst_without_trailing_semicolon = unmask_cell(masked_dst, replacements)
dst = put_trailing_semicolon_back(
dst_without_trailing_semicolon, has_trailing_semicolon
)
dst = dst.rstrip("\n")
if dst == src:
raise NothingChanged from None
return dst
def validate_metadata(nb: MutableMapping[str, Any]) -> None:
"""If notebook is marked as non-Python, don't format it.
All notebook metadata fields are optional, see
https://nbformat.readthedocs.io/en/latest/format_description.html. So
if a notebook has empty metadata, we will try to parse it anyway.
"""
language = nb.get("metadata", {}).get("language_info", {}).get("name", None)
if language is not None and language != "python":
raise NothingChanged from None
def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent:
"""Format Jupyter notebook.
Operate cell-by-cell, only on code cells, only for Python notebooks.
If the ``.ipynb`` originally had a trailing newline, it'll be preserved.
"""
if not src_contents:
raise NothingChanged
trailing_newline = src_contents[-1] == "\n"
modified = False
nb = json.loads(src_contents)
validate_metadata(nb)
for cell in nb["cells"]:
if cell.get("cell_type", None) == "code":
try:
src = "".join(cell["source"])
dst = format_cell(src, fast=fast, mode=mode)
except NothingChanged:
pass
else:
cell["source"] = dst.splitlines(keepends=True)
modified = True
if modified:
dst_contents = json.dumps(nb, indent=1, ensure_ascii=False)
if trailing_newline:
dst_contents = dst_contents + "\n"
return dst_contents
else:
raise NothingChanged
def format_str(
src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
) -> str:
"""Reformat a string and return new contents.
`mode` determines formatting options, such as how many characters per line are
allowed. Example:
>>> import black
>>> print(black.format_str("def f(arg:str='')->None:...", mode=black.Mode()))
def f(arg: str = "") -> None:
...
A more complex example:
>>> print(
... black.format_str(
... "def f(arg:str='')->None: hey",
... mode=black.Mode(
... target_versions={black.TargetVersion.PY36},
... line_length=10,
... string_normalization=False,
... is_pyi=False,
... ),
... ),
... )
def f(
arg: str = '',
) -> None:
hey
"""
if lines:
lines = sanitized_lines(lines, src_contents)
if not lines:
return src_contents # Nothing to format
dst_contents = _format_str_once(src_contents, mode=mode, lines=lines)
# Forced second pass to work around optional trailing commas (becoming
# forced trailing commas on pass 2) interacting differently with optional
# parentheses. Admittedly ugly.
if src_contents != dst_contents:
if lines:
lines = adjusted_lines(lines, src_contents, dst_contents)
return _format_str_once(dst_contents, mode=mode, lines=lines)
return dst_contents
def _format_str_once(
src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
) -> str:
if Preview.normalize_cr_newlines in mode:
normalized_contents, _, newline_type = decode_bytes(
src_contents.encode("utf-8"), mode
)
src_node = lib2to3_parse(
normalized_contents.lstrip(), target_versions=mode.target_versions
)
else:
src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
dst_blocks: list[LinesBlock] = []
if mode.target_versions:
versions = mode.target_versions
else:
future_imports = get_future_imports(src_node)
versions = detect_target_versions(src_node, future_imports=future_imports)
line_generation_features = {
feature
for feature in {
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.UNPARENTHESIZED_EXCEPT_TYPES,
Feature.T_STRINGS,
}
if supports_feature(versions, feature)
}
normalize_fmt_off(src_node, mode, lines)
if lines:
# This should be called after normalize_fmt_off.
convert_unchanged_lines(src_node, lines)
line_generator = LineGenerator(mode=mode, features=line_generation_features)
elt = EmptyLineTracker(mode=mode)
split_line_features = {
feature
for feature in {
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
}
if supports_feature(versions, feature)
}
block: LinesBlock | None = None
for current_line in line_generator.visit(src_node):
block = elt.maybe_empty_lines(current_line)
dst_blocks.append(block)
for line in transform_line(
current_line, mode=mode, features=split_line_features
):
block.content_lines.append(str(line))
if dst_blocks:
dst_blocks[-1].after = 0
dst_contents = []
for block in dst_blocks:
dst_contents.extend(block.all_lines())
if not dst_contents:
if Preview.normalize_cr_newlines in mode:
if "\n" in normalized_contents:
return newline_type
else:
# Use decode_bytes to retrieve the correct source newline (CRLF or LF),
# and check if normalized_content has more than one line
normalized_content, _, newline = decode_bytes(
src_contents.encode("utf-8"), mode
)
if "\n" in normalized_content:
return newline
return ""
if Preview.normalize_cr_newlines in mode:
return "".join(dst_contents).replace("\n", newline_type)
else:
return "".join(dst_contents)
def decode_bytes(src: bytes, mode: Mode) -> tuple[FileContent, Encoding, NewLine]:
"""Return a tuple of (decoded_contents, encoding, newline).
`newline` is either CRLF or LF but `decoded_contents` is decoded with
universal newlines (i.e. only contains LF).
"""
srcbuf = io.BytesIO(src)
encoding, lines = tokenize.detect_encoding(srcbuf.readline)
if not lines:
return "", encoding, "\n"
if Preview.normalize_cr_newlines in mode:
if lines[0][-2:] == b"\r\n":
if b"\r" in lines[0][:-2]:
newline = "\r"
else:
newline = "\r\n"
elif lines[0][-1:] == b"\n":
if b"\r" in lines[0][:-1]:
newline = "\r"
else:
newline = "\n"
else:
if b"\r" in lines[0]:
newline = "\r"
else:
newline = "\n"
else:
newline = "\r\n" if lines[0][-2:] == b"\r\n" else "\n"
srcbuf.seek(0)
with io.TextIOWrapper(srcbuf, encoding) as tiow:
return tiow.read(), encoding, newline
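# Illustration (comment only, not part of the original module): for CRLF input
# such as b"x = 1\r\n", decode_bytes returns ("x = 1\n", <encoding>, "\r\n");
# the contents are decoded with universal newlines while the detected newline
# is kept so writes can round-trip the original line endings.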
def get_features_used(
node: Node, *, future_imports: set[str] | None = None
) -> set[Feature]:
"""Return a set of (relatively) new Python features used in this file.
Currently looking for:
- f-strings;
- self-documenting expressions in f-strings (f"{x=}");
- underscores in numeric literals;
- trailing commas after * or ** in function signatures and calls;
- positional only arguments in function signatures and lambdas;
- assignment expression;
- relaxed decorator syntax;
- usage of __future__ flags (annotations);
- print / exec statements;
- parenthesized context managers;
- match statements;
- except* clause;
- variadic generics;
"""
features: set[Feature] = set()
if future_imports:
features |= {
FUTURE_FLAG_TO_FEATURE[future_import]
for future_import in future_imports
if future_import in FUTURE_FLAG_TO_FEATURE
}
for n in node.pre_order():
if n.type == token.FSTRING_START:
features.add(Feature.F_STRINGS)
elif n.type == token.TSTRING_START:
features.add(Feature.T_STRINGS)
elif (
n.type == token.RBRACE
and n.parent is not None
and any(child.type == token.EQUAL for child in n.parent.children)
):
features.add(Feature.DEBUG_F_STRINGS)
elif is_number_token(n):
if "_" in n.value:
features.add(Feature.NUMERIC_UNDERSCORES)
elif n.type == token.SLASH:
if n.parent and n.parent.type in {
syms.typedargslist,
syms.arglist,
syms.varargslist,
}:
features.add(Feature.POS_ONLY_ARGUMENTS)
elif n.type == token.COLONEQUAL:
features.add(Feature.ASSIGNMENT_EXPRESSIONS)
elif n.type == syms.decorator:
if len(n.children) > 1 and not is_simple_decorator_expression(
n.children[1]
):
features.add(Feature.RELAXED_DECORATORS)
elif (
n.type in {syms.typedargslist, syms.arglist}
and n.children
and n.children[-1].type == token.COMMA
):
if n.type == syms.typedargslist:
feature = Feature.TRAILING_COMMA_IN_DEF
else:
feature = Feature.TRAILING_COMMA_IN_CALL
for ch in n.children:
if ch.type in STARS:
features.add(feature)
if ch.type == syms.argument:
for argch in ch.children:
if argch.type in STARS:
features.add(feature)
elif (
n.type in {syms.return_stmt, syms.yield_expr}
and len(n.children) >= 2
and n.children[1].type == syms.testlist_star_expr
and any(child.type == syms.star_expr for child in n.children[1].children)
):
features.add(Feature.UNPACKING_ON_FLOW)
elif (
n.type == syms.annassign
and len(n.children) >= 4
and n.children[3].type == syms.testlist_star_expr
):
features.add(Feature.ANN_ASSIGN_EXTENDED_RHS)
elif (
n.type == syms.with_stmt
and len(n.children) > 2
and n.children[1].type == syms.atom
):
atom_children = n.children[1].children
if (
len(atom_children) == 3
and atom_children[0].type == token.LPAR
and _contains_asexpr(atom_children[1])
and atom_children[2].type == token.RPAR
):
features.add(Feature.PARENTHESIZED_CONTEXT_MANAGERS)
elif n.type == syms.match_stmt:
features.add(Feature.PATTERN_MATCHING)
elif n.type in {syms.subscriptlist, syms.trailer} and any(
child.type == syms.star_expr for child in n.children
):
features.add(Feature.VARIADIC_GENERICS)
elif (
n.type == syms.tname_star
and len(n.children) == 3
and n.children[2].type == syms.star_expr
):
features.add(Feature.VARIADIC_GENERICS)
elif n.type in (syms.type_stmt, syms.typeparams):
features.add(Feature.TYPE_PARAMS)
elif (
n.type in (syms.typevartuple, syms.paramspec, syms.typevar)
and n.children[-2].type == token.EQUAL
):
features.add(Feature.TYPE_PARAM_DEFAULTS)
elif (
n.type == syms.except_clause
and len(n.children) >= 2
and (
n.children[1].type == token.STAR or n.children[1].type == syms.testlist
)
):
is_star_except = n.children[1].type == token.STAR
if is_star_except:
features.add(Feature.EXCEPT_STAR)
            # Presence of except* pushes the as clause one index back
has_as_clause = (
len(n.children) >= is_star_except + 3
and n.children[is_star_except + 2].type == token.NAME
and n.children[is_star_except + 2].value == "as" # type: ignore
)
# If there's no 'as' clause and the except expression is a testlist.
if not has_as_clause and (
(is_star_except and n.children[2].type == syms.testlist)
or (not is_star_except and n.children[1].type == syms.testlist)
):
features.add(Feature.UNPARENTHESIZED_EXCEPT_TYPES)
return features
def _contains_asexpr(node: Node | Leaf) -> bool:
"""Return True if `node` contains an as-pattern."""
if node.type == syms.asexpr_test:
return True
elif node.type == syms.atom:
if (
len(node.children) == 3
and node.children[0].type == token.LPAR
and node.children[2].type == token.RPAR
):
return _contains_asexpr(node.children[1])
elif node.type == syms.testlist_gexp:
return any(_contains_asexpr(child) for child in node.children)
return False
def detect_target_versions(
node: Node, *, future_imports: set[str] | None = None
) -> set[TargetVersion]:
"""Detect the version to target based on the nodes used."""
features = get_features_used(node, future_imports=future_imports)
return {
version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
}
def get_future_imports(node: Node) -> set[str]:
"""Return a set of __future__ imports in the file."""
imports: set[str] = set()
def get_imports_from_children(children: list[LN]) -> Generator[str, None, None]:
for child in children:
if isinstance(child, Leaf):
if child.type == token.NAME:
yield child.value
elif child.type == syms.import_as_name:
orig_name = child.children[0]
assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
yield orig_name.value
elif child.type == syms.import_as_names:
yield from get_imports_from_children(child.children)
else:
raise AssertionError("Invalid syntax parsing imports")
for child in node.children:
if child.type != syms.simple_stmt:
break
first_child = child.children[0]
if isinstance(first_child, Leaf):
# Continue looking if we see a docstring; otherwise stop.
if (
len(child.children) == 2
and first_child.type == token.STRING
and child.children[1].type == token.NEWLINE
):
continue
break
elif first_child.type == syms.import_from:
module_name = first_child.children[1]
if not isinstance(module_name, Leaf) or module_name.value != "__future__":
break
imports |= set(get_imports_from_children(first_child.children[3:]))
else:
break
return imports
def _black_info() -> str:
return (
f"Black {__version__} on "
f"Python ({platform.python_implementation()}) {platform.python_version()}"
)
def assert_equivalent(src: str, dst: str) -> None:
"""Raise AssertionError if `src` and `dst` aren't equivalent."""
try:
src_ast = parse_ast(src)
except Exception as exc:
raise ASTSafetyError(
"cannot use --safe with this file; failed to parse source file AST: "
f"{exc}\n"
"This could be caused by running Black with an older Python version "
"that does not support new syntax used in your source file."
) from exc
try:
dst_ast = parse_ast(dst)
except Exception as exc:
log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
raise ASTSafetyError(
f"INTERNAL ERROR: {_black_info()} produced invalid code: {exc}. "
"Please report a bug on https://github.com/psf/black/issues. "
f"This invalid output might be helpful: {log}"
) from None
src_ast_str = "\n".join(stringify_ast(src_ast))
dst_ast_str = "\n".join(stringify_ast(dst_ast))
if src_ast_str != dst_ast_str:
log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
raise ASTSafetyError(
f"INTERNAL ERROR: {_black_info()} produced code that is not equivalent to"
" the source. Please report a bug on https://github.com/psf/black/issues."
f" This diff might be helpful: {log}"
) from None
def assert_stable(
src: str, dst: str, mode: Mode, *, lines: Collection[tuple[int, int]] = ()
) -> None:
"""Raise AssertionError if `dst` reformats differently the second time."""
if lines:
# Formatting specified lines requires `adjusted_lines` to map original lines
# to the formatted lines before re-formatting the previously formatted result.
        # Due to a less-than-ideal diff algorithm, some edge cases produce incorrect new line
# ranges. Hence for now, we skip the stable check.
# See https://github.com/psf/black/issues/4033 for context.
return
# We shouldn't call format_str() here, because that formats the string
# twice and may hide a bug where we bounce back and forth between two
# versions.
newdst = _format_str_once(dst, mode=mode, lines=lines)
if dst != newdst:
log = dump_to_file(
str(mode),
diff(src, dst, "source", "first pass"),
diff(dst, newdst, "first pass", "second pass"),
)
raise AssertionError(
f"INTERNAL ERROR: {_black_info()} produced different code on the second"
" pass of the formatter. Please report a bug on"
f" https://github.com/psf/black/issues. This diff might be helpful: {log}"
) from None
def patched_main() -> None:
# PyInstaller patches multiprocessing to need freeze_support() even in non-Windows
# environments so just assume we always need to call it if frozen.
if getattr(sys, "frozen", False):
from multiprocessing import freeze_support
freeze_support()
main()
if __name__ == "__main__":
patched_main()
|
WriteBack
|
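A quick self-contained check of the `from_configuration` mapping at the top of the row (the enum is `WriteBack` per the target column); this restates the shown logic rather than importing Black:

```python
from enum import Enum

class WriteBack(Enum):  # name and values follow the row above
    NO = 0
    YES = 1
    DIFF = 2
    CHECK = 3
    COLOR_DIFF = 4

    @classmethod
    def from_configuration(cls, *, check: bool, diff: bool, color: bool = False) -> "WriteBack":
        if check and not diff:
            return cls.CHECK
        if diff and color:
            return cls.COLOR_DIFF
        return cls.DIFF if diff else cls.YES

assert WriteBack.from_configuration(check=True, diff=False) is WriteBack.CHECK
assert WriteBack.from_configuration(check=True, diff=True) is WriteBack.DIFF  # diff wins over check
assert WriteBack.from_configuration(check=False, diff=True, color=True) is WriteBack.COLOR_DIFF
assert WriteBack.from_configuration(check=False, diff=False) is WriteBack.YES
```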
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/ext/baked.py
|
{
"start": 10083,
"end": 17658
}
|
class ____:
"""Invokes a :class:`.BakedQuery` against a :class:`.Session`.
The :class:`_baked.Result` object is where the actual :class:`.query.Query`
object gets created, or retrieved from the cache,
against a target :class:`.Session`, and is then invoked for results.
"""
__slots__ = "bq", "session", "_params", "_post_criteria"
def __init__(self, bq, session):
self.bq = bq
self.session = session
self._params = {}
self._post_criteria = []
def params(self, *args, **kw):
"""Specify parameters to be replaced into the string SQL statement."""
if len(args) == 1:
kw.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary."
)
self._params.update(kw)
return self
def _using_post_criteria(self, fns):
if fns:
self._post_criteria.extend(fns)
return self
def with_post_criteria(self, fn):
"""Add a criteria function that will be applied post-cache.
This adds a function that will be run against the
:class:`_query.Query` object after it is retrieved from the
cache. This currently includes **only** the
:meth:`_query.Query.params` and :meth:`_query.Query.execution_options`
methods.
.. warning:: :meth:`_baked.Result.with_post_criteria`
functions are applied
to the :class:`_query.Query`
object **after** the query's SQL statement
object has been retrieved from the cache. Only
:meth:`_query.Query.params` and
:meth:`_query.Query.execution_options`
methods should be used.
"""
return self._using_post_criteria([fn])
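    # Illustrative usage (comment only), per the warning above -- keep
    # post-cache functions to params() / execution_options():
    #   result = baked_query.for_session(session).with_post_criteria(
    #       lambda q: q.execution_options(stream_results=True)
    #   )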
def _as_query(self):
q = self.bq._as_query(self.session).params(self._params)
for fn in self._post_criteria:
q = fn(q)
return q
def __str__(self):
return str(self._as_query())
def __iter__(self):
return self._iter().__iter__()
def _iter(self):
bq = self.bq
if not self.session.enable_baked_queries or bq._spoiled:
return self._as_query()._iter()
query, statement = bq._bakery.get(
bq._effective_key(self.session), (None, None)
)
if query is None:
query, statement = bq._bake(self.session)
if self._params:
q = query.params(self._params)
else:
q = query
for fn in self._post_criteria:
q = fn(q)
params = q._params
execution_options = dict(q._execution_options)
execution_options.update(
{
"_sa_orm_load_options": q.load_options,
"compiled_cache": bq._bakery,
}
)
result = self.session.execute(
statement, params, execution_options=execution_options
)
if result._attributes.get("is_single_entity", False):
result = result.scalars()
if result._attributes.get("filtered", False):
result = result.unique()
return result
def count(self):
"""return the 'count'.
Equivalent to :meth:`_query.Query.count`.
Note this uses a subquery to ensure an accurate count regardless
of the structure of the original statement.
"""
col = func.count(literal_column("*"))
bq = self.bq.with_criteria(lambda q: q._legacy_from_self(col))
return bq.for_session(self.session).params(self._params).scalar()
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
Equivalent to :meth:`_query.Query.scalar`.
"""
try:
ret = self.one()
if not isinstance(ret, collections_abc.Sequence):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def first(self):
"""Return the first row.
Equivalent to :meth:`_query.Query.first`.
"""
bq = self.bq.with_criteria(lambda q: q.slice(0, 1))
return (
bq.for_session(self.session)
.params(self._params)
._using_post_criteria(self._post_criteria)
._iter()
.first()
)
def one(self):
"""Return exactly one result or raise an exception.
Equivalent to :meth:`_query.Query.one`.
"""
return self._iter().one()
def one_or_none(self):
"""Return one or zero results, or raise an exception for multiple
rows.
Equivalent to :meth:`_query.Query.one_or_none`.
"""
return self._iter().one_or_none()
def all(self):
"""Return all rows.
Equivalent to :meth:`_query.Query.all`.
"""
return self._iter().all()
def get(self, ident):
"""Retrieve an object based on identity.
Equivalent to :meth:`_query.Query.get`.
"""
query = self.bq.steps[0](self.session)
return query._get_impl(ident, self._load_on_pk_identity)
def _load_on_pk_identity(self, session, query, primary_key_identity, **kw):
"""Load the given primary key identity from the database."""
mapper = query._raw_columns[0]._annotations["parententity"]
_get_clause, _get_params = mapper._get_clause
def setup(query):
_lcl_get_clause = _get_clause
q = query._clone()
q._get_condition()
q._order_by = None
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
nones = {
_get_params[col].key
for col, value in zip(
mapper.primary_key, primary_key_identity
)
if value is None
}
_lcl_get_clause = sql_util.adapt_criterion_to_null(
_lcl_get_clause, nones
)
# TODO: can mapper._get_clause be pre-adapted?
q._where_criteria = (
sql_util._deep_annotate(_lcl_get_clause, {"_orm_adapt": True}),
)
for fn in self._post_criteria:
q = fn(q)
return q
# cache the query against a key that includes
# which positions in the primary key are NULL
# (remember, we can map to an OUTER JOIN)
bq = self.bq
# add the clause we got from mapper._get_clause to the cache
# key so that if a race causes multiple calls to _get_clause,
# we've cached on ours
bq = bq._clone()
bq._cache_key += (_get_clause,)
bq = bq.with_criteria(
setup, tuple(elem is None for elem in primary_key_identity)
)
params = {
_get_params[primary_key].key: id_val
for id_val, primary_key in zip(
primary_key_identity, mapper.primary_key
)
}
result = list(bq.for_session(self.session).params(**params))
l = len(result)
if l > 1:
raise orm_exc.MultipleResultsFound()
elif l:
return result[0]
else:
return None
bakery = BakedQuery.bakery
|
Result
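A minimal usage sketch for the Result class above, assuming a mapped User class and an open Session; the bakery() entry point is the documented way Result objects are normally reached:

from sqlalchemy import bindparam
from sqlalchemy.ext import baked

bakery = baked.bakery()

def lookup(session, username):
    # Build (or fetch from cache) the BakedQuery; the lambdas form the cache key.
    baked_query = bakery(lambda s: s.query(User))
    baked_query += lambda q: q.filter(User.name == bindparam("username"))
    # Calling the BakedQuery with a session returns the Result defined above.
    return baked_query(session).params(username=username).one_or_none()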
|
python
|
apache__airflow
|
providers/apache/hive/src/airflow/providers/apache/hive/transfers/hive_to_mysql.py
|
{
"start": 1301,
"end": 5301
}
|
class ____(BaseOperator):
"""
Moves data from Hive to MySQL.
    Note that for now the data is loaded into memory before being pushed
    to MySQL, so this operator should be used for smallish amounts of data.
:param sql: SQL query to execute against Hive server. (templated)
:param mysql_table: target MySQL table, use dot notation to target a
specific database. (templated)
:param mysql_conn_id: source mysql connection
:param hiveserver2_conn_id: Reference to the
:ref:`Hive Server2 thrift service connection id <howto/connection:hiveserver2>`.
    :param mysql_preoperator: sql statement to run against mysql prior to
        import, typically used to truncate or delete the incoming data in
        place, allowing the task to be idempotent (running
        the task twice won't double-load data). (templated)
:param mysql_postoperator: sql statement to run against mysql after the
import, typically used to move data from staging to
production and issue cleanup commands. (templated)
:param bulk_load: flag to use bulk_load option. This loads mysql directly
from a tab-delimited text file using the LOAD DATA LOCAL INFILE command. The MySQL
server must support loading local files via this command (it is disabled by default).
    :param hive_conf: optional dict of Hive configuration values, merged into
        the Airflow context variables passed to Hive with the query.
"""
template_fields: Sequence[str] = ("sql", "mysql_table", "mysql_preoperator", "mysql_postoperator")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {
"sql": "hql",
"mysql_preoperator": "mysql",
"mysql_postoperator": "mysql",
}
ui_color = "#a0e08c"
def __init__(
self,
*,
sql: str,
mysql_table: str,
hiveserver2_conn_id: str = "hiveserver2_default",
mysql_conn_id: str = "mysql_default",
mysql_preoperator: str | None = None,
mysql_postoperator: str | None = None,
bulk_load: bool = False,
hive_conf: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.mysql_table = mysql_table
self.mysql_conn_id = mysql_conn_id
self.mysql_preoperator = mysql_preoperator
self.mysql_postoperator = mysql_postoperator
self.hiveserver2_conn_id = hiveserver2_conn_id
self.bulk_load = bulk_load
self.hive_conf = hive_conf
def execute(self, context: Context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
self.log.info("Extracting data from Hive: %s", self.sql)
hive_conf = context_to_airflow_vars(context)
if self.hive_conf:
hive_conf.update(self.hive_conf)
if self.bulk_load:
with NamedTemporaryFile() as tmp_file:
hive.to_csv(
self.sql,
tmp_file.name,
delimiter="\t",
lineterminator="\n",
output_header=False,
hive_conf=hive_conf,
)
mysql = self._call_preoperator(local_infile=self.bulk_load)
mysql.bulk_load(table=self.mysql_table, tmp_file=tmp_file.name)
else:
hive_results = hive.get_records(self.sql, parameters=hive_conf)
mysql = self._call_preoperator()
mysql.insert_rows(table=self.mysql_table, rows=hive_results)
if self.mysql_postoperator:
self.log.info("Running MySQL postoperator")
mysql.run(self.mysql_postoperator)
self.log.info("Done.")
def _call_preoperator(self, local_infile: bool = False) -> MySqlHook:
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id, local_infile=local_infile)
if self.mysql_preoperator:
self.log.info("Running MySQL preoperator")
mysql.run(self.mysql_preoperator)
self.log.info("Inserting rows into MySQL")
return mysql
|
HiveToMySqlOperator
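A minimal DAG sketch wiring up the operator above; the connection ids, SQL, and table names are placeholders:

from datetime import datetime

from airflow import DAG

with DAG(dag_id="hive_to_mysql_example", start_date=datetime(2024, 1, 1), schedule=None) as dag:
    transfer = HiveToMySqlOperator(
        task_id="hive_to_mysql",
        sql="SELECT * FROM staging.events WHERE ds = '{{ ds }}'",
        mysql_table="analytics.events",
        mysql_preoperator="DELETE FROM analytics.events WHERE ds = '{{ ds }}'",
        bulk_load=False,
    )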
|
python
|
walkccc__LeetCode
|
solutions/1130. Minimum Cost Tree From Leaf Values/1130.py
|
{
"start": 0,
"end": 728
}
|
class ____:
def mctFromLeafValues(self, arr: list[int]) -> int:
n = len(arr)
# dp[i][j] := the minimum cost of arr[i..j]
dp = [[0] * n for _ in range(n)]
# maxVal[i][j] := the maximum value of arr[i..j]
maxVal = [[0] * n for _ in range(n)]
for i in range(n):
maxVal[i][i] = arr[i]
for d in range(1, n):
for i in range(n - d):
j = i + d
maxVal[i][j] = max(maxVal[i][j - 1], maxVal[i + 1][j])
for d in range(1, n):
for i in range(n - d):
j = i + d
dp[i][j] = math.inf
for k in range(i, j):
dp[i][j] = min(dp[i][j], dp[i][k] + dp[k + 1][j] +
maxVal[i][k] * maxVal[k + 1][j])
return dp[0][-1]
|
Solution
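A worked check of the DP above (the snippet relies on math.inf, so import math must be in scope). For arr = [6, 2, 4], merging (2, 4) first costs 2*4 = 8 and leaves [6, 4], whose merge costs 6*4 = 24, for a total of 32; merging (6, 2) first would cost 6*2 + 6*4 = 36, so 32 is the minimum:

import math  # required by the solution above

assert Solution().mctFromLeafValues([6, 2, 4]) == 32
assert Solution().mctFromLeafValues([4, 11]) == 44  # single merge: 4 * 11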
|
python
|
dask__distributed
|
distributed/tests/test_stories.py
|
{
"start": 2718,
"end": 5062
}
|
class ____(Worker):
async def get_story(self, *args, **kw):
raise CommClosedError
@gen_cluster(client=True, Worker=WorkerBrokenStory)
@pytest.mark.parametrize("on_error", ["ignore", "raise"])
async def test_client_story_failed_worker(c, s, a, b, on_error):
f = c.submit(inc, 1)
coro = c.story(f.key, on_error=on_error)
await f
if on_error == "raise":
with pytest.raises(CommClosedError):
await coro
elif on_error == "ignore":
story = await coro
assert story
assert len(story) > 1
else:
raise ValueError(on_error)
@gen_cluster(client=True, config=NO_AMM)
async def test_worker_story_with_deps(c, s, a, b):
"""
Assert that the structure of the story does not change unintentionally and
expected subfields are actually filled
"""
dep = c.submit(inc, 1, workers=[a.address], key="dep")
res = c.submit(inc, dep, workers=[b.address], key="res")
await res
story = a.state.story("res")
assert story == []
# Story now includes randomized stimulus_ids and timestamps.
story = b.state.story("res")
stimulus_ids = {ev[-2].rsplit("-", 1)[0] for ev in story}
assert stimulus_ids == {"compute-task", "gather-dep-success", "task-finished"}
# This is a simple transition log
expected = [
("res", "compute-task", "released"),
("res", "released", "waiting", "waiting", {"dep": "fetch"}),
("res", "waiting", "ready", "ready", {"res": "executing"}),
("res", "ready", "executing", "executing", {}),
("res", "put-in-memory"),
("res", "executing", "memory", "memory", {}),
]
assert_story(story, expected, strict=True)
story = b.state.story("dep")
stimulus_ids = {ev[-2].rsplit("-", 1)[0] for ev in story}
assert stimulus_ids == {"compute-task", "gather-dep-success"}
expected = [
("dep", "ensure-task-exists", "released"),
("dep", "released", "fetch", "fetch", {}),
("gather-dependencies", a.address, {"dep"}),
("dep", "fetch", "flight", "flight", {}),
("request-dep", a.address, {"dep"}),
("receive-dep", a.address, {"dep"}),
("dep", "put-in-memory"),
("dep", "flight", "memory", "memory", {"res": "ready"}),
]
assert_story(story, expected, strict=True)
|
WorkerBrokenStory
|
python
|
spyder-ide__spyder
|
spyder/plugins/ipythonconsole/utils/client.py
|
{
"start": 730,
"end": 2782
}
|
class ____:
"""Class to handle SSH tunneling for a kernel connection."""
def __init__(self, ssh_connection, *, _close_conn_on_exit=False):
self.ssh_connection = ssh_connection
self._port_forwarded = {}
self._close_conn_on_exit = _close_conn_on_exit
def __del__(self):
"""Close all port forwarders and the connection if required."""
for forwarder in self._port_forwarded.values():
forwarder.close()
if self._close_conn_on_exit:
self.ssh_connection.close()
@classmethod
@AsyncDispatcher(loop="asyncssh", early_return=False)
async def new_connection(cls, *args, **kwargs):
"""Create a new SSH connection."""
return cls(
await asyncssh.connect(*args, **kwargs, known_hosts=None),
_close_conn_on_exit=True,
)
@classmethod
def from_connection(cls, ssh_connection):
"""Create a new KernelTunnelHandler from an existing connection."""
return cls(ssh_connection)
@AsyncDispatcher(loop="asyncssh", early_return=False)
async def forward_port(self, remote_host, remote_port):
"""Forward a port through the SSH connection."""
local = self._get_free_port()
try:
self._port_forwarded[(remote_host, remote_port)] = (
await self.ssh_connection.forward_local_port(
'127.0.0.1', local, remote_host, remote_port
)
)
except asyncssh.Error as err:
raise SpyderKernelError(
_(
"It was not possible to open an SSH tunnel to connect to "
"the remote kernel. Please check your credentials and the "
"server connection status."
)
) from err
return local
@staticmethod
def _get_free_port():
"""Request a free port from the OS."""
with socket.socket() as s:
s.bind(("", 0))
return s.getsockname()[1]
|
KernelClientTunneler
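A hedged usage sketch for the tunneler above. The host, credentials, and ports are placeholders, and the sketch assumes the AsyncDispatcher(early_return=False) decorators make these coroutines callable synchronously, as the decorator arguments suggest:

# Hypothetical host/credentials, for illustration only.
tunneler = KernelClientTunneler.new_connection(
    "gateway.example.com", username="me", password="secret"
)
# Forward the remote kernel's shell port; returns the local port to use.
local_shell_port = tunneler.forward_port("127.0.0.1", 55321)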
|
python
|
bokeh__bokeh
|
src/bokeh/models/glyphs.py
|
{
"start": 16569,
"end": 18064
}
|
class ____(Glyph, FillGlyph, HatchGlyph):
''' Render a horizontally directed area between two equal length sequences
of x-coordinates with the same y-coordinates using step lines.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/HAreaStep.py"
_args = ('x1', 'x2', 'y')
x1 = NumberSpec(default=field("x1"), help="""
The x-coordinates for the points of one side of the area.
""")
x2 = NumberSpec(default=field("x2"), help="""
The x-coordinates for the points of the other side of the area.
""")
y = NumberSpec(default=field("y"), help="""
The y-coordinates for the points of the area.
""")
step_mode = Enum(StepMode, default="before", help="""
Where the step "level" should be drawn in relation to the x and y
coordinates. The parameter can assume one of three values:
* ``before``: (default) Draw step levels before each y-coordinate (no step before the first point)
* ``after``: Draw step levels after each y-coordinate (no step after the last point)
* ``center``: Draw step levels centered on each y-coordinate
""")
fill_props = Include(ScalarFillProps, help="""
The {prop} values for the horizontal directed area.
""")
hatch_props = Include(HatchProps, help="""
The {prop} values for the horizontal directed area.
""")
|
HAreaStep
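A minimal plotting sketch for the glyph above, via the harea_step convenience method on figure (available in Bokeh 3.x alongside this model):

from bokeh.plotting import figure, show

y = [1, 2, 3, 4]
x1 = [1, 2, 4, 3]  # one side of the area
x2 = [5, 7, 8, 6]  # the other side

p = figure(width=400, height=300)
p.harea_step(x1=x1, x2=x2, y=y, step_mode="before", fill_color="#99d8c9")
show(p)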
|
python
|
numpy__numpy
|
numpy/lib/tests/test_function_base.py
|
{
"start": 72785,
"end": 74422
}
|
class ____:
class A:
iters = 20
def bound(self, *args):
return 0
@staticmethod
def unbound(*args):
return 0
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.skipif(NOGIL_BUILD,
reason=("Functions are immortalized if a thread is "
"launched, making this test flaky"))
@pytest.mark.parametrize('name, incr', [
('bound', A.iters),
('unbound', 0),
])
@pytest.mark.thread_unsafe(
reason="test result depends on the reference count of a global object"
)
def test_frompyfunc_leaks(self, name, incr):
# exposed in gh-11867 as np.vectorized, but the problem stems from
# frompyfunc.
# class.attribute = np.frompyfunc(<method>) creates a
# reference cycle if <method> is a bound class method.
# It requires a gc collection cycle to break the cycle.
import gc
A_func = getattr(self.A, name)
gc.disable()
try:
refcount = sys.getrefcount(A_func)
for i in range(self.A.iters):
a = self.A()
a.f = np.frompyfunc(getattr(a, name), 1, 1)
out = a.f(np.arange(10))
a = None
# A.func is part of a reference cycle if incr is non-zero
assert_equal(sys.getrefcount(A_func), refcount + incr)
for i in range(5):
gc.collect()
assert_equal(sys.getrefcount(A_func), refcount)
finally:
gc.enable()
|
TestLeaks
|
python
|
pypa__hatch
|
tests/backend/metadata/test_core.py
|
{
"start": 28270,
"end": 31672
}
|
class ____:
def test_dynamic(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"authors": 9000, "dynamic": ["authors"]}})
with pytest.raises(
ValueError,
match="Metadata field `authors` cannot be both statically defined and listed in field `project.dynamic`",
):
_ = metadata.core.authors
def test_not_array(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"authors": "foo"}})
with pytest.raises(TypeError, match="Field `project.authors` must be an array"):
_ = metadata.core.authors
def test_default(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {}})
assert metadata.core.authors == metadata.core.authors == []
def test_not_table(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"authors": ["foo"]}})
with pytest.raises(TypeError, match="Author #1 of field `project.authors` must be an inline table"):
_ = metadata.core.authors
def test_no_data(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"authors": [{}]}})
with pytest.raises(
ValueError, match="Author #1 of field `project.authors` must specify either `name` or `email`"
):
_ = metadata.core.authors
def test_name_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"authors": [{"name": 9}]}})
with pytest.raises(TypeError, match="Name of author #1 of field `project.authors` must be a string"):
_ = metadata.core.authors
def test_name_only(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"authors": [{"name": "foo"}]}})
assert len(metadata.core.authors) == 1
assert metadata.core.authors[0] == {"name": "foo"}
assert metadata.core.authors_data == metadata.core.authors_data == {"name": ["foo"], "email": []}
def test_email_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"authors": [{"email": 9}]}})
with pytest.raises(TypeError, match="Email of author #1 of field `project.authors` must be a string"):
_ = metadata.core.authors
def test_email_only(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"authors": [{"email": "foo@bar.baz"}]}})
assert len(metadata.core.authors) == 1
assert metadata.core.authors[0] == {"email": "foo@bar.baz"}
assert metadata.core.authors_data == {"name": [], "email": ["foo@bar.baz"]}
def test_name_and_email(self, isolation):
metadata = ProjectMetadata(
str(isolation),
None,
{
"project": {
"authors": [{"name": "foo2", "email": "foo2@bar.baz"}, {"name": "foo1", "email": "foo1@bar.baz"}]
}
},
)
assert len(metadata.core.authors) == 2
assert metadata.core.authors[0] == {"name": "foo2", "email": "foo2@bar.baz"}
assert metadata.core.authors[1] == {"name": "foo1", "email": "foo1@bar.baz"}
assert metadata.core.authors_data == {"name": [], "email": ["foo2 <foo2@bar.baz>", "foo1 <foo1@bar.baz>"]}
|
TestAuthors
|
python
|
python-markdown__markdown
|
markdown/blockprocessors.py
|
{
"start": 13728,
"end": 19031
}
|
class ____(BlockProcessor):
""" Process ordered list blocks. """
TAG: str = 'ol'
""" The tag used for the the wrapping element. """
STARTSWITH: str = '1'
"""
    The integer (as a string) with which the list starts. For example, if a list is initialized as
    `3. Item`, then the `ol` tag will be assigned an HTML attribute of `start="3"`. Default: `"1"`.
"""
LAZY_OL: bool = True
""" Ignore `STARTSWITH` if `True`. """
SIBLING_TAGS: list[str] = ['ol', 'ul']
"""
    Markdown does not require the type of a new list item to match the previous list item type.
This is the list of types which can be mixed.
"""
def __init__(self, parser: BlockParser):
super().__init__(parser)
# Detect an item (`1. item`). `group(1)` contains contents of item.
self.RE = re.compile(r'^[ ]{0,%d}\d+\.[ ]+(.*)' % (self.tab_length - 1))
        # Detect items on secondary lines. They can be of either list type.
self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.)|[*+-])[ ]+(.*)' %
(self.tab_length - 1))
# Detect indented (nested) items of either type
self.INDENT_RE = re.compile(r'^[ ]{%d,%d}((\d+\.)|[*+-])[ ]+.*' %
(self.tab_length, self.tab_length * 2 - 1))
def test(self, parent: etree.Element, block: str) -> bool:
return bool(self.RE.match(block))
def run(self, parent: etree.Element, blocks: list[str]) -> None:
# Check for multiple items in one block.
items = self.get_items(blocks.pop(0))
sibling = self.lastChild(parent)
if sibling is not None and sibling.tag in self.SIBLING_TAGS:
# Previous block was a list item, so set that as parent
lst = sibling
# make sure previous item is in a `p` - if the item has text,
# then it isn't in a `p`
if lst[-1].text:
# since it's possible there are other children for this
# sibling, we can't just `SubElement` the `p`, we need to
# insert it as the first item.
p = etree.Element('p')
p.text = lst[-1].text
lst[-1].text = ''
lst[-1].insert(0, p)
# if the last item has a tail, then the tail needs to be put in a `p`
# likely only when a header is not followed by a blank line
lch = self.lastChild(lst[-1])
if lch is not None and lch.tail:
p = etree.SubElement(lst[-1], 'p')
p.text = lch.tail.lstrip()
lch.tail = ''
# parse first block differently as it gets wrapped in a `p`.
li = etree.SubElement(lst, 'li')
self.parser.state.set('looselist')
firstitem = items.pop(0)
self.parser.parseBlocks(li, [firstitem])
self.parser.state.reset()
elif parent.tag in ['ol', 'ul']:
# this catches the edge case of a multi-item indented list whose
# first item is in a blank parent-list item:
# * * subitem1
# * subitem2
# see also `ListIndentProcessor`
lst = parent
else:
# This is a new list so create parent with appropriate tag.
lst = etree.SubElement(parent, self.TAG)
# Check if a custom start integer is set
if not self.LAZY_OL and self.STARTSWITH != '1':
lst.attrib['start'] = self.STARTSWITH
self.parser.state.set('list')
# Loop through items in block, recursively parsing each with the
# appropriate parent.
for item in items:
if item.startswith(' '*self.tab_length):
# Item is indented. Parse with last item as parent
self.parser.parseBlocks(lst[-1], [item])
else:
# New item. Create `li` and parse with it as parent
li = etree.SubElement(lst, 'li')
self.parser.parseBlocks(li, [item])
self.parser.state.reset()
def get_items(self, block: str) -> list[str]:
""" Break a block into list items. """
items = []
for line in block.split('\n'):
m = self.CHILD_RE.match(line)
if m:
# This is a new list item
# Check first item for the start index
if not items and self.TAG == 'ol':
# Detect the integer value of first list item
INTEGER_RE = re.compile(r'(\d+)')
self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
# Append to the list
items.append(m.group(3))
elif self.INDENT_RE.match(line):
# This is an indented (possibly nested) item.
if items[-1].startswith(' '*self.tab_length):
# Previous item was indented. Append to that item.
items[-1] = '{}\n{}'.format(items[-1], line)
else:
items.append(line)
else:
# This is another line of previous item. Append to that item.
items[-1] = '{}\n{}'.format(items[-1], line)
return items
|
OListProcessor
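A quick check of the processor's observable behavior through the public API; with the default LAZY_OL = True, a list starting at 3 still renders without a start attribute:

import markdown

print(markdown.markdown("1. first\n2. second"))
# <ol>
# <li>first</li>
# <li>second</li>
# </ol>

# STARTSWITH is detected as "3" but ignored while LAZY_OL is True,
# so this also renders as a plain <ol> element.
print(markdown.markdown("3. first\n4. second"))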
|
python
|
spack__spack
|
lib/spack/spack/modules/lmod.py
|
{
"start": 18756,
"end": 18938
}
|
class ____(spack.error.SpackError, KeyError):
"""Error raised if the key ``core_compilers`` has not been specified
in the configuration file.
"""
|
CoreCompilersNotFoundError
|
python
|
ray-project__ray
|
python/ray/util/state/common.py
|
{
"start": 55497,
"end": 56560
}
|
class ____:
#: Group key (actor class name) -> summary
summary: Dict[str, ActorSummaryPerClass]
#: Total number of actors
total_actors: int
summary_by: str = "class"
@classmethod
def to_summary(cls, *, actors: List[Dict]):
        # NOTE: The argument `actors` contains a list of dictionaries
        # that have the same k/v pairs as ActorState.
summary = {}
total_actors = 0
for actor in actors:
key = actor["class_name"]
if key not in summary:
summary[key] = ActorSummaryPerClass(
class_name=actor["class_name"],
)
actor_summary = summary[key]
state = actor["state"]
if state not in actor_summary.state_counts:
actor_summary.state_counts[state] = 0
actor_summary.state_counts[state] += 1
total_actors += 1
return ActorSummaries(
summary=summary,
total_actors=total_actors,
)
@dataclass(init=not IS_PYDANTIC_2)
|
ActorSummaries
|
python
|
tornadoweb__tornado
|
tornado/netutil.py
|
{
"start": 15432,
"end": 15923
}
|
class ____(Resolver):
"""Resolver implementation using `.IOLoop.run_in_executor`.
.. versionadded:: 5.0
.. deprecated:: 6.2
Use `DefaultLoopResolver` instead.
"""
async def resolve(
self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> List[Tuple[int, Any]]:
result = await IOLoop.current().run_in_executor(
None, _resolve_addr, host, port, family
)
return result
|
DefaultExecutorResolver
|
python
|
jd__tenacity
|
tenacity/wait.py
|
{
"start": 5779,
"end": 7079
}
|
class ____(wait_base):
"""Wait strategy that applies exponential backoff.
It allows for a customized multiplier and an ability to restrict the
upper and lower limits to some maximum and minimum value.
The intervals are fixed (i.e. there is no jitter), so this strategy is
suitable for balancing retries against latency when a required resource is
unavailable for an unknown duration, but *not* suitable for resolving
contention between multiple processes for a shared resource. Use
wait_random_exponential for the latter case.
"""
def __init__(
self,
multiplier: typing.Union[int, float] = 1,
max: _utils.time_unit_type = _utils.MAX_WAIT, # noqa
exp_base: typing.Union[int, float] = 2,
min: _utils.time_unit_type = 0, # noqa
) -> None:
self.multiplier = multiplier
self.min = _utils.to_seconds(min)
self.max = _utils.to_seconds(max)
self.exp_base = exp_base
def __call__(self, retry_state: "RetryCallState") -> float:
try:
exp = self.exp_base ** (retry_state.attempt_number - 1)
result = self.multiplier * exp
except OverflowError:
return self.max
return max(max(0, self.min), min(result, self.max))
|
wait_exponential
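The wait computed above is multiplier * exp_base ** (attempt_number - 1), clamped into [min, max]. A quick numeric check; since __call__ only reads retry_state.attempt_number, a tiny stand-in object is used here purely for illustration (not the normal calling convention):

from types import SimpleNamespace

wait = wait_exponential(multiplier=1, min=0, max=10)
for attempt in range(1, 7):
    state = SimpleNamespace(attempt_number=attempt)  # stand-in for RetryCallState
    print(attempt, wait(state))
# 1 -> 1, 2 -> 2, 3 -> 4, 4 -> 8, then clamped at max: 10, 10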
|
python
|
huggingface__transformers
|
src/transformers/models/starcoder2/modular_starcoder2.py
|
{
"start": 1907,
"end": 2721
}
|
class ____(nn.Module):
def __init__(self, config: Starcoder2Config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias)
self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias)
self.act = ACT2FN[config.hidden_act]
self.residual_dropout = config.residual_dropout
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training)
return hidden_states
|
Starcoder2MLP
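A small forward-pass sketch for the MLP above; the config sizes here are toy values, not the released checkpoint's:

import torch
from transformers import Starcoder2Config

config = Starcoder2Config(hidden_size=64, intermediate_size=256)
mlp = Starcoder2MLP(config)
hidden = torch.randn(2, 8, 64)   # (batch, seq_len, hidden_size)
out = mlp(hidden)
assert out.shape == (2, 8, 64)   # c_proj maps back to hidden_size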
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/coordinator/cluster_coordinator.py
|
{
"start": 3275,
"end": 4234
}
|
class ____(Exception):
"""Wrapper for errors from resource building.
When a closure starts, it first checks for errors in any of its inputs, which
are RemoteValues from resource closures. If there were any errors, it wraps
the exception in this class and raises so it can be handled by the worker
failure handler.
    Attributes:
      original_exception: The original (unwrapped) exception that was raised.
"""
def __init__(self, original_exception):
# Avoid doubly-nested errors
if isinstance(original_exception,
(ClosureInputError, ClosureAbortedError)):
self.original_exception = original_exception.original_exception
else:
self.original_exception = original_exception
message = ("Input has an error, the original exception is %r, "
"error message is %s." %
(self.original_exception, str(self.original_exception)))
super().__init__(message)
self.with_traceback(original_exception.__traceback__)
|
ClosureInputError
|
python
|
django__django
|
tests/messages_tests/test_mixins.py
|
{
"start": 280,
"end": 1106
}
|
class ____(TestCase):
def test_set_messages_success(self):
author = {"name": "John Doe", "slug": "success-msg"}
add_url = reverse("add_success_msg")
req = self.client.post(add_url, author)
# Uncompressed message is stored in the cookie.
value = b64_decode(
req.cookies["messages"].value.split(":")[0].encode(),
).decode()
self.assertIn(ContactFormViewWithMsg.success_message % author, value)
def test_set_messages_success_on_delete(self):
object_to_delete = SomeObject.objects.create(name="MyObject")
delete_url = reverse("success_msg_on_delete", args=[object_to_delete.pk])
response = self.client.post(delete_url, follow=True)
self.assertContains(response, DeleteFormViewWithMsg.success_message)
|
SuccessMessageMixinTests
|
python
|
pennersr__django-allauth
|
allauth/idp/oidc/contrib/rest_framework/authentication.py
|
{
"start": 197,
"end": 583
}
|
class ____(BaseAuthentication):
"""
Use the OIDC access token to authenticate the request.
"""
def authenticate(self, request):
server = get_server()
orequest = extract_params(request)
valid, ctx = server.verify_request(*orequest, scopes=[])
if not valid:
return None
return ctx.user, ctx.access_token
|
TokenAuthentication
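A hedged wiring sketch for the authentication class above in a DRF view; the endpoint itself is hypothetical:

from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView

class WhoAmIView(APIView):  # hypothetical endpoint, for illustration
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request):
        # request.user / request.auth are set by authenticate() above.
        return Response({"user": request.user.pk})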
|
python
|
PyCQA__pylint
|
tests/functional/u/useless/useless_parent_delegation.py
|
{
"start": 14349,
"end": 14474
}
|
class ____(ReturnTypeAny):
choices = [1, 2, 3]
def draw(self) -> int:
return super().draw()
|
ReturnTypeNarrowed
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/ascent/package.py
|
{
"start": 217,
"end": 748
}
|
class ____(Package):
"""This packagae has the variants shared, defaulted
to True and adios2 defaulted to False"""
homepage = "https://github.com/Alpine-DAV/ascent"
url = "http://www.example.com/ascent-1.0.tar.gz"
version("0.9.2", sha256="44cd954aa5db478ab40042cd54fd6fcedf25000c3bb510ca23fcff8090531b91")
variant("adios2", default=False, description="Build Adios2 filter support")
variant("shared", default=True, description="Build Ascent as shared libs")
depends_on("adios2", when="+adios2")
|
Ascent
|
python
|
bokeh__bokeh
|
examples/plotting/customjs_expr.py
|
{
"start": 461,
"end": 1257
}
|
class ____(DataModel):
amp = Float(default=0.1, help="Amplitude")
freq = Float(default=0.1, help="Frequency")
phase = Float(default=0, help="Phase")
offset = Float(default=-5, help="Offset")
params = Params(amp=2, freq=3, phase=0.4, offset=1)
x = np.linspace(0, 10, 100)
y = CustomJSExpr(args=dict(params=params), code="""
const A = params.amp
const k = params.freq
const phi = params.phase
const B = params.offset
const {x} = this.data
return x.map((xi) => A*Math.sin(k*xi + phi) + B)
/* Alternatively:
for (const xi of x) {
yield A*Math.sin(k*xi + phi) + B
}
*/
""")
plot = figure(tags=[params], y_range=(-5, 5), title="Data models with custom JS expressions")
plot.line(x, y=expr(y), line_width=3, line_alpha=0.6)
show(plot)
|
Params
|
python
|
huggingface__transformers
|
src/transformers/models/vjepa2/modeling_vjepa2.py
|
{
"start": 4360,
"end": 7647
}
|
class ____(nn.Module):
"""
Construct mask token, position and patch embeddings.
"""
def __init__(self, config: VJEPA2Config, hidden_size: int = 1024):
super().__init__()
self.config = config
self.hidden_size = hidden_size
self.patch_embeddings = VJEPA2PatchEmbeddings3D(config, hidden_size=hidden_size)
self.num_patches = self.patch_embeddings.num_patches
self.patch_size = config.patch_size
def forward(self, pixel_values_videos: torch.Tensor) -> torch.Tensor:
num_frames = pixel_values_videos.shape[1]
# Swap `frames` and `channels` dims, the result is:
# (batch_size, channels, num_frames, height, width)
pixel_values_videos = pixel_values_videos.permute(0, 2, 1, 3, 4)
        # In some cases the input vision data (image/video) has num_frames < tubelet_size,
        # and the embedding lookup would fail. In these cases, we duplicate the frames.
if num_frames < self.config.tubelet_size:
pixel_values_videos = pixel_values_videos.repeat(1, 1, self.config.tubelet_size, 1, 1)
target_dtype = self.patch_embeddings.proj.weight.dtype
pixel_values_videos = pixel_values_videos.to(dtype=target_dtype)
embeddings = self.patch_embeddings(pixel_values_videos)
return embeddings
# Adapted from transformers.models.vit.modeling_vit.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
# Normalize the attention scores to probabilities.
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def rotate_queries_or_keys(x, pos):
B, num_heads, N, D = x.size()
    # similar to inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim));
    # here it is recomputed on every call, whereas the usual HF style computes inv_freq once and stores it
# -- compute angle for each position
omega = torch.arange(D // 2, dtype=x.dtype, device=x.device)
omega /= D / 2.0
omega = 1.0 / 10000**omega # (D/2,)
freq = pos.unsqueeze(-1) * omega # (..., N, D/2), outer product
# -- build rotation matrix and apply
emb_sin = freq.sin() # (..., N, D/2)
emb_cos = freq.cos() # (..., N, D/2)
emb_sin = emb_sin.squeeze(-1).repeat(1, 1, 1, 2)
emb_cos = emb_cos.squeeze(-1).repeat(1, 1, 1, 2)
# --
y = x.unflatten(-1, (-1, 2))
y1, y2 = y.unbind(dim=-1)
y = torch.stack((-y2, y1), dim=-1)
y = y.flatten(-2)
return (x * emb_cos) + (y * emb_sin)
|
VJEPA2Embeddings
|
python
|
huggingface__transformers
|
src/transformers/models/smollm3/modeling_smollm3.py
|
{
"start": 11979,
"end": 12706
}
|
class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
SmolLM3RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
SmolLM3RMSNorm
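A numeric sanity check of the normalization above: each hidden vector is scaled by the reciprocal root mean square of its elements, so with the initial all-ones weight the output's mean square is ~1:

import torch

norm = SmolLM3RMSNorm(hidden_size=8)
x = torch.randn(2, 4, 8)
y = norm(x)
print(y.pow(2).mean(-1))  # close to 1.0 everywhere (up to eps)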
|
python
|
Textualize__textual
|
tests/snapshot_tests/snapshot_apps/hot_reloading_app.py
|
{
"start": 499,
"end": 646
}
|
class ____(App[None]):
CSS_PATH = CSS_PATH
def compose(self) -> ComposeResult:
yield Container(Label("Hello, world!"))
|
HotReloadingApp
|
python
|
django__django
|
tests/generic_relations_regress/models.py
|
{
"start": 574,
"end": 636
}
|
class ____(Link):
class Meta:
proxy = True
|
LinkProxy
|
python
|
h5py__h5py
|
examples/swmr_multiprocess.py
|
{
"start": 921,
"end": 1900
}
|
class ____(Process):
def __init__(self, event, fname, dsetname, timeout = 2.0):
super().__init__()
self._event = event
self._fname = fname
self._dsetname = dsetname
self._timeout = timeout
def run(self):
self.log = logging.getLogger('reader')
self.log.info("Waiting for initial event")
assert self._event.wait( self._timeout )
self._event.clear()
self.log.info("Opening file %s", self._fname)
f = h5py.File(self._fname, 'r', libver='latest', swmr=True)
assert f.swmr_mode
dset = f[self._dsetname]
try:
# monitor and read loop
while self._event.wait( self._timeout ):
self._event.clear()
self.log.debug("Refreshing dataset")
dset.refresh()
shape = dset.shape
self.log.info("Read dset shape: %s"%str(shape))
finally:
f.close()
|
SwmrReader
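The reader above waits on an event and calls dset.refresh(); a matching writer-side sketch (the counterpart such an example file would pair it with) appends data in SWMR mode and signals the event:

import h5py
import numpy as np

def write_swmr(event, fname, dsetname, steps=5):
    with h5py.File(fname, "w", libver="latest") as f:
        dset = f.create_dataset(dsetname, shape=(0,), maxshape=(None,), dtype="f8")
        f.swmr_mode = True        # from here on, readers may open the file
        event.set()               # wake the reader's initial wait
        for _ in range(steps):
            n = dset.shape[0]
            dset.resize((n + 10,))
            dset[n:] = np.random.rand(10)
            dset.flush()          # make the new rows visible to SWMR readers
            event.set()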
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/composite_tensor.py
|
{
"start": 991,
"end": 5665
}
|
class ____(metaclass=abc.ABCMeta):
"""Abstract base class for Tensor-like objects that are composed from Tensors.
Each `CompositeTensor` can be decomposed into a structured collection of
component `tf.Tensor`s, and reconstructed from those components.
The `tensorflow.python.util.nest` module has support for treating composite
tensors as structure, which makes it easy to flatten and reconstruct
composite tensors (or larger structures that contain composite tensors).
E.g.:
```python
ct = ... # Create a composite tensor.
flat_list_of_tensors = nest.flatten(ct, expand_composites=True)
transformed_list_of_tensors = ... # do something with the flat tensors.
result = nest.pack_sequence_as(ct, transformed_list_of_tensors,
expand_composites=True)
```
"""
@abc.abstractproperty
def _type_spec(self):
"""A `TypeSpec` describing the type of this value."""
raise NotImplementedError(f"{type(self).__name__}._type_spec()")
def _shape_invariant_to_type_spec(self, shape):
"""Returns a TypeSpec given a shape invariant (used by `tf.while_loop`).
Args:
shape: A `tf.TensorShape` object. The shape invariant for this
`CompositeTensor`, or `None` if a default shape invariant should be used
(based on the value of this `CompositeTensor`).
Returns:
A nested structure whose values are `tf.TensorShape` objects, specifying
the shape invariants for the tensors that comprise this `CompositeTensor`.
"""
# New TypeSpec subclasses generally do not need to implement this --
# this method is used for backwards compatibility. Users of tf.while_loop
# can specify a type by passing in TypeSpec instead.
raise NotImplementedError(
f"{type(self).__name__}._shape_invariant_to_type_spec")
def _consumers(self):
"""Returns a list of `Operation`s that consume this `CompositeTensor`.
Returns:
A list of `Operation`s.
Raises:
RuntimeError: If this method is called while executing eagerly.
"""
consumers = nest.flatten([
component.consumers()
for component in nest.flatten(self, expand_composites=True)
if getattr(component, "graph", None) is not None
])
return list(set(consumers))
def __tf_tracing_type__(self, context):
return self._type_spec.__tf_tracing_type__(context)
def _convert_variables_to_tensors(self):
"""Converts ResourceVariable components to Tensors.
Override this method to explicitly convert ResourceVariables embedded in the
CompositeTensor to Tensors. By default, it returns the CompositeTensor
unchanged.
Returns:
A CompositeTensor with all its ResourceVariable components converted to
Tensors.
"""
return self
def replace_composites_with_components(structure):
"""Recursively replaces CompositeTensors with their components.
Args:
structure: A `nest`-compatible structure, possibly containing composite
tensors.
Returns:
A copy of `structure`, where each composite tensor has been replaced by
its components. The result will contain no composite tensors.
Note that `nest.flatten(replace_composites_with_components(structure))`
returns the same value as `nest.flatten(structure)`.
"""
if isinstance(structure, CompositeTensor):
return replace_composites_with_components(
structure._type_spec._to_components(structure)) # pylint: disable=protected-access
elif not nest.is_nested(structure):
return structure
else:
return nest.map_structure(
replace_composites_with_components, structure, expand_composites=False)
def convert_variables_to_tensors(composite_tensor):
return composite_tensor._convert_variables_to_tensors() # pylint: disable=protected-access
# @TODO(edloper): Can we replace convert_to_tensor_or_xyz with just
# convert_to_tensor_or_composite? Alternatively, should composite tensors
# register a dispatch override for tf.convert_to_tensor?
# Note about the internal encoding of composite tensors when they are "lowered"
# from Python objects to tensors. The usual encoding is "component encoding"
# which uses the dense tensors that represent a composite tensor.
# A second encoding, "batchable tensor list encoding", is used by datasets
# and map_fn which in addition to supporting batching also can use ops
# for encoding and decoding, e.g. for encoding/decoding to/from a
# single variant that represents a composite tensor. Some internal properties
# for type specs for composite tensors use `flat` as a nickname for
# "batchable tensor list encoding". (e.g. `flat_tensor_specs`).
|
CompositeTensor
|
python
|
tiangolo__fastapi
|
docs_src/response_model/tutorial005.py
|
{
"start": 104,
"end": 848
}
|
class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: float = 10.5
items = {
"foo": {"name": "Foo", "price": 50.2},
"bar": {"name": "Bar", "description": "The Bar fighters", "price": 62, "tax": 20.2},
"baz": {
"name": "Baz",
"description": "There goes my baz",
"price": 50.2,
"tax": 10.5,
},
}
@app.get(
"/items/{item_id}/name",
response_model=Item,
response_model_include={"name", "description"},
)
async def read_item_name(item_id: str):
return items[item_id]
@app.get("/items/{item_id}/public", response_model=Item, response_model_exclude={"tax"})
async def read_item_public_data(item_id: str):
return items[item_id]
|
Item
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_us_county_name.py
|
{
"start": 780,
"end": 1789
}
|
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_us_county_name"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_us_county_name(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
|
ColumnValuesToBeValidUSCountyName
|
python
|
eth-brownie__brownie
|
brownie/network/contract.py
|
{
"start": 25295,
"end": 31794
}
|
class ____(_ContractBase):
"""Methods for interacting with a deployed contract.
Each public contract method is available as a ContractCall or ContractTx
instance, created when this class is instantiated.
Attributes:
bytecode: Bytecode of the deployed contract, including constructor args.
    tx: TransactionReceipt of the tx that deployed the contract."""
_reverted = False
_initialized = False
def __init__(
self,
address: HexAddress,
owner: Optional[AccountsType] = None,
tx: TransactionReceiptType = None,
) -> None:
address = _resolve_address(address)
self.bytecode: Final[HexStr] = ( # type: ignore [assignment]
# removeprefix is used for compatibility with both hexbytes<1 and >=1
self._build.get("deployedBytecode", None)
or web3.eth.get_code(address).hex().removeprefix("0x")
)
if not self.bytecode:
raise ContractNotFound(f"No contract deployed at {address}")
self._owner: Final = owner
self.tx: Final = tx
self.address: Final = address
self.events = ContractEvents(self)
_add_deployment_topics(address, self.abi)
fn_abis = [abi for abi in self.abi if abi["type"] == "function"]
fn_names = [abi["name"] for abi in fn_abis]
contract_name = self._name
contract_natspec: dict = self._build.get("natspec") or {}
methods_natspec: dict = contract_natspec.get("methods") or {}
for abi, abi_name in zip(fn_abis, fn_names):
name = f"{contract_name}.{abi_name}"
sig = build_function_signature(abi)
natspec = methods_natspec.get(sig, {})
if fn_names.count(abi_name) == 1:
fn = _get_method_object(address, abi, name, owner, natspec)
self._check_and_set(abi_name, fn)
continue
# special logic to handle function overloading
if not hasattr(self, abi_name):
overloaded = OverloadedMethod(address, name, owner)
self._check_and_set(abi_name, overloaded)
getattr(self, abi_name)._add_fn(abi, natspec)
self._initialized = True
def _check_and_set(self, name: str, obj: Any) -> None:
if name == "balance":
warnings.warn(
f"'{self._name}' defines a 'balance' function, "
f"'{self._name}.balance' is available as {self._name}.wei_balance",
BrownieEnvironmentWarning,
)
setattr(self, "wei_balance", self.balance)
elif hasattr(self, name):
warnings.warn(
"Namespace collision between contract function and "
f"brownie `Contract` class member: '{self._name}.{name}'\n"
f"The {name} function will not be available when interacting with {self._name}",
BrownieEnvironmentWarning,
)
return
setattr(self, name, obj)
def __hash__(self) -> int:
return hash(f"{self._name}{self.address}{self._project}")
def __str__(self) -> str:
return self.address
def __repr__(self) -> str:
alias = self._build.get("alias")
if alias:
return f"<'{alias}' Contract '{self.address}'>"
return f"<{self._name} Contract '{self.address}'>"
def __eq__(self, other: object) -> bool:
if isinstance(other, _DeployedContractBase):
return self.address == other.address and self.bytecode == other.bytecode
if isinstance(other, str):
try:
address = _resolve_address(other)
return address == self.address
except ValueError:
return False
return super().__eq__(other)
def __getattribute__(self, name: str) -> AnyContractMethod:
if super().__getattribute__("_reverted"):
raise ContractNotFound("This contract no longer exists.")
try:
return super().__getattribute__(name)
except AttributeError:
raise AttributeError(f"Contract '{self._name}' object has no attribute '{name}'")
def __setattr__(self, name: str, value: Any) -> None:
if self._initialized and isinstance(getattr(self, name, None), _ContractMethod):
raise AttributeError(
f"{self._name}.{name} is a contract function, it cannot be assigned to"
)
super().__setattr__(name, value)
def get_method_object(self, calldata: str) -> Optional["_ContractMethod"]:
"""
Given a calldata hex string, returns a `ContractMethod` object.
"""
sig = calldata[:10].lower()
if sig not in self.selectors:
return None
fn = getattr(self, self.selectors[sig], None)
if isinstance(fn, OverloadedMethod):
return next((v for v in fn.methods.values() if v.signature == sig), None)
return fn
def balance(self) -> Wei:
"""Returns the current ether balance of the contract, in wei."""
balance = web3.eth.get_balance(self.address)
return Wei(balance)
def _deployment_path(self) -> Optional[Path]:
if not self._project._path or (
CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"]
):
return None
chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
path = self._project._build_path.joinpath(f"deployments/{chainid}")
path.mkdir(exist_ok=True)
return path.joinpath(f"{self.address}.json")
def _save_deployment(self) -> None:
path = self._deployment_path()
chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
deployment_build = self._build.copy()
deployment_build["deployment"] = {
"address": self.address,
"chainid": chainid,
"blockHeight": web3.eth.block_number,
}
if path:
self._project._add_to_deployment_map(self)
if not path.exists():
with path.open("w") as fp:
ujson_dump(deployment_build, fp)
def _delete_deployment(self) -> None:
if path := self._deployment_path():
self._project._remove_from_deployment_map(self)
if path.exists():
path.unlink()
|
_DeployedContractBase
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/translate.py
|
{
"start": 2525,
"end": 2851
}
|
class ____(BaseGoogleLink):
"""
Helper class for constructing Legacy Translation Dataset link.
Legacy Datasets are created and managed by AutoML API.
"""
name = "Translation Legacy Dataset"
key = "translation_legacy_dataset"
format_str = TRANSLATION_LEGACY_DATASET_LINK
|
TranslationLegacyDatasetLink
|
python
|
huggingface__transformers
|
src/transformers/models/pixtral/modeling_pixtral.py
|
{
"start": 12118,
"end": 12845
}
|
class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
PixtralRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
PixtralRMSNorm
|
python
|
pandas-dev__pandas
|
pandas/core/arrays/string_.py
|
{
"start": 2087,
"end": 10937
}
|
class ____(StorageExtensionDtype):
"""
Extension dtype for string data.
.. warning::
StringDtype is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
storage : {"python", "pyarrow"}, optional
If not given, the value of ``pd.options.mode.string_storage``.
na_value : {np.nan, pd.NA}, default pd.NA
Whether the dtype follows NaN or NA missing value semantics.
Attributes
----------
None
Methods
-------
None
See Also
--------
BooleanDtype : Extension dtype for boolean data.
Examples
--------
>>> pd.StringDtype()
<StringDtype(na_value=<NA>)>
>>> pd.StringDtype(storage="python")
<StringDtype(storage='python', na_value=<NA>)>
"""
@property
def name(self) -> str: # type: ignore[override]
if self._na_value is libmissing.NA:
return "string"
else:
return "str"
    #: StringDtype().na_value uses pandas.NA, except for the implementation that
    # follows NumPy semantics, which uses nan.
@property
def na_value(self) -> libmissing.NAType | float: # type: ignore[override]
return self._na_value
_metadata = ("storage", "_na_value") # type: ignore[assignment]
def __init__(
self,
storage: str | None = None,
na_value: libmissing.NAType | float = libmissing.NA,
) -> None:
# infer defaults
if storage is None:
storage = get_option("mode.string_storage")
if storage == "auto":
if HAS_PYARROW:
storage = "pyarrow"
else:
storage = "python"
# validate options
if storage not in {"python", "pyarrow"}:
raise ValueError(
f"Storage must be 'python' or 'pyarrow'. Got {storage} instead."
)
if storage == "pyarrow" and not HAS_PYARROW:
raise ImportError(
f"pyarrow>={PYARROW_MIN_VERSION} is required for PyArrow "
"backed StringArray."
)
if isinstance(na_value, float) and np.isnan(na_value):
# when passed a NaN value, always set to np.nan to ensure we use
# a consistent NaN value (and we can use `dtype.na_value is np.nan`)
na_value = np.nan
elif na_value is not libmissing.NA:
raise ValueError(f"'na_value' must be np.nan or pd.NA, got {na_value}")
self.storage = cast(str, storage)
self._na_value = na_value
def __repr__(self) -> str:
storage = "" if self.storage == "pyarrow" else "storage='python', "
return f"<StringDtype({storage}na_value={self._na_value})>"
def __eq__(self, other: object) -> bool:
# we need to override the base class __eq__ because na_value (NA or NaN)
# cannot be checked with normal `==`
if isinstance(other, str):
# TODO should dtype == "string" work for the NaN variant?
if other == "string" or other == self.name:
return True
try:
other = self.construct_from_string(other)
except (TypeError, ImportError):
# TypeError if `other` is not a valid string for StringDtype
# ImportError if pyarrow is not installed for "string[pyarrow]"
return False
if isinstance(other, type(self)):
return self.storage == other.storage and self.na_value is other.na_value
return False
def __setstate__(self, state: MutableMapping[str, Any]) -> None:
# back-compat for pandas < 2.3, where na_value did not yet exist
self.storage = state.pop("storage", "python")
self._na_value = state.pop("_na_value", libmissing.NA)
def __hash__(self) -> int:
# need to override __hash__ as well because of overriding __eq__
return super().__hash__()
def __reduce__(self):
return StringDtype, (self.storage, self.na_value)
@property
def type(self) -> type[str]:
return str
@classmethod
def construct_from_string(cls, string) -> Self:
"""
Construct a StringDtype from a string.
Parameters
----------
string : str
The type of the name. The storage type will be taking from `string`.
Valid options and their storage types are
========================== ==============================================
string result storage
========================== ==============================================
``'string'`` pd.options.mode.string_storage, default python
``'string[python]'`` python
``'string[pyarrow]'`` pyarrow
========================== ==============================================
Returns
-------
StringDtype
        Raises
        ------
TypeError
If the string is not a valid option.
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
if string == "string":
return cls()
elif string == "str" and using_string_dtype():
return cls(na_value=np.nan)
elif string == "string[python]":
return cls(storage="python")
elif string == "string[pyarrow]":
return cls(storage="pyarrow")
else:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
def construct_array_type(self) -> type_t[BaseStringArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
from pandas.core.arrays.string_arrow import (
ArrowStringArray,
)
if self.storage == "python" and self._na_value is libmissing.NA:
return StringArray
elif self.storage == "pyarrow" and self._na_value is libmissing.NA:
return ArrowStringArray
elif self.storage == "python":
return StringArray
else:
return ArrowStringArray
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
storages = set()
na_values = set()
for dtype in dtypes:
if isinstance(dtype, StringDtype):
storages.add(dtype.storage)
na_values.add(dtype.na_value)
elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "T"):
continue
else:
return None
if len(storages) == 2:
# if both python and pyarrow storage -> priority to pyarrow
storage = "pyarrow"
else:
storage = next(iter(storages)) # type: ignore[assignment]
na_value: libmissing.NAType | float
if len(na_values) == 2:
# if both NaN and NA -> priority to NA
na_value = libmissing.NA
else:
na_value = next(iter(na_values))
return StringDtype(storage=storage, na_value=na_value)
def __from_arrow__(
self, array: pyarrow.Array | pyarrow.ChunkedArray
) -> BaseStringArray:
"""
Construct StringArray from pyarrow Array/ChunkedArray.
"""
if self.storage == "pyarrow":
from pandas.core.arrays.string_arrow import (
ArrowStringArray,
_check_pyarrow_available,
)
_check_pyarrow_available()
if not pa.types.is_large_string(array.type):
array = pc.cast(array, pa.large_string())
return ArrowStringArray(array, dtype=self)
else:
import pyarrow
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
# convert chunk by chunk to numpy and concatenate then, to avoid
# overflow for large string data when concatenating the pyarrow arrays
arr = arr.to_numpy(zero_copy_only=False)
arr = ensure_string_array(arr, na_value=self.na_value)
results.append(arr)
if len(chunks) == 0:
arr = np.array([], dtype=object)
else:
arr = np.concatenate(results)
# Bypass validation inside StringArray constructor, see GH#47781
new_string_array = StringArray.__new__(StringArray)
NDArrayBacked.__init__(new_string_array, arr, self)
return new_string_array
|
StringDtype
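A quick demonstration of the alias parsing in construct_from_string above, through the public pandas entry points:

import pandas as pd

dtype = pd.StringDtype.construct_from_string("string[pyarrow]")
print(dtype.storage)  # "pyarrow" (raises ImportError if pyarrow is missing)

s = pd.array(["a", "b", None], dtype="string")
print(s)  # <NA> marks the missing value under NA semantics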
|
python
|
pdm-project__pdm
|
src/pdm/models/backends.py
|
{
"start": 1249,
"end": 1918
}
|
class ____(BuildBackend):
def expand_line(self, req: str, expand_env: bool = True) -> str:
line = req.replace("file:///${PROJECT_ROOT}", self.root.as_uri())
if expand_env:
line = expand_env_vars(line)
return line
def relative_path_to_url(self, path: str) -> str:
if os.path.isabs(path):
return Path(path).as_uri()
return f"file:///${{PROJECT_ROOT}}/{urllib.parse.quote(path)}"
@classmethod
def build_system(cls) -> BuildSystem:
return {
"requires": ["pdm-backend"],
"build-backend": "pdm.backend",
}
# Context formatting helpers for hatch
|
PDMBackend
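A hedged sketch of the path/URL helpers above, assuming the BuildBackend base class accepts the project root in its constructor and stores it as self.root:

from pathlib import Path

backend = PDMBackend(Path("/home/me/project"))  # assumption: root passed to __init__
print(backend.relative_path_to_url("sub pkg"))
# file:///${PROJECT_ROOT}/sub%20pkg
print(backend.expand_line("demo @ file:///${PROJECT_ROOT}/sub%20pkg", expand_env=False))
# demo @ file:///home/me/project/sub%20pkg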
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/python_api_dispatcher_test.py
|
{
"start": 13612,
"end": 17461
}
|
class ____(test_util.TensorFlowTestCase):
def testBasicDispatch(self):
dispatcher = dispatch.PythonAPIDispatcher('tf.foo', ['x', 'y', 'name'],
(None,))
rt_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
f1 = lambda x, y, name=None: 'f1'
dispatcher.Register(dispatch.PySignatureChecker([(0, rt_checker)]), f1)
rt = ragged_factory_ops.constant([[1, 2], [3]])
self.assertEqual(dispatcher.Dispatch((rt, 5), None), 'f1')
self.assertEqual(dispatcher.Dispatch((rt, 5, 'my_name'), None), 'f1')
self.assertEqual(dispatcher.Dispatch((), {'x': rt, 'y': 5}), 'f1')
self.assertEqual(
dispatcher.Dispatch((), {
'x': rt,
'y': 5,
'name': 'x'
}), 'f1')
self.assertEqual(dispatcher.Dispatch(('foo', rt), None), NotImplemented)
self.assertEqual(dispatcher.Dispatch(('foo', 'bar'), None), NotImplemented)
self.assertEqual(
dispatcher.Dispatch(('foo', 'bar', 'baz'), None), NotImplemented)
def testMultipleDispatchers(self):
dispatcher = dispatch.PythonAPIDispatcher('tf.foo', ['x', 'y', 'name'],
(None,))
rt_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
rt_x_checker = dispatch.PySignatureChecker([(0, rt_checker)])
rt_y_checker = dispatch.PySignatureChecker([(1, rt_checker)])
f1 = lambda x, y, name=None: 'f1'
f2 = lambda x, y, name=None: 'f2'
rt = ragged_factory_ops.constant([[1, 2], [3]])
dispatcher.Register(rt_x_checker, f1)
dispatcher.Register(rt_y_checker, f2)
self.assertEqual(dispatcher.Dispatch((rt, 5), None), 'f1')
self.assertEqual(dispatcher.Dispatch(('foo', rt), None), 'f2')
self.assertEqual(dispatcher.Dispatch(('foo',), {'y': rt}), 'f2')
self.assertEqual(dispatcher.Dispatch(('foo', 'bar'), None), NotImplemented)
with self.assertRaisesRegex(
ValueError, 'Multiple dispatch targets .*'
r'match the arguments to tf\.foo'):
dispatcher.Dispatch((rt, rt), None)
def testListAndUnionDispatch(self):
dispatcher = dispatch.PythonAPIDispatcher('tf.foo', ['x', 'ys', 'name'],
(None,))
rt_checker = dispatch.MakeInstanceChecker(ragged_tensor.RaggedTensor)
tensor_checker = dispatch.MakeInstanceChecker(tensor.Tensor)
rt_or_t = dispatch.MakeUnionChecker([rt_checker, tensor_checker])
list_of_rt_or_t = dispatch.MakeListChecker(rt_or_t)
f1 = lambda x, ys, name=None: 'f1'
dispatcher.Register(
dispatch.PySignatureChecker([(0, rt_or_t), (1, list_of_rt_or_t)]), f1)
rt = ragged_factory_ops.constant([[1, 2], [3]])
t = constant_op.constant(5)
self.assertEqual(dispatcher.Dispatch((rt, [t]), None), 'f1')
self.assertEqual(dispatcher.Dispatch((rt, [rt]), None), 'f1')
self.assertEqual(dispatcher.Dispatch((t, [rt]), None), 'f1')
self.assertEqual(dispatcher.Dispatch((rt, []), None), 'f1')
self.assertEqual(dispatcher.Dispatch((t, [t, t, rt, t]), None), 'f1')
self.assertEqual(dispatcher.Dispatch((rt, [t], 'my_name'), None), 'f1')
self.assertEqual(dispatcher.Dispatch((), {'x': rt, 'ys': [t]}), 'f1')
self.assertEqual(
dispatcher.Dispatch((), {
'x': rt,
'ys': [t],
'name': 'x'
}), 'f1')
self.assertEqual(dispatcher.Dispatch((t, [t]), None), NotImplemented)
self.assertEqual(dispatcher.Dispatch((t, []), None), NotImplemented)
self.assertEqual(dispatcher.Dispatch(('foo', [rt]), None), NotImplemented)
self.assertEqual(dispatcher.Dispatch(('foo', 'bar'), None), NotImplemented)
self.assertEqual(
dispatcher.Dispatch(('foo', 'bar', 'baz'), None), NotImplemented)
if __name__ == '__main__':
googletest.main()
|
PythonAPIDispatcherTest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-google-sheets/destination_google_sheets/buffer.py
|
{
"start": 181,
"end": 4315
}
|
class ____:
# Default instance of AirbyteLogger
logger = AirbyteLogger()
# intervals after which the records_buffer should be cleaned up for selected stream
flush_interval = 500 # records count
flush_interval_size_in_kb = 10**8  # memory allocation ~ 97656 KB or 95 MB ("10 ^ 8" would be bitwise XOR, i.e. 2)
def __init__(self):
# Buffer for input records
self.records_buffer = {}
# Placeholder for streams metadata
self.stream_info = {}
@property
def default_missing(self) -> str:
"""
Default value for keys missing from a record, compared to the configured_stream catalog.
Override if needed.
"""
return ""
def init_buffer_stream(self, configured_stream: AirbyteStream):
"""
Saves important stream information for later use.
In particular, creates the data structure for `records_buffer`.
Populates the `stream_info` placeholder with stream metadata.
"""
stream = configured_stream.stream
self.records_buffer[stream.name] = []
self.stream_info[stream.name] = {
"headers": sorted(list(stream.json_schema.get("properties").keys())),
"is_set": False,
}
def add_to_buffer(self, stream_name: str, record: Mapping):
"""
Adds an input record to `records_buffer`:
1) normalizes the input record
2) coerces the normalized record's values to str
3) appends the values, as a list, to the stream's buffer.
"""
norm_record = self._normalize_record(stream_name, record)
norm_values = list(map(str, norm_record.values()))
self.records_buffer[stream_name].append(norm_values)
def clear_buffer(self, stream_name: str):
"""
Cleans up the `records_buffer` values, belonging to input stream.
"""
self.records_buffer[stream_name].clear()
def _normalize_record(self, stream_name: str, record: Mapping) -> Mapping[str, Any]:
"""
Updates the record keys up to the input configured_stream catalog keys.
Handles two scenarios:
1) when record has less keys than catalog declares (undersetting)
2) when record has more keys than catalog declares (oversetting)
Returns: alphabetically sorted, catalog-normalized Mapping[str, Any].
EXAMPLE:
- UnderSetting:
* Catalog:
- has 3 entities:
[ 'id', 'key1', 'key2' ]
^
* Input record:
- missing 1 entity, compare to catalog
{ 'id': 123, 'key2': 'value' }
^
* Result:
- 'key1' has been added to the record, because it was declared in catalog, to keep the data structure.
{'id': 123, 'key1': '', {'key2': 'value'} }
^
- OverSetting:
* Catalog:
- has 3 entities:
[ 'id', 'key1', 'key2', ]
^
* Input record:
- doesn't have entity 'key1'
- has 1 more enitity, compare to catalog 'key3'
{ 'id': 123, ,'key2': 'value', 'key3': 'value' }
^ ^
* Result:
- 'key1' was added, because it expected be the part of the record, to keep the data structure
- 'key3' was dropped, because it was not declared in catalog, to keep the data structure
{ 'id': 123, 'key1': '', 'key2': 'value', }
^ ^
"""
headers = self.stream_info[stream_name]["headers"]
# undersetting scenario: add catalog keys that are missing from the record
for key in headers:
if key not in record:
record[key] = self.default_missing
# oversetting scenario: drop record keys that are not declared in the catalog
for key in list(record.keys()):
if key not in headers:
record.pop(key)
return dict(sorted(record.items(), key=lambda x: x[0]))
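# Normalization sketch (hypothetical stream named "users"):
# buffer.stream_info = {"users": {"headers": ["id", "key1", "key2"], "is_set": False}}
# buffer.records_buffer = {"users": []}
# buffer.add_to_buffer("users", {"id": 123, "key2": "value", "key3": "extra"})
# buffer.records_buffer["users"] -> [['123', '', 'value']]  ('key1' filled, 'key3' dropped)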
|
WriteBufferMixin
|
python
|
tensorflow__tensorflow
|
tensorflow/core/function/polymorphism/function_cache_test.py
|
{
"start": 2072,
"end": 2302
}
|
class ____(MockGenericType):
def most_specific_common_supertype(self, others):
if self._object == 2 and isinstance(others[0]._object, int):
return MockSupertypes2With3(3)
else:
return None
|
MockSupertypes2With3
|
python
|
django-extensions__django-extensions
|
tests/testapp/models.py
|
{
"start": 7976,
"end": 8081
}
|
class ____(SluggedTestModel):
class Meta:
app_label = "django_extensions"
|
ChildSluggedTestModel
|
python
|
huggingface__transformers
|
src/transformers/models/oneformer/modeling_oneformer.py
|
{
"start": 78594,
"end": 80710
}
|
class ____(nn.Module):
def __init__(
self, embed_dim, num_heads, dropout=0.0, activation="relu", normalize_before=False, layer_norm_eps=1e-05
):
super().__init__()
self.self_attn = OneFormerAttention(embed_dim=embed_dim, num_heads=num_heads, dropout=dropout, is_decoder=True)
self.norm = nn.LayerNorm(embed_dim, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.activation = ACT2FN[activation]
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
output,
output_mask: Optional[Tensor] = None,
output_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output2, attention_weights = self.self_attn(
hidden_states=output, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True
)
output = output + self.dropout(output2)
output = self.norm(output)
return output, attention_weights
def forward_pre(
self,
output,
output_mask: Optional[Tensor] = None,
output_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output2 = self.norm(output)
output2, attention_weights = self.self_attn(
hidden_states=output2, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True
)
output = output + self.dropout(output2)
return output, attention_weights
def forward(
self,
output,
output_mask: Optional[Tensor] = None,
output_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(output, output_mask, output_key_padding_mask, query_pos)
return self.forward_post(output, output_mask, output_key_padding_mask, query_pos)
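# Usage sketch (hypothetical sizes; comments only):
# layer = OneFormerTransformerDecoderSelfAttentionLayer(embed_dim=256, num_heads=8)
# out, attn_weights = layer(queries, query_pos=pos)
# pre-norm path if normalize_before=True, post-norm otherwise; `queries` and
# `pos` must share the same shape since with_pos_embed adds them elementwise.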
|
OneFormerTransformerDecoderSelfAttentionLayer
|
python
|
lepture__authlib
|
authlib/integrations/flask_oauth2/requests.py
|
{
"start": 305,
"end": 675
}
|
class ____(OAuth2Payload):
def __init__(self, request: Request):
self._request = request
@property
def data(self):
return self._request.values
@cached_property
def datalist(self):
values = defaultdict(list)
for k in self.data:
values[k].extend(self.data.getlist(k))
return values
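# Behavior sketch: `data` is Werkzeug's combined view of form and query values;
# `datalist` groups repeated parameters, e.g. a request with ?scope=a&scope=b
# (hypothetical) yields payload.datalist["scope"] == ["a", "b"].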
|
FlaskOAuth2Payload
|
python
|
django__django
|
tests/generic_inline_admin/models.py
|
{
"start": 1651,
"end": 1693
}
|
class ____(Episode):
pass
|
EpisodePermanent
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/strings_ops/reduce_join_op_test.py
|
{
"start": 2333,
"end": 3280
}
|
class ____(UnicodeTestCase):
"""Tests for helper functions."""
def testInputArray(self):
num_dims = 3
truth = ["{:03b}".format(i) for i in range(2**num_dims)]
output_array = _input_array(num_dims).reshape([-1])
self.assertAllEqualUnicode(truth, output_array)
def testJoinedArray(self):
num_dims = 3
truth_dim_zero = [["000100", "001101"], ["010110", "011111"]]
truth_dim_one = [["000010", "001011"], ["100110", "101111"]]
truth_dim_two = [["000001", "010011"], ["100101", "110111"]]
output_array_dim_zero = _joined_array(num_dims, reduce_dim=0)
output_array_dim_one = _joined_array(num_dims, reduce_dim=1)
output_array_dim_two = _joined_array(num_dims, reduce_dim=2)
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
self.assertAllEqualUnicode(truth_dim_two, output_array_dim_two)
|
ReduceJoinTestHelperTest
|
python
|
numpy__numpy
|
numpy/f2py/auxfuncs.py
|
{
"start": 14951,
"end": 26920
}
|
class ____:
def __init__(self, mess):
self.mess = mess
def __call__(self, var):
mess = f'\n\n var = {var}\n Message: {self.mess}\n'
raise F2PYError(mess)
def l_and(*f):
l1, l2 = 'lambda v', []
for i in range(len(f)):
l1 = '%s,f%d=f[%d]' % (l1, i, i)
l2.append('f%d(v)' % (i))
return eval(f"{l1}:{' and '.join(l2)}")
def l_or(*f):
l1, l2 = 'lambda v', []
for i in range(len(f)):
l1 = '%s,f%d=f[%d]' % (l1, i, i)
l2.append('f%d(v)' % (i))
return eval(f"{l1}:{' or '.join(l2)}")
def l_not(f):
return eval('lambda v,f=f:not f(v)')
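# Combinator sketch: these build composable predicates over f2py blocks, e.g.
# l_and(isstringfunction, l_not(isfunction_wrap))(rout) (used below) is True
# only when isstringfunction(rout) and not isfunction_wrap(rout).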
def isdummyroutine(rout):
try:
return rout['f2pyenhancements']['fortranname'] == ''
except KeyError:
return 0
def getfortranname(rout):
try:
name = rout['f2pyenhancements']['fortranname']
if name == '':
raise KeyError
if not name:
errmess(f"Failed to use fortranname from {rout['f2pyenhancements']}\n")
raise KeyError
except KeyError:
name = rout['name']
return name
def getmultilineblock(rout, blockname, comment=1, counter=0):
try:
r = rout['f2pyenhancements'].get(blockname)
except KeyError:
return
if not r:
return
if counter > 0 and isinstance(r, str):
return
if isinstance(r, list):
if counter >= len(r):
return
r = r[counter]
if r[:3] == "'''":
if comment:
r = '\t/* start ' + blockname + \
' multiline (' + repr(counter) + ') */\n' + r[3:]
else:
r = r[3:]
if r[-3:] == "'''":
if comment:
r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/'
else:
r = r[:-3]
else:
errmess(f"{blockname} multiline block should end with `'''`: {repr(r)}\n")
return r
def getcallstatement(rout):
return getmultilineblock(rout, 'callstatement')
def getcallprotoargument(rout, cb_map={}):
r = getmultilineblock(rout, 'callprotoargument', comment=0)
if r:
return r
if hascallstatement(rout):
outmess(
'warning: callstatement is defined without callprotoargument\n')
return
from .capi_maps import getctype
arg_types, arg_types2 = [], []
if l_and(isstringfunction, l_not(isfunction_wrap))(rout):
arg_types.extend(['char*', 'size_t'])
for n in rout['args']:
var = rout['vars'][n]
if isintent_callback(var):
continue
if n in cb_map:
ctype = cb_map[n] + '_typedef'
else:
ctype = getctype(var)
if l_and(isintent_c, l_or(isscalar, iscomplex))(var):
pass
elif isstring(var):
pass
elif not isattr_value(var):
ctype = ctype + '*'
if (isstring(var)
or isarrayofstrings(var) # obsolete?
or isstringarray(var)):
arg_types2.append('size_t')
arg_types.append(ctype)
proto_args = ','.join(arg_types + arg_types2)
if not proto_args:
proto_args = 'void'
return proto_args
def getusercode(rout):
return getmultilineblock(rout, 'usercode')
def getusercode1(rout):
return getmultilineblock(rout, 'usercode', counter=1)
def getpymethoddef(rout):
return getmultilineblock(rout, 'pymethoddef')
def getargs(rout):
sortargs, args = [], []
if 'args' in rout:
args = rout['args']
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args:
sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else:
sortargs = rout['args']
return args, sortargs
def getargs2(rout):
sortargs, args = [], rout.get('args', [])
auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])
and a not in args]
args = auxvars + args
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args:
sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else:
sortargs = auxvars + rout['args']
return args, sortargs
def getrestdoc(rout):
if 'f2pymultilines' not in rout:
return None
k = None
if rout['block'] == 'python module':
k = rout['block'], rout['name']
return rout['f2pymultilines'].get(k, None)
def gentitle(name):
ln = (80 - len(name) - 6) // 2
return f"/*{ln * '*'} {name} {ln * '*'}*/"
def flatlist(lst):
if isinstance(lst, list):
return reduce(lambda x, y, f=flatlist: x + f(y), lst, [])
return [lst]
def stripcomma(s):
if s and s[-1] == ',':
return s[:-1]
return s
def replace(str, d, defaultsep=''):
if isinstance(d, list):
return [replace(str, _m, defaultsep) for _m in d]
if isinstance(str, list):
return [replace(_m, d, defaultsep) for _m in str]
for k in 2 * list(d.keys()):
if k == 'separatorsfor':
continue
if 'separatorsfor' in d and k in d['separatorsfor']:
sep = d['separatorsfor'][k]
else:
sep = defaultsep
if isinstance(d[k], list):
str = str.replace(f'#{k}#', sep.join(flatlist(d[k])))
else:
str = str.replace(f'#{k}#', d[k])
return str
def dictappend(rd, ar):
if isinstance(ar, list):
for a in ar:
rd = dictappend(rd, a)
return rd
for k in ar.keys():
if k[0] == '_':
continue
if k in rd:
if isinstance(rd[k], str):
rd[k] = [rd[k]]
if isinstance(rd[k], list):
if isinstance(ar[k], list):
rd[k] = rd[k] + ar[k]
else:
rd[k].append(ar[k])
elif isinstance(rd[k], dict):
if isinstance(ar[k], dict):
if k == 'separatorsfor':
for k1 in ar[k].keys():
if k1 not in rd[k]:
rd[k][k1] = ar[k][k1]
else:
rd[k] = dictappend(rd[k], ar[k])
else:
rd[k] = ar[k]
return rd
def applyrules(rules, d, var={}):
ret = {}
if isinstance(rules, list):
for r in rules:
rr = applyrules(r, d, var)
ret = dictappend(ret, rr)
if '_break' in rr:
break
return ret
if '_check' in rules and (not rules['_check'](var)):
return ret
if 'need' in rules:
res = applyrules({'needs': rules['need']}, d, var)
if 'needs' in res:
cfuncs.append_needs(res['needs'])
for k in rules.keys():
if k == 'separatorsfor':
ret[k] = rules[k]
continue
if isinstance(rules[k], str):
ret[k] = replace(rules[k], d)
elif isinstance(rules[k], list):
ret[k] = []
for i in rules[k]:
ar = applyrules({k: i}, d, var)
if k in ar:
ret[k].append(ar[k])
elif k[0] == '_':
continue
elif isinstance(rules[k], dict):
ret[k] = []
for k1 in rules[k].keys():
if isinstance(k1, types.FunctionType) and k1(var):
if isinstance(rules[k][k1], list):
for i in rules[k][k1]:
if isinstance(i, dict):
res = applyrules({'supertext': i}, d, var)
i = res.get('supertext', '')
ret[k].append(replace(i, d))
else:
i = rules[k][k1]
if isinstance(i, dict):
res = applyrules({'supertext': i}, d)
i = res.get('supertext', '')
ret[k].append(replace(i, d))
else:
errmess(f'applyrules: ignoring rule {repr(rules[k])}.\n')
if isinstance(ret[k], list):
if len(ret[k]) == 1:
ret[k] = ret[k][0]
if ret[k] == []:
del ret[k]
return ret
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
re.I).match
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
r'__user__[\w_]*)', re.I).match
def get_f2py_modulename(source):
name = None
with open(source) as f:
for line in f:
m = _f2py_module_name_match(line)
if m:
if _f2py_user_module_name_match(line): # skip *__user__* names
continue
name = m.group('name')
break
return name
def getuseblocks(pymod):
all_uses = []
for inner in pymod['body']:
for modblock in inner['body']:
if modblock.get('use'):
all_uses.extend([x for x in modblock.get("use").keys() if "__" not in x])
return all_uses
def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False):
"""
Update the Fortran-to-C type mapping dictionary with new mappings and
return a list of successfully mapped C types.
This function integrates a new mapping dictionary into an existing
Fortran-to-C type mapping dictionary. It ensures that all keys are in
lowercase and validates new entries against a given C-to-Python mapping
dictionary. Redefinitions and invalid entries are reported with a warning.
Parameters
----------
f2cmap_all : dict
The existing Fortran-to-C type mapping dictionary that will be updated.
It should be a dictionary of dictionaries where the main keys represent
Fortran types and the nested dictionaries map Fortran type specifiers
to corresponding C types.
new_map : dict
A dictionary containing new type mappings to be added to `f2cmap_all`.
The structure should be similar to `f2cmap_all`, with keys representing
Fortran types and values being dictionaries of type specifiers and their
C type equivalents.
c2py_map : dict
A dictionary used for validating the C types in `new_map`. It maps C
types to corresponding Python types and is used to ensure that the C
types specified in `new_map` are valid.
verbose : boolean
A flag used to provide information about the types mapped
Returns
-------
tuple of (dict, list)
The updated Fortran-to-C type mapping dictionary and a list of
successfully mapped C types.
"""
f2cmap_mapped = []
new_map_lower = {}
for k, d1 in new_map.items():
d1_lower = {k1.lower(): v1 for k1, v1 in d1.items()}
new_map_lower[k.lower()] = d1_lower
for k, d1 in new_map_lower.items():
if k not in f2cmap_all:
f2cmap_all[k] = {}
for k1, v1 in d1.items():
if v1 in c2py_map:
if k1 in f2cmap_all[k]:
outmess(
"\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"
% (k, k1, f2cmap_all[k][k1], v1)
)
f2cmap_all[k][k1] = v1
if verbose:
outmess(f'\tMapping "{k}(kind={k1})" to "{v1}\"\n')
f2cmap_mapped.append(v1)
elif verbose:
errmess(
"\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"
% (k, k1, v1, v1, list(c2py_map.keys()))
)
return f2cmap_all, f2cmap_mapped
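# Example (hypothetical mappings):
# f2cmap = {'real': {'': 'float'}}
# new = {'REAL': {'KIND=8': 'double'}}
# c2py = {'float': 'float', 'double': 'float'}
# process_f2cmap_dict(f2cmap, new, c2py)
#   -> ({'real': {'': 'float', 'kind=8': 'double'}}, ['double'])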
|
throw_error
|
python
|
pytorch__pytorch
|
test/inductor/test_cooperative_reductions.py
|
{
"start": 731,
"end": 1664
}
|
class ____(InductorChoices):
def __init__(self, *, cooperative: bool, persistent: bool, cfg: dict[str, int]):
super().__init__()
self.cooperative = cooperative
self.persistent = persistent
self.cfg = cfg
self.call_count = 0
def triton_kernel_kwargs(
self,
kernel_cls: type[TritonKernel],
features: SIMDKernelFeatures,
groups: list[sympy.Expr],
kernel_kwargs: dict[str, Any],
) -> dict[str, Any]:
self.call_count += 1
return {
**kernel_kwargs,
"override_cooperative_reduction": self.cooperative,
"override_persistent_reduction": self.persistent,
"fixed_config": FixedTritonConfig(self.cfg),
}
@config.patch(
{
"triton.cooperative_reductions": True,
"triton.force_cooperative_reductions": True,
}
)
@instantiate_parametrized_tests
|
TestingHeuristics
|
python
|
neetcode-gh__leetcode
|
python/0518-coin-change-ii.py
|
{
"start": 0,
"end": 1456
}
|
class ____:
def change(self, amount: int, coins: List[int]) -> int:
# MEMOIZATION
# Time: O(n*m)
# Memory: O(n*m)
cache = {}
def dfs(i, a):
if a == amount:
return 1
if a > amount:
return 0
if i == len(coins):
return 0
if (i, a) in cache:
return cache[(i, a)]
cache[(i, a)] = dfs(i, a + coins[i]) + dfs(i + 1, a)
return cache[(i, a)]
return dfs(0, 0)
# DYNAMIC PROGRAMMING
# Time: O(n*m)
# Memory: O(n*m)
dp = [[0] * (len(coins) + 1) for _ in range(amount + 1)]
dp[0] = [1] * (len(coins) + 1)
for a in range(1, amount + 1):
for i in range(len(coins) - 1, -1, -1):
dp[a][i] = dp[a][i + 1]
if a - coins[i] >= 0:
dp[a][i] += dp[a - coins[i]][i]
return dp[amount][0]
# DYNAMIC PROGRAMMING
# Time: O(n*m)
# Memory: O(n) where n = amount
dp = [0] * (amount + 1)
dp[0] = 1
for i in range(len(coins) - 1, -1, -1):
nextDP = [0] * (amount + 1)
nextDP[0] = 1
for a in range(1, amount + 1):
nextDP[a] = dp[a]
if a - coins[i] >= 0:
nextDP[a] += nextDP[a - coins[i]]
dp = nextDP
return dp[amount]
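# Note: only the memoized DFS (first block) executes; the code after its
# `return dfs(0, 0)` is unreachable and kept as alternative DP solutions.
# Example: Solution().change(5, [1, 2, 5]) -> 4
# combinations: 5 | 2+2+1 | 2+1+1+1 | 1+1+1+1+1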
|
Solution
|
python
|
facebookresearch__faiss
|
tests/test_build_blocks.py
|
{
"start": 13773,
"end": 15554
}
|
class ____(unittest.TestCase):
def do_test_bucket_sort(self, nt):
rs = np.random.RandomState(123)
tab = rs.randint(100, size=1000, dtype='int64')
lims, perm = faiss.bucket_sort(tab, nt=nt)
for i in range(max(tab) + 1):
assert np.all(tab[perm[lims[i]: lims[i + 1]]] == i)
def test_bucket_sort(self):
self.do_test_bucket_sort(0)
def test_bucket_sort_parallel(self):
self.do_test_bucket_sort(4)
def do_test_bucket_sort_inplace(
self, nt, nrow=500, ncol=20, nbucket=300, repro=False,
dtype='int32'):
rs = np.random.RandomState(123)
tab = rs.randint(nbucket, size=(nrow, ncol), dtype=dtype)
tab2 = tab.copy()
faiss.cvar.bucket_sort_verbose = 1  # enable verbose output for this test
lims = faiss.matrix_bucket_sort_inplace(tab2, nt=nt)
tab2 = tab2.ravel()
for b in range(nbucket):
rows, _ = np.where(tab == b)
rows.sort()
tab2[lims[b]:lims[b + 1]].sort()
rows = set(rows)
self.assertEqual(rows, set(tab2[lims[b]:lims[b + 1]]))
def test_bucket_sort_inplace(self):
self.do_test_bucket_sort_inplace(0)
def test_bucket_sort_inplace_parallel(self):
self.do_test_bucket_sort_inplace(4)
def test_bucket_sort_inplace_parallel_fewcol(self):
self.do_test_bucket_sort_inplace(4, ncol=3)
def test_bucket_sort_inplace_parallel_fewbucket(self):
self.do_test_bucket_sort_inplace(4, nbucket=5)
def test_bucket_sort_inplace_int64(self):
self.do_test_bucket_sort_inplace(0, dtype='int64')
def test_bucket_sort_inplace_parallel_int64(self):
self.do_test_bucket_sort_inplace(4, dtype='int64')
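# Contract sketch (hypothetical values): faiss.bucket_sort(tab, nt=nt) returns
# (lims, perm) such that tab[perm[lims[b]:lims[b + 1]]] == b for every bucket b;
# matrix_bucket_sort_inplace overwrites the matrix with row indices, grouped per bucket.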
|
TestBucketSort
|
python
|
getsentry__sentry
|
tests/sentry/core/endpoints/test_project_details.py
|
{
"start": 18794,
"end": 59056
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-project-details"
method = "put"
def setUp(self) -> None:
super().setUp()
self.org_slug = self.project.organization.slug
self.proj_slug = self.project.slug
self.login_as(user=self.user)
def test_superuser_simple(self) -> None:
superuser = self.create_user(is_superuser=True)
self.login_as(user=superuser, superuser=True)
self.get_success_response(self.org_slug, self.proj_slug, platform="native")
project = Project.objects.get(id=self.project.id)
assert project.platform == "native"
def test_staff_simple(self) -> None:
staff_user = self.create_user(is_staff=True)
self.login_as(user=staff_user, staff=True)
self.get_success_response(self.org_slug, self.proj_slug, platform="native")
project = Project.objects.get(id=self.project.id)
assert project.platform == "native"
def test_blank_subject_prefix(self) -> None:
project = Project.objects.get(id=self.project.id)
options = {"mail:subject_prefix": "[Sentry]"}
self.get_success_response(self.org_slug, self.proj_slug, options=options)
assert project.get_option("mail:subject_prefix") == "[Sentry]"
options["mail:subject_prefix"] = ""
self.get_success_response(self.org_slug, self.proj_slug, options=options)
assert project.get_option("mail:subject_prefix") == ""
def test_simple_member_restriction(self) -> None:
project = self.create_project()
user = self.create_user("bar@example.com")
self.create_member(
user=user,
organization=project.organization,
teams=[project.teams.first()],
role="member",
)
self.login_as(user)
self.get_error_response(
self.org_slug,
self.proj_slug,
slug="zzz",
isBookmarked="true",
status_code=403,
)
assert not ProjectBookmark.objects.filter(
user_id=user.id, project_id=self.project.id
).exists()
def test_member_changes_permission_denied(self) -> None:
project = self.create_project()
user = self.create_user("bar@example.com")
self.create_member(
user=user,
organization=project.organization,
teams=[project.teams.first()],
role="member",
)
self.login_as(user=user)
self.get_error_response(
self.org_slug,
self.proj_slug,
slug="zzz",
isBookmarked="true",
status_code=403,
)
assert Project.objects.get(id=project.id).slug != "zzz"
assert not ProjectBookmark.objects.filter(user_id=user.id, project_id=project.id).exists()
@with_feature("organizations:team-roles")
def test_member_with_team_role(self) -> None:
user = self.create_user("bar@example.com")
self.create_member(
user=user,
organization=self.organization,
role="member",
)
team = self.create_team(organization=self.organization)
project = self.create_project(teams=[team])
self.create_team_membership(user=user, team=team, role="admin")
self.login_as(user=user)
self.get_success_response(
self.organization.slug,
project.slug,
slug="zzz",
isBookmarked="true",
)
assert Project.objects.get(id=project.id).slug == "zzz"
assert ProjectBookmark.objects.filter(user_id=user.id, project_id=project.id).exists()
def test_name(self) -> None:
self.get_success_response(self.org_slug, self.proj_slug, name="hello world")
project = Project.objects.get(id=self.project.id)
assert project.name == "hello world"
def test_slug(self) -> None:
with outbox_runner():
self.get_success_response(self.org_slug, self.proj_slug, slug="foobar")
project = Project.objects.get(id=self.project.id)
assert project.slug == "foobar"
assert ProjectRedirect.objects.filter(project=self.project, redirect_slug=self.proj_slug)
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
def test_invalid_slug(self) -> None:
new_project = self.create_project()
self.get_error_response(
self.org_slug,
self.proj_slug,
slug=new_project.slug,
status_code=400,
)
project = Project.objects.get(id=self.project.id)
assert project.slug != new_project.slug
def test_invalid_numeric_slug(self) -> None:
response = self.get_error_response(
self.org_slug,
self.proj_slug,
slug="1234",
status_code=400,
)
assert response.data["slug"][0] == DEFAULT_SLUG_ERROR_MESSAGE
def test_reserved_slug(self) -> None:
self.get_error_response(
self.org_slug,
self.proj_slug,
slug=list(RESERVED_PROJECT_SLUGS)[0],
status_code=400,
)
def test_platform(self) -> None:
self.get_success_response(self.org_slug, self.proj_slug, platform="python")
project = Project.objects.get(id=self.project.id)
assert project.platform == "python"
def test_platform_invalid(self) -> None:
self.get_error_response(self.org_slug, self.proj_slug, platform="lol", status_code=400)
def test_options(self) -> None:
options: dict[str, Any] = {
"sentry:resolve_age": 1,
"sentry:scrub_data": False,
"sentry:scrub_defaults": False,
"sentry:sensitive_fields": ["foo", "bar"],
"sentry:safe_fields": ["token"],
"sentry:store_crash_reports": 0,
"sentry:relay_pii_config": '{"applications": {"freeform": []}}',
"sentry:csp_ignored_sources_defaults": False,
"sentry:csp_ignored_sources": "foo\nbar",
"sentry:grouping_config": "some-config",
"filters:blacklisted_ips": "127.0.0.1\n198.51.100.0",
"filters:releases": "1.*\n2.1.*",
"filters:error_messages": "TypeError*\n*: integer division by modulo or zero",
"filters:log_messages": "Updated*\n*.sentry.io",
"filters:trace_metric_names": "counter.*\n*.duration",
"mail:subject_prefix": "[Sentry]",
"sentry:scrub_ip_address": False,
"sentry:origins": "*",
"sentry:scrape_javascript": False,
"sentry:token": "*",
"sentry:token_header": "*",
"sentry:verify_ssl": False,
"sentry:replay_hydration_error_issues": True,
"sentry:toolbar_allowed_origins": "*.sentry.io\nexample.net \nnugettrends.com",
"sentry:replay_rage_click_issues": True,
"sentry:feedback_user_report_notifications": True,
"sentry:feedback_ai_spam_detection": True,
"feedback:branding": False,
"filters:react-hydration-errors": True,
"filters:chunk-load-error": True,
}
with (
self.feature(
[
"projects:custom-inbound-filters",
"organizations:ourlogs-ingestion",
"organizations:tracemetrics-ingestion",
]
),
outbox_runner(),
):
self.get_success_response(self.org_slug, self.proj_slug, options=options)
project = Project.objects.get(id=self.project.id)
assert project.get_option("sentry:origins", []) == options["sentry:origins"].split("\n")
assert project.get_option("sentry:resolve_age", 0) == options["sentry:resolve_age"]
assert project.get_option("sentry:scrub_data", True) == options["sentry:scrub_data"]
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert project.get_option("sentry:scrub_defaults", True) == options["sentry:scrub_defaults"]
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert (
project.get_option("sentry:sensitive_fields", []) == options["sentry:sensitive_fields"]
)
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert project.get_option("sentry:safe_fields", []) == options["sentry:safe_fields"]
assert (
project.get_option("sentry:store_crash_reports")
== options["sentry:store_crash_reports"]
)
assert (
project.get_option("sentry:relay_pii_config", "") == options["sentry:relay_pii_config"]
)
assert project.get_option("sentry:grouping_config", "") == options["sentry:grouping_config"]
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert (
project.get_option("sentry:csp_ignored_sources_defaults", True)
== options["sentry:csp_ignored_sources_defaults"]
)
assert project.get_option("sentry:csp_ignored_sources", []) == options[
"sentry:csp_ignored_sources"
].split("\n")
assert project.get_option("sentry:blacklisted_ips") == ["127.0.0.1", "198.51.100.0"]
assert project.get_option("sentry:releases") == ["1.*", "2.1.*"]
assert project.get_option("sentry:error_messages") == [
"TypeError*",
"*: integer division by modulo or zero",
]
assert project.get_option("sentry:log_messages") == [
"Updated*",
"*.sentry.io",
]
assert project.get_option("sentry:trace_metric_names") == [
"counter.*",
"*.duration",
]
assert project.get_option("mail:subject_prefix", "[Sentry]")
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert project.get_option("sentry:resolve_age", 1)
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert (
project.get_option("sentry:scrub_ip_address", True)
== options["sentry:scrub_ip_address"]
)
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert project.get_option("sentry:origins", "*")
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert (
project.get_option("sentry:scrape_javascript", False)
== options["sentry:scrape_javascript"]
)
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert project.get_option("sentry:token", "*")
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert project.get_option("sentry:token_header", "*")
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert project.get_option("sentry:verify_ssl", False) == options["sentry:verify_ssl"]
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert project.get_option("feedback:branding") == "0"
assert project.get_option("sentry:replay_hydration_error_issues") is True
assert project.get_option("sentry:toolbar_allowed_origins") == [
"*.sentry.io",
"example.net",
"nugettrends.com",
]
assert project.get_option("sentry:replay_rage_click_issues") is True
assert project.get_option("sentry:feedback_user_report_notifications") is True
assert project.get_option("sentry:feedback_ai_spam_detection") is True
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).exists()
assert project.get_option("filters:react-hydration-errors", "1")
assert project.get_option("filters:chunk-load-error", "1")
self.project.update_option(
"relay.cardinality-limiter.limits",
[
{
"limit": {
"id": "project-override-custom",
"window": {"windowSeconds": 3600, "granularitySeconds": 600},
"limit": 1000,
"namespace": "custom",
"scope": "name",
}
}
],
)
def test_bookmarks(self) -> None:
self.get_success_response(self.org_slug, self.proj_slug, isBookmarked="false")
assert not ProjectBookmark.objects.filter(
project_id=self.project.id, user_id=self.user.id
).exists()
def test_security_token(self) -> None:
resp = self.get_success_response(self.org_slug, self.proj_slug, securityToken="fizzbuzz")
assert self.project.get_security_token() == "fizzbuzz"
assert resp.data["securityToken"] == "fizzbuzz"
# can delete
resp = self.get_success_response(self.org_slug, self.proj_slug, securityToken="")
assert self.project.get_security_token() == ""
assert resp.data["securityToken"] == ""
def test_security_token_header(self) -> None:
value = "X-Hello-World"
resp = self.get_success_response(self.org_slug, self.proj_slug, securityTokenHeader=value)
assert self.project.get_option("sentry:token_header") == "X-Hello-World"
assert resp.data["securityTokenHeader"] == "X-Hello-World"
# can delete
resp = self.get_success_response(self.org_slug, self.proj_slug, securityTokenHeader="")
assert self.project.get_option("sentry:token_header") == ""
assert resp.data["securityTokenHeader"] == ""
def test_verify_ssl(self) -> None:
resp = self.get_success_response(self.org_slug, self.proj_slug, verifySSL=False)
assert self.project.get_option("sentry:verify_ssl") is False
assert resp.data["verifySSL"] is False
def test_scrub_ip_address(self) -> None:
resp = self.get_success_response(self.org_slug, self.proj_slug, scrubIPAddresses=True)
assert self.project.get_option("sentry:scrub_ip_address") is True
assert resp.data["scrubIPAddresses"] is True
resp = self.get_success_response(self.org_slug, self.proj_slug, scrubIPAddresses=False)
assert self.project.get_option("sentry:scrub_ip_address") is False
assert resp.data["scrubIPAddresses"] is False
def test_scrape_javascript(self) -> None:
resp = self.get_success_response(self.org_slug, self.proj_slug, scrapeJavaScript=False)
assert self.project.get_option("sentry:scrape_javascript") is False
assert resp.data["scrapeJavaScript"] is False
def test_default_environment(self) -> None:
resp = self.get_success_response(self.org_slug, self.proj_slug, defaultEnvironment="dev")
assert self.project.get_option("sentry:default_environment") == "dev"
assert resp.data["defaultEnvironment"] == "dev"
resp = self.get_success_response(self.org_slug, self.proj_slug, defaultEnvironment="")
assert self.project.get_option("sentry:default_environment") == ""
assert resp.data["defaultEnvironment"] == ""
def test_resolve_age(self) -> None:
resp = self.get_success_response(self.org_slug, self.proj_slug, resolveAge=5)
assert self.project.get_option("sentry:resolve_age") == 5
assert resp.data["resolveAge"] == 5
# can set to 0 or delete
resp = self.get_success_response(self.org_slug, self.proj_slug, resolveAge="")
assert self.project.get_option("sentry:resolve_age") == 0
assert resp.data["resolveAge"] == 0
def test_allowed_domains(self) -> None:
value = ["foobar.com", "https://example.com"]
resp = self.get_success_response(self.org_slug, self.proj_slug, allowedDomains=value)
assert self.project.get_option("sentry:origins") == ["foobar.com", "https://example.com"]
assert resp.data["allowedDomains"] == ["foobar.com", "https://example.com"]
# cannot be empty
resp = self.get_error_response(
self.org_slug, self.proj_slug, allowedDomains="", status_code=400
)
assert self.project.get_option("sentry:origins") == ["foobar.com", "https://example.com"]
assert resp.data["allowedDomains"] == [
"Empty value will block all requests, use * to accept from all domains"
]
resp = self.get_success_response(
self.org_slug,
self.proj_slug,
allowedDomains=["*", ""],
)
assert self.project.get_option("sentry:origins") == ["*"]
assert resp.data["allowedDomains"] == ["*"]
def test_safe_fields(self) -> None:
value = ["foobar", "extra.fields.**"]
resp = self.get_success_response(self.org_slug, self.proj_slug, safeFields=value)
assert self.project.get_option("sentry:safe_fields") == [
"foobar",
"extra.fields.**",
]
assert resp.data["safeFields"] == ["foobar", "extra.fields.**"]
value = ["er ror", "double.**.wildcard.**"]
resp = self.get_error_response(self.org_slug, self.proj_slug, safeFields=value)
assert resp.data["safeFields"] == [
'Invalid syntax near "er ror" (line 1),\nDeep wildcard used more than once (line 2)',
]
def test_highlight_tags(self) -> None:
# Unrelated change returns presets
resp = self.get_success_response(self.org_slug, self.proj_slug)
assert self.project.get_option("sentry:highlight_tags") is None
preset = get_highlight_preset_for_project(self.project)
assert resp.data["highlightTags"] == preset["tags"]
assert resp.data["highlightPreset"] == preset
# Set to custom
highlight_tags = ["bears", "beets", "battlestar_galactica", "blue bugs"]
resp = self.get_success_response(
self.org_slug,
self.proj_slug,
highlightTags=highlight_tags,
)
assert self.project.get_option("sentry:highlight_tags") == highlight_tags
assert resp.data["highlightTags"] == highlight_tags
# Set to empty
resp = self.get_success_response(
self.org_slug,
self.proj_slug,
highlightTags=[],
)
assert self.project.get_option("sentry:highlight_tags") == []
assert resp.data["highlightTags"] == []
assert resp.data["highlightPreset"] == preset
def test_highlight_context(self) -> None:
# Unrelated change returns presets
resp = self.get_success_response(self.org_slug, self.proj_slug)
preset = get_highlight_preset_for_project(self.project)
assert self.project.get_option("sentry:highlight_context") is None
assert resp.data["highlightContext"] == preset["context"]
assert resp.data["highlightPreset"] == preset
# Set to custom with different separators
highlight_context_types = ["birdwords", "bird-words", "bird words"]
for highlight_context_type in highlight_context_types:
highlight_context_value = [
"red",
"robin",
"blue",
"jay",
"red",
"blue",
"canadian goose",
]
highlight_context = {highlight_context_type: highlight_context_value}
resp = self.get_success_response(
self.org_slug,
self.proj_slug,
highlightContext=highlight_context,
)
option_result = self.project.get_option("sentry:highlight_context")
resp_result = resp.data["highlightContext"]
for highlight_context_key in highlight_context[highlight_context_type]:
assert highlight_context_key in option_result[highlight_context_type]
assert highlight_context_key in resp_result[highlight_context_type]
# Filters duplicates
assert (
len(option_result[highlight_context_type])
== len(resp_result[highlight_context_type])
== 5
)
# Set to empty
resp = self.get_success_response(
self.org_slug,
self.proj_slug,
highlightContext={},
)
assert self.project.get_option("sentry:highlight_context") == {}
assert resp.data["highlightContext"] == {}
assert resp.data["highlightPreset"] == preset
# Checking validation
resp = self.get_error_response(
self.org_slug,
self.proj_slug,
highlightContext=["bird-words", ["red", "blue"]],
)
assert "Expected a dictionary" in resp.data["highlightContext"][0]
resp = self.get_error_response(
self.org_slug,
self.proj_slug,
highlightContext={"": ["empty", "context", "type"]},
)
assert "Key '' is invalid" in resp.data["highlightContext"][0]
resp = self.get_error_response(
self.org_slug,
self.proj_slug,
highlightContext={"! {} #$%$?": ["empty", "context", "type"]},
)
assert "Key '! {} #$%$?' is invalid" in resp.data["highlightContext"][0]
resp = self.get_error_response(
self.org_slug,
self.proj_slug,
highlightContext={"bird \n words": ["empty", "context", "type"]},
)
assert "Key 'bird \n words' is invalid" in resp.data["highlightContext"][0]
resp = self.get_error_response(
self.org_slug,
self.proj_slug,
highlightContext={"bird-words": ["invalid", 123, "integer"]},
)
assert "must be a list of strings" in resp.data["highlightContext"][0]
def test_store_crash_reports(self) -> None:
resp = self.get_success_response(self.org_slug, self.proj_slug, storeCrashReports=10)
assert self.project.get_option("sentry:store_crash_reports") == 10
assert resp.data["storeCrashReports"] == 10
def test_store_crash_reports_exceeded(self) -> None:
# NB: Align with test_organization_details.py
data = {"storeCrashReports": 101}
resp = self.get_error_response(self.org_slug, self.proj_slug, status_code=400, **data)
assert self.project.get_option("sentry:store_crash_reports") is None
assert b"storeCrashReports" in resp.content
def test_store_crash_reports_inherit_organization_settings(self) -> None:
resp = self.get_success_response(self.org_slug, self.proj_slug, storeCrashReports=None)
assert self.project.get_option("sentry:store_crash_reports") is None
assert resp.data["storeCrashReports"] is None
def test_debug_files_role(self) -> None:
# Test setting a valid role
resp = self.get_success_response(self.org_slug, self.proj_slug, debugFilesRole="admin")
assert self.project.get_option("sentry:debug_files_role") == "admin"
assert resp.data["debugFilesRole"] == "admin"
# Test setting another valid role
resp = self.get_success_response(self.org_slug, self.proj_slug, debugFilesRole="member")
assert self.project.get_option("sentry:debug_files_role") == "member"
assert resp.data["debugFilesRole"] == "member"
# Test setting to None (inherit from organization)
resp = self.get_success_response(self.org_slug, self.proj_slug, debugFilesRole=None)
assert self.project.get_option("sentry:debug_files_role") is None
assert resp.data["debugFilesRole"] is None
def test_debug_files_role_invalid(self) -> None:
# Test with invalid role
self.get_error_response(
self.org_slug, self.proj_slug, debugFilesRole="invalid_role", status_code=400
)
assert self.project.get_option("sentry:debug_files_role") is None
def test_react_hydration_errors(self) -> None:
options = {"filters:react-hydration-errors": False}
resp = self.get_success_response(self.org_slug, self.proj_slug, options=options)
assert self.project.get_option("filters:react-hydration-errors") == "0"
assert resp.data["options"]["filters:react-hydration-errors"] is False
options = {"filters:react-hydration-errors": True}
resp = self.get_success_response(self.org_slug, self.proj_slug, options=options)
assert self.project.get_option("filters:react-hydration-errors") == "1"
assert resp.data["options"]["filters:react-hydration-errors"] is True
def test_chunk_load_error(self) -> None:
options = {"filters:chunk-load-error": False}
resp = self.get_success_response(self.org_slug, self.proj_slug, options=options)
assert self.project.get_option("filters:chunk-load-error") == "0"
assert resp.data["options"]["filters:chunk-load-error"] is False
options = {"filters:chunk-load-error": True}
resp = self.get_success_response(self.org_slug, self.proj_slug, options=options)
assert self.project.get_option("filters:chunk-load-error") == "1"
assert resp.data["options"]["filters:chunk-load-error"] is True
def test_relay_pii_config(self) -> None:
value = '{"applications": {"freeform": []}}'
resp = self.get_success_response(self.org_slug, self.proj_slug, relayPiiConfig=value)
assert self.project.get_option("sentry:relay_pii_config") == value
assert resp.data["relayPiiConfig"] == value
def test_sensitive_fields(self) -> None:
value = ["foobar.com", "https://example.com"]
resp = self.get_success_response(self.org_slug, self.proj_slug, sensitiveFields=value)
assert self.project.get_option("sentry:sensitive_fields") == [
"foobar.com",
"https://example.com",
]
assert resp.data["sensitiveFields"] == ["foobar.com", "https://example.com"]
def test_sensitive_fields_too_long(self) -> None:
value = 1000 * ["0123456789"] + ["1"]
resp = self.get_response(self.org_slug, self.proj_slug, sensitiveFields=value)
assert resp.status_code == 400
def test_data_scrubber(self) -> None:
resp = self.get_success_response(self.org_slug, self.proj_slug, dataScrubber=False)
assert self.project.get_option("sentry:scrub_data") is False
assert resp.data["dataScrubber"] is False
def test_data_scrubber_defaults(self) -> None:
resp = self.get_success_response(self.org_slug, self.proj_slug, dataScrubberDefaults=False)
assert self.project.get_option("sentry:scrub_defaults") is False
assert resp.data["dataScrubberDefaults"] is False
def test_digests_delay(self) -> None:
self.get_success_response(self.org_slug, self.proj_slug, digestsMinDelay=1000)
assert self.project.get_option("digests:mail:minimum_delay") == 1000
self.get_success_response(self.org_slug, self.proj_slug, digestsMaxDelay=1200)
assert self.project.get_option("digests:mail:maximum_delay") == 1200
self.get_success_response(
self.org_slug, self.proj_slug, digestsMinDelay=300, digestsMaxDelay=600
)
assert self.project.get_option("digests:mail:minimum_delay") == 300
assert self.project.get_option("digests:mail:maximum_delay") == 600
def test_digests_min_without_max(self) -> None:
self.get_success_response(self.org_slug, self.proj_slug, digestsMinDelay=1200)
assert self.project.get_option("digests:mail:minimum_delay") == 1200
def test_digests_max_without_min(self) -> None:
self.get_success_response(self.org_slug, self.proj_slug, digestsMaxDelay=1200)
assert self.project.get_option("digests:mail:maximum_delay") == 1200
def test_invalid_digests_min_delay(self) -> None:
min_delay = 120
self.project.update_option("digests:mail:minimum_delay", min_delay)
self.get_error_response(self.org_slug, self.proj_slug, digestsMinDelay=59, status_code=400)
self.get_error_response(
self.org_slug, self.proj_slug, digestsMinDelay=3601, status_code=400
)
assert self.project.get_option("digests:mail:minimum_delay") == min_delay
def test_invalid_digests_max_delay(self) -> None:
min_delay = 120
max_delay = 360
self.project.update_option("digests:mail:minimum_delay", min_delay)
self.project.update_option("digests:mail:maximum_delay", max_delay)
self.get_error_response(self.org_slug, self.proj_slug, digestsMaxDelay=59, status_code=400)
self.get_error_response(
self.org_slug, self.proj_slug, digestsMaxDelay=3601, status_code=400
)
assert self.project.get_option("digests:mail:maximum_delay") == max_delay
# test sending only max
self.get_error_response(self.org_slug, self.proj_slug, digestsMaxDelay=100, status_code=400)
assert self.project.get_option("digests:mail:maximum_delay") == max_delay
# test sending min + invalid max
self.get_error_response(
self.org_slug, self.proj_slug, digestsMinDelay=120, digestsMaxDelay=100, status_code=400
)
assert self.project.get_option("digests:mail:minimum_delay") == min_delay
assert self.project.get_option("digests:mail:maximum_delay") == max_delay
@mock.patch("sentry.api.base.create_audit_entry")
def test_redacted_symbol_source_secrets(self, create_audit_entry: mock.MagicMock) -> None:
with Feature(
{"organizations:symbol-sources": True, "organizations:custom-symbol-sources": True}
):
config = {
"id": "honk",
"name": "honk source",
"layout": {
"type": "native",
},
"filters": {"filetypes": ["pe"]},
"type": "http",
"url": "http://honk.beep",
"username": "honkhonk",
"password": "beepbeep",
}
self.get_success_response(
self.org_slug, self.proj_slug, symbolSources=orjson.dumps([config]).decode()
)
config["id"] = first_symbol_source_id(self.project.get_option("sentry:symbol_sources"))
assert (
self.project.get_option("sentry:symbol_sources") == orjson.dumps([config]).decode()
)
# redact password
redacted_source = config.copy()
redacted_source["password"] = {"hidden-secret": True}
# check that audit entry was created with redacted password
assert create_audit_entry.called
((_, kwargs),) = create_audit_entry.call_args_list
assert kwargs["data"] == {
"sentry:symbol_sources": [redacted_source],
"id": self.project.id,
"slug": self.project.slug,
"name": self.project.name,
"status": self.project.status,
"public": self.project.public,
}
self.get_success_response(
self.org_slug,
self.proj_slug,
symbolSources=orjson.dumps([redacted_source]).decode(),
)
# on save the magic object should be replaced with the previously set password
assert (
self.project.get_option("sentry:symbol_sources") == orjson.dumps([config]).decode()
)
@mock.patch("sentry.api.base.create_audit_entry")
def test_redacted_symbol_source_secrets_unknown_secret(
self, create_audit_entry: mock.MagicMock
) -> None:
with Feature(
{"organizations:symbol-sources": True, "organizations:custom-symbol-sources": True}
):
config = {
"id": "honk",
"name": "honk source",
"layout": {
"type": "native",
},
"filters": {"filetypes": ["pe"]},
"type": "http",
"url": "http://honk.beep",
"username": "honkhonk",
"password": "beepbeep",
}
self.get_success_response(
self.org_slug, self.proj_slug, symbolSources=orjson.dumps([config]).decode()
)
config["id"] = first_symbol_source_id(self.project.get_option("sentry:symbol_sources"))
assert (
self.project.get_option("sentry:symbol_sources") == orjson.dumps([config]).decode()
)
# prepare new call, this secret is not known
new_source = config.copy()
new_source["password"] = {"hidden-secret": True}
new_source["id"] = "oops"
response = self.get_response(
self.org_slug, self.proj_slug, symbolSources=orjson.dumps([new_source]).decode()
)
assert response.status_code == 400
assert orjson.loads(response.content) == {
"symbolSources": ["Hidden symbol source secret is missing a value"]
}
def symbol_sources(self):
project = Project.objects.get(id=self.project.id)
source1 = {
"id": "honk",
"name": "honk source",
"layout": {
"type": "native",
},
"filters": {"filetypes": ["pe"]},
"type": "http",
"url": "http://honk.beep",
"username": "honkhonk",
"password": "beepbeep",
}
source2 = {
"id": "bloop",
"name": "bloop source",
"layout": {
"type": "native",
},
"filters": {"filetypes": ["pe"]},
"type": "http",
"url": "http://honk.beep",
"username": "honkhonk",
"password": "beepbeep",
}
project.update_option("sentry:symbol_sources", orjson.dumps([source1, source2]).decode())
return [source1, source2]
def test_symbol_sources_no_modification(self) -> None:
source1, source2 = self.symbol_sources()
project = Project.objects.get(id=self.project.id)
with Feature({"organizations:custom-symbol-sources": False}):
resp = self.get_response(
self.org_slug,
self.proj_slug,
symbolSources=orjson.dumps([source1, source2]).decode(),
)
assert resp.status_code == 200
assert project.get_option(
"sentry:symbol_sources", orjson.dumps([source1, source2]).decode()
)
def test_symbol_sources_deletion(self) -> None:
source1, source2 = self.symbol_sources()
project = Project.objects.get(id=self.project.id)
with Feature({"organizations:custom-symbol-sources": False}):
resp = self.get_response(
self.org_slug, self.proj_slug, symbolSources=orjson.dumps([source1]).decode()
)
assert resp.status_code == 200
assert project.get_option("sentry:symbol_sources", orjson.dumps([source1]).decode())
@with_feature({"organizations:dynamic-sampling-custom": False})
def test_target_sample_rate_without_feature(self) -> None:
self.project.update_option("sentry:target_sample_rate", 1.0)
self.get_error_response(
self.org_slug, self.proj_slug, targetSampleRate=0.1, status_code=400
)
assert self.project.get_option("sentry:target_sample_rate") == 1.0
@with_feature({"organizations:dynamic-sampling-custom": True})
def test_target_sample_rate_automatic_mode(self) -> None:
self.project.update_option("sentry:target_sample_rate", 1.0)
# automatic mode is called "organization" in code
self.organization.update_option(
"sentry:sampling_mode", DynamicSamplingMode.ORGANIZATION.value
)
self.get_error_response(
self.org_slug, self.proj_slug, targetSampleRate=0.1, status_code=400
)
assert self.project.get_option("sentry:target_sample_rate") == 1.0
@with_feature({"organizations:dynamic-sampling-custom": True})
def test_target_sample_rate_invalid(self) -> None:
self.project.update_option("sentry:target_sample_rate", 1.0)
self.organization.update_option("sentry:sampling_mode", DynamicSamplingMode.PROJECT.value)
self.get_error_response(
self.org_slug, self.proj_slug, targetSampleRate=2.0, status_code=400
)
assert self.project.get_option("sentry:target_sample_rate") == 1.0
@with_feature({"organizations:dynamic-sampling-custom": True})
def test_target_sample_rate(self) -> None:
self.project.update_option("sentry:target_sample_rate", 1.0)
self.organization.update_option("sentry:sampling_mode", DynamicSamplingMode.PROJECT.value)
self.get_success_response(self.org_slug, self.proj_slug, targetSampleRate=0.1)
assert self.project.get_option("sentry:target_sample_rate") == 0.1
def test_no_setting_grouping_configs(self) -> None:
response = self.get_error_response(
self.org_slug, self.proj_slug, groupingConfig="some config", status_code=400
)
assert "Grouping config cannot be manually set" in response.text
response = self.get_error_response(
self.org_slug, self.proj_slug, secondaryGroupingConfig="another config", status_code=400
)
assert "Secondary grouping config cannot be manually set" in response.text
response = self.get_error_response(
self.org_slug, self.proj_slug, secondaryGroupingExpiry=12311121, status_code=400
)
assert "Secondary grouping expiry cannot be manually set" in response.text
|
ProjectUpdateTest
|
python
|
milvus-io__pymilvus
|
pymilvus/orm/schema.py
|
{
"start": 16746,
"end": 24620
}
|
class ____:
def __init__(self, name: str, dtype: DataType, description: str = "", **kwargs) -> None:
self.name = name
try:
dtype = DataType(dtype)
except ValueError:
raise DataTypeNotSupportException(message=ExceptionsMessage.FieldDtype) from None
if dtype == DataType.UNKNOWN:
raise DataTypeNotSupportException(message=ExceptionsMessage.FieldDtype)
self._dtype = dtype
self._description = description
self._type_params = {}
self._kwargs = copy.deepcopy(kwargs)
if not isinstance(kwargs.get("is_primary", False), bool):
raise PrimaryKeyException(message=ExceptionsMessage.IsPrimaryType)
self.is_primary = kwargs.get("is_primary", False)
self.is_dynamic = kwargs.get("is_dynamic", False)
self.nullable = kwargs.get("nullable", False)
self.auto_id = kwargs.get("auto_id", False)
if "auto_id" in kwargs:
if not isinstance(self.auto_id, bool):
raise AutoIDException(message=ExceptionsMessage.AutoIDType)
if not self.is_primary and self.auto_id:
raise PrimaryKeyException(message=ExceptionsMessage.AutoIDOnlyOnPK)
if not isinstance(kwargs.get("is_partition_key", False), bool):
raise PartitionKeyException(message=ExceptionsMessage.IsPartitionKeyType)
if not isinstance(kwargs.get("is_clustering_key", False), bool):
raise ClusteringKeyException(message=ExceptionsMessage.IsClusteringKeyType)
self.is_partition_key = kwargs.get("is_partition_key", False)
self.is_clustering_key = kwargs.get("is_clustering_key", False)
self.default_value = kwargs.get("default_value")
if "default_value" in kwargs and self.default_value is None and not self.nullable:
raise ParamError(message=ExceptionsMessage.DefaultValueInvalid)
if isinstance(self.default_value, schema_types.ValueField):
if self.default_value.WhichOneof("data") is None:
self.default_value = None
else:
self.default_value = infer_default_value_bydata(kwargs.get("default_value"))
self.element_type = kwargs.get("element_type")
if "mmap_enabled" in kwargs:
self._type_params["mmap_enabled"] = kwargs["mmap_enabled"]
for key in ["analyzer_params", "multi_analyzer_params"]:
if key in self._kwargs and isinstance(self._kwargs[key], dict):
self._kwargs[key] = orjson.dumps(self._kwargs[key]).decode(Config.EncodeProtocol)
self._parse_type_params()
self.is_function_output = False
def __repr__(self) -> str:
return str(self.to_dict())
def __deepcopy__(self, memodict: Optional[Dict] = None):
if memodict is None:
memodict = {}
return self.construct_from_dict(self.to_dict())
def _parse_type_params(self):
# update self._type_params according to self._kwargs
if self._dtype not in (
DataType.BINARY_VECTOR,
DataType.FLOAT_VECTOR,
DataType.FLOAT16_VECTOR,
DataType.BFLOAT16_VECTOR,
DataType.VARCHAR,
DataType.ARRAY,
DataType.SPARSE_FLOAT_VECTOR,
DataType.INT8_VECTOR,
DataType._ARRAY_OF_VECTOR,
):
return
if not self._kwargs:
return
for k in COMMON_TYPE_PARAMS:
if k in self._kwargs:
if self._type_params is None:
self._type_params = {}
if isinstance(self._kwargs[k], str):
if self._kwargs[k].lower() == "true":
self._type_params[k] = True
continue
if self._kwargs[k].lower() == "false":
self._type_params[k] = False
continue
self._type_params[k] = self._kwargs[k]
@classmethod
def construct_from_dict(cls, raw: Dict):
kwargs = {}
kwargs.update(raw.get("params", {}))
kwargs["is_primary"] = raw.get("is_primary", False)
if raw.get("auto_id") is not None:
kwargs["auto_id"] = raw.get("auto_id")
kwargs["is_partition_key"] = raw.get("is_partition_key", False)
kwargs["is_clustering_key"] = raw.get("is_clustering_key", False)
if raw.get("default_value") is not None:
kwargs["default_value"] = raw.get("default_value")
kwargs["is_dynamic"] = raw.get("is_dynamic", False)
kwargs["nullable"] = raw.get("nullable", False)
kwargs["element_type"] = raw.get("element_type")
is_function_output = raw.get("is_function_output", False)
fs = FieldSchema(raw["name"], raw["type"], raw.get("description", ""), **kwargs)
fs.is_function_output = is_function_output
return fs
def to_dict(self):
_dict = {
"name": self.name,
"description": self._description,
"type": self.dtype,
}
if self._type_params:
_dict["params"] = copy.deepcopy(self.params)
if self.is_primary:
_dict["is_primary"] = True
_dict["auto_id"] = self.auto_id
if self.is_partition_key:
_dict["is_partition_key"] = True
if self.default_value is not None:
if self.default_value.WhichOneof("data") is None:
self.default_value = None
_dict["default_value"] = self.default_value
if self.is_dynamic:
_dict["is_dynamic"] = self.is_dynamic
if self.nullable:
_dict["nullable"] = self.nullable
if (
self.dtype == DataType.ARRAY or self._dtype == DataType._ARRAY_OF_VECTOR
) and self.element_type:
_dict["element_type"] = self.element_type
if self.is_clustering_key:
_dict["is_clustering_key"] = True
if self.is_function_output:
_dict["is_function_output"] = True
return _dict
def __getattr__(self, item: str):
if self._type_params and item in self._type_params:
return self._type_params[item]
return None
def __eq__(self, other: object):
if not isinstance(other, FieldSchema):
return False
return self.to_dict() == other.to_dict()
@property
def description(self):
"""
Returns the text description of the FieldSchema.
:return str:
FieldSchema description text, returned when the operation is successful.
:example:
>>> from pymilvus import FieldSchema, DataType
>>> field = FieldSchema("int64", DataType.INT64, description="int64", is_primary=False)
>>> field.description
'int64'
"""
return self._description
@property
def params(self):
"""
Returns the parameters of the field.
        :return dict:
            Dictionary of the field's type parameters.
:example:
>>> from pymilvus import FieldSchema, DataType
>>> field = FieldSchema("int64", DataType.INT64, description="int64", is_primary=False)
>>> field.params
{}
>>> fvec_field = FieldSchema("fvec", DataType.FLOAT_VECTOR, is_primary=False, dim=128)
>>> fvec_field.params
{'dim': 128}
"""
return self._type_params
@property
def dtype(self) -> DataType:
return self._dtype
def isVectorDataType(datatype: DataType) -> bool:
return datatype in (
DataType.FLOAT_VECTOR,
DataType.FLOAT16_VECTOR,
DataType.BFLOAT16_VECTOR,
DataType.INT8_VECTOR,
DataType.BINARY_VECTOR,
DataType.SPARSE_FLOAT_VECTOR,
)
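# --- Usage sketch (illustrative; the field names and dim below are arbitrary) ---
pk = FieldSchema("id", DataType.INT64, is_primary=True, auto_id=True)
vec = FieldSchema("embedding", DataType.FLOAT_VECTOR, dim=128)
assert vec.params == {"dim": 128}
assert isVectorDataType(vec.dtype)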
|
FieldSchema
|
python
|
pytorch__pytorch
|
test/test_dataloader.py
|
{
"start": 128999,
"end": 129500
}
|
class ____(Dataset):
def __init__(self, end: int, slow_index: int):
self.end = end
self.slow_index = slow_index
self._worker_id = None
def __getitem__(self, idx):
if not self._worker_id:
worker_info = torch.utils.data.get_worker_info()
self._worker_id = worker_info.id
if idx == self.slow_index:
time.sleep(1.0)
return (self._worker_id, idx)
def __len__(self):
return self.end
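# --- Usage sketch (illustrative, not part of the original test) ---
# Each item is a (worker_id, idx) pair; the slow_index item stalls its worker
# for about a second, which lets tests exercise multi-worker scheduling.
if __name__ == "__main__":
    loader = torch.utils.data.DataLoader(
        TestSlowIndexDataset(end=8, slow_index=2), num_workers=2, batch_size=2
    )
    for worker_ids, indices in loader:
        pass  # batched tensors collated from the (worker_id, idx) pairs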
|
TestSlowIndexDataset
|
python
|
scikit-learn__scikit-learn
|
sklearn/impute/_knn.py
|
{
"start": 662,
"end": 14967
}
|
class ____(_BaseImputer):
"""Imputation for completing missing values using k-Nearest Neighbors.
Each sample's missing values are imputed using the mean value from
`n_neighbors` nearest neighbors found in the training set. Two samples are
close if the features that neither is missing are close.
Read more in the :ref:`User Guide <knnimpute>`.
.. versionadded:: 0.22
Parameters
----------
missing_values : int, float, str, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to np.nan, since `pd.NA` will be converted to np.nan.
n_neighbors : int, default=5
Number of neighboring samples to use for imputation.
weights : {'uniform', 'distance'} or callable, default='uniform'
Weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood are
weighted equally.
- 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- callable : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
metric : {'nan_euclidean'} or callable, default='nan_euclidean'
Distance metric for searching neighbors. Possible values:
- 'nan_euclidean'
- callable : a user-defined function which conforms to the definition
of ``func_metric(x, y, *, missing_values=np.nan)``. `x` and `y`
          correspond to rows (i.e. 1-D arrays) of `X` and `Y`, respectively.
          The callable should return a scalar distance value.
copy : bool, default=True
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible.
add_indicator : bool, default=False
If True, a :class:`MissingIndicator` transform will stack onto the
output of the imputer's transform. This allows a predictive estimator
to account for missingness despite imputation. If a feature has no
missing values at fit/train time, the feature won't appear on the
missing indicator even if there are missing values at transform/test
time.
keep_empty_features : bool, default=False
If True, features that consist exclusively of missing values when
`fit` is called are returned in results when `transform` is called.
The imputed value is always `0`.
.. versionadded:: 1.2
Attributes
----------
indicator_ : :class:`~sklearn.impute.MissingIndicator`
Indicator used to add binary indicators for missing values.
``None`` if add_indicator is False.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
SimpleImputer : Univariate imputer for completing missing values
with simple strategies.
IterativeImputer : Multivariate imputer that estimates values to impute for
each feature with missing values from all the others.
References
----------
* `Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
no. 6, 2001 Pages 520-525.
<https://academic.oup.com/bioinformatics/article/17/6/520/272365>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import KNNImputer
>>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
>>> imputer = KNNImputer(n_neighbors=2)
>>> imputer.fit_transform(X)
array([[1. , 2. , 4. ],
[3. , 4. , 3. ],
[5.5, 6. , 5. ],
[8. , 8. , 7. ]])
For a more detailed example see
:ref:`sphx_glr_auto_examples_impute_plot_missing_values.py`.
"""
_parameter_constraints: dict = {
**_BaseImputer._parameter_constraints,
"n_neighbors": [Interval(Integral, 1, None, closed="left")],
"weights": [StrOptions({"uniform", "distance"}), callable, Hidden(None)],
"metric": [StrOptions(set(_NAN_METRICS)), callable],
"copy": ["boolean"],
}
def __init__(
self,
*,
missing_values=np.nan,
n_neighbors=5,
weights="uniform",
metric="nan_euclidean",
copy=True,
add_indicator=False,
keep_empty_features=False,
):
super().__init__(
missing_values=missing_values,
add_indicator=add_indicator,
keep_empty_features=keep_empty_features,
)
self.n_neighbors = n_neighbors
self.weights = weights
self.metric = metric
self.copy = copy
def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col):
"""Helper function to impute a single column.
Parameters
----------
dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
Distance matrix between the receivers and potential donors from
training set. There must be at least one non-nan distance between
a receiver and a potential donor.
n_neighbors : int
Number of neighbors to consider.
fit_X_col : ndarray of shape (n_potential_donors,)
Column of potential donors from training set.
mask_fit_X_col : ndarray of shape (n_potential_donors,)
Missing mask for fit_X_col.
Returns
-------
imputed_values: ndarray of shape (n_receivers,)
Imputed values for receiver.
"""
# Get donors
donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[
:, :n_neighbors
]
# Get weight matrix from distance matrix
donors_dist = dist_pot_donors[
np.arange(donors_idx.shape[0])[:, None], donors_idx
]
weight_matrix = _get_weights(donors_dist, self.weights)
# fill nans with zeros
if weight_matrix is not None:
weight_matrix[np.isnan(weight_matrix)] = 0.0
else:
weight_matrix = np.ones_like(donors_dist)
weight_matrix[np.isnan(donors_dist)] = 0.0
# Retrieve donor values and calculate kNN average
donors = fit_X_col.take(donors_idx)
donors_mask = mask_fit_X_col.take(donors_idx)
donors = np.ma.array(donors, mask=donors_mask)
return np.ma.average(donors, axis=1, weights=weight_matrix).data
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : array-like shape of (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
The fitted `KNNImputer` class instance.
"""
# Check data integrity and calling arguments
if not is_scalar_nan(self.missing_values):
ensure_all_finite = True
else:
ensure_all_finite = "allow-nan"
X = validate_data(
self,
X,
accept_sparse=False,
dtype=FLOAT_DTYPES,
ensure_all_finite=ensure_all_finite,
copy=self.copy,
)
self._fit_X = X
self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
self._valid_mask = ~np.all(self._mask_fit_X, axis=0)
super()._fit_indicator(self._mask_fit_X)
return self
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
X : array-like of shape (n_samples, n_output_features)
The imputed dataset. `n_output_features` is the number of features
            that are not always missing during `fit`.
"""
check_is_fitted(self)
if not is_scalar_nan(self.missing_values):
ensure_all_finite = True
else:
ensure_all_finite = "allow-nan"
X = validate_data(
self,
X,
accept_sparse=False,
dtype=FLOAT_DTYPES,
force_writeable=True,
ensure_all_finite=ensure_all_finite,
copy=self.copy,
reset=False,
)
mask = _get_mask(X, self.missing_values)
mask_fit_X = self._mask_fit_X
valid_mask = self._valid_mask
X_indicator = super()._transform_indicator(mask)
# Removes columns where the training data is all nan
if not np.any(mask[:, valid_mask]):
# No missing values in X
if self.keep_empty_features:
Xc = X
Xc[:, ~valid_mask] = 0
else:
Xc = X[:, valid_mask]
# Even if there are no missing values in X, we still concatenate Xc
# with the missing value indicator matrix, X_indicator.
# This is to ensure that the output maintains consistency in terms
# of columns, regardless of whether missing values exist in X or not.
return super()._concatenate_indicator(Xc, X_indicator)
row_missing_idx = np.flatnonzero(mask[:, valid_mask].any(axis=1))
non_missing_fix_X = np.logical_not(mask_fit_X)
# Maps from indices from X to indices in dist matrix
dist_idx_map = np.zeros(X.shape[0], dtype=int)
dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
def process_chunk(dist_chunk, start):
row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)]
# Find and impute missing by column
for col in range(X.shape[1]):
if not valid_mask[col]:
# column was all missing during training
continue
col_mask = mask[row_missing_chunk, col]
if not np.any(col_mask):
# column has no missing values
continue
(potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])
# receivers_idx are indices in X
receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
# distances for samples that needed imputation for column
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
:, potential_donors_idx
]
# receivers with all nan distances impute with mean
all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
if all_nan_receivers_idx.size:
col_mean = np.ma.array(
self._fit_X[:, col], mask=mask_fit_X[:, col]
).mean()
X[all_nan_receivers_idx, col] = col_mean
if len(all_nan_receivers_idx) == len(receivers_idx):
# all receivers imputed with mean
continue
# receivers with at least one defined distance
receivers_idx = receivers_idx[~all_nan_dist_mask]
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
:, potential_donors_idx
]
n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
value = self._calc_impute(
dist_subset,
n_neighbors,
self._fit_X[potential_donors_idx, col],
mask_fit_X[potential_donors_idx, col],
)
X[receivers_idx, col] = value
# process in fixed-memory chunks
gen = pairwise_distances_chunked(
X[row_missing_idx, :],
self._fit_X,
metric=self.metric,
missing_values=self.missing_values,
ensure_all_finite=ensure_all_finite,
reduce_func=process_chunk,
)
for chunk in gen:
# process_chunk modifies X in place. No return value.
pass
if self.keep_empty_features:
Xc = X
Xc[:, ~valid_mask] = 0
else:
Xc = X[:, valid_mask]
return super()._concatenate_indicator(Xc, X_indicator)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
input_features = _check_feature_names_in(self, input_features)
names = input_features[self._valid_mask]
return self._concatenate_indicator_feature_names_out(names, input_features)
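# --- Usage sketch (illustrative values; `np` is this module's numpy import) ---
# With weights="distance", nearer donors contribute more to the imputed mean.
X_demo = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
X_imputed = KNNImputer(n_neighbors=2, weights="distance").fit_transform(X_demo)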
|
KNNImputer
|
python
|
getsentry__sentry
|
src/sentry/integrations/analytics.py
|
{
"start": 517,
"end": 733
}
|
class ____(analytics.Event):
provider: str
id: int
organization_id: int
user_id: int | None = None
default_user_id: int
@analytics.eventclass("integration.issue.linked")
|
IntegrationIssueCreatedEvent
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/asset_health/asset_materialization_health.py
|
{
"start": 1066,
"end": 3770
}
|
class ____(LoadableBy[AssetKey]):
"""Minimal object for computing the health status for the materialization state of an asset.
This object is intended to be small and quick to deserialize. Deserializing AssetMaterializationHealthState
can be slow if there is a large entity subset. Rather than storing entity subsets, we store the number
of partitions in each state. This lets us quickly compute the health status of the asset and create
the metadata required for the UI.
"""
latest_materialization_timestamp: Optional[float]
latest_terminal_run_id: Optional[str]
num_failed_partitions: int
num_currently_materialized_partitions: int
partitions_snap: Optional[PartitionsSnap]
latest_failed_to_materialize_timestamp: Optional[float] = None
@property
def health_status(self) -> AssetHealthStatus:
if self.num_failed_partitions == 0 and self.num_currently_materialized_partitions == 0:
return AssetHealthStatus.UNKNOWN
if self.num_failed_partitions > 0:
return AssetHealthStatus.DEGRADED
else:
return AssetHealthStatus.HEALTHY
@property
def partitions_def(self) -> Optional[PartitionsDefinition]:
if self.partitions_snap is None:
return None
return self.partitions_snap.get_partitions_definition()
@classmethod
def from_asset_materialization_health_state(
cls,
asset_materialization_health_state: "AssetMaterializationHealthState",
) -> "MinimalAssetMaterializationHealthState":
return cls(
latest_materialization_timestamp=asset_materialization_health_state.latest_materialization_timestamp,
latest_terminal_run_id=asset_materialization_health_state.latest_terminal_run_id,
num_failed_partitions=asset_materialization_health_state.failed_subset.size,
num_currently_materialized_partitions=asset_materialization_health_state.currently_materialized_subset.size,
partitions_snap=asset_materialization_health_state.partitions_snap,
latest_failed_to_materialize_timestamp=asset_materialization_health_state.latest_failed_to_materialize_timestamp,
)
@classmethod
def _blocking_batch_load(
cls, keys: Iterable[AssetKey], context: LoadingContext
) -> Iterable[Optional["MinimalAssetMaterializationHealthState"]]:
asset_materialization_health_states = (
context.instance.get_minimal_asset_materialization_health_state_for_assets(list(keys))
)
return [asset_materialization_health_states.get(key) for key in keys]
@whitelist_for_serdes
@record.record
|
MinimalAssetMaterializationHealthState
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/auth/managers/models/batch_apis.py
|
{
"start": 1171,
"end": 1392
}
|
class ____(TypedDict, total=False):
"""Represent the parameters of ``is_authorized_connection`` API in the auth manager."""
method: ResourceMethod
details: ConnectionDetails | None
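# --- Usage sketch; treating "GET" as a valid ResourceMethod is an assumption ---
request: IsAuthorizedConnectionRequest = {"method": "GET", "details": None}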
|
IsAuthorizedConnectionRequest
|
python
|
numba__numba
|
numba/core/datamodel/models.py
|
{
"start": 13020,
"end": 15374
}
|
class ____(DataModel):
def __init__(self, dmm, fe_type):
super(UniTupleModel, self).__init__(dmm, fe_type)
self._elem_model = dmm.lookup(fe_type.dtype)
self._count = len(fe_type)
self._value_type = ir.ArrayType(self._elem_model.get_value_type(),
self._count)
self._data_type = ir.ArrayType(self._elem_model.get_data_type(),
self._count)
def get_value_type(self):
return self._value_type
def get_data_type(self):
return self._data_type
def get_return_type(self):
return self.get_value_type()
def get_argument_type(self):
return (self._elem_model.get_argument_type(),) * self._count
def as_argument(self, builder, value):
out = []
for i in range(self._count):
v = builder.extract_value(value, [i])
v = self._elem_model.as_argument(builder, v)
out.append(v)
return out
def from_argument(self, builder, value):
out = ir.Constant(self.get_value_type(), ir.Undefined)
for i, v in enumerate(value):
v = self._elem_model.from_argument(builder, v)
out = builder.insert_value(out, v, [i])
return out
def as_data(self, builder, value):
out = ir.Constant(self.get_data_type(), ir.Undefined)
for i in range(self._count):
val = builder.extract_value(value, [i])
dval = self._elem_model.as_data(builder, val)
out = builder.insert_value(out, dval, [i])
return out
def from_data(self, builder, value):
out = ir.Constant(self.get_value_type(), ir.Undefined)
for i in range(self._count):
val = builder.extract_value(value, [i])
dval = self._elem_model.from_data(builder, val)
out = builder.insert_value(out, dval, [i])
return out
def as_return(self, builder, value):
return value
def from_return(self, builder, value):
return value
def traverse(self, builder):
def getter(i, value):
return builder.extract_value(value, i)
return [(self._fe_type.dtype, partial(getter, i))
for i in range(self._count)]
def inner_models(self):
return [self._elem_model]
|
UniTupleModel
|
python
|
PyCQA__pylint
|
tests/functional/s/super/super_checks.py
|
{
"start": 423,
"end": 627
}
|
class ____:
"""old style"""
def hop(self):
"""hop"""
super(NewAaaa, self).hop() # [no-member]
def __init__(self):
super(Aaaa, self).__init__() # [bad-super-call]
|
NewAaaa
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/base.py
|
{
"start": 3055,
"end": 3618
}
|
class ____(Protocol):
"""Tokenizers support an encode function that returns a list of ints."""
def encode(self, text: str) -> List[int]: # fmt: skip
...
def force_single_tool_call(response: ChatResponse) -> None:
tool_calls = [
block for block in response.message.blocks if isinstance(block, ToolCallBlock)
]
if len(tool_calls) > 1:
response.message.blocks = [
block
for block in response.message.blocks
if not isinstance(block, ToolCallBlock)
] + [tool_calls[0]]
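# --- Sketch of a structural match for the protocol (the scheme is illustrative) ---
class WhitespaceTokenizer:
    def encode(self, text: str) -> List[int]:
        # map whitespace-separated tokens to integer ids (hash is only
        # process-stable, which is fine for a sketch)
        return [hash(token) % 50_000 for token in text.split()]

tokenizer: Tokenizer = WhitespaceTokenizer()  # no inheritance needed for a Protocol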
|
Tokenizer
|
python
|
keras-team__keras
|
keras/src/utils/backend_utils.py
|
{
"start": 1761,
"end": 6441
}
|
class ____:
"""A class that can be used to switch from one backend to another.
Example:
```python
backend = DynamicBackend("tensorflow")
y = backend.square(tf.constant(...))
backend.set_backend("jax")
y = backend.square(jax.numpy.array(...))
```
Args:
backend: Initial backend to use (string).
"""
def __init__(self, backend=None):
self._backend = backend or backend_module.backend()
def set_backend(self, backend):
if backend not in ("tensorflow", "jax", "torch", "numpy", "openvino"):
raise ValueError(
"Available backends are ('tensorflow', 'jax', 'torch', "
f"'numpy' and 'openvino'). Received: backend={backend}"
)
self._backend = backend
def reset(self):
self._backend = backend_module.backend()
@property
def name(self):
return self._backend
def __getattr__(self, name):
if self._backend == "tensorflow":
module = importlib.import_module("keras.src.backend.tensorflow")
return getattr(module, name)
if self._backend == "jax":
module = importlib.import_module("keras.src.backend.jax")
return getattr(module, name)
if self._backend == "torch":
module = importlib.import_module("keras.src.backend.torch")
return getattr(module, name)
if self._backend == "numpy":
if backend_module.backend() == "numpy":
return getattr(backend_module, name)
else:
raise NotImplementedError(
"Currently, we cannot dynamically import the numpy backend "
"because it would disrupt the namespace of the import."
)
if self._backend == "openvino":
module = importlib.import_module("keras.src.backend.openvino")
return getattr(module, name)
@keras_export("keras.config.set_backend")
def set_backend(backend):
"""Reload the backend (and the Keras package).
Example:
>>> import os
>>> os.environ["KERAS_BACKEND"] = "tensorflow"
>>>
>>> import keras
>>> from keras import ops
>>> type(ops.ones(()))
<class 'tensorflow.python.framework.ops.EagerTensor'>
>>>
>>> keras.config.set_backend("jax")
UserWarning: Using `keras.config.set_backend` is dangerous...
>>> del keras, ops
>>>
>>> import keras
>>> from keras import ops
>>> type(ops.ones(()))
<class 'jaxlib.xla_extension.ArrayImpl'>
⚠️ WARNING ⚠️: Using this function is dangerous and should be done
carefully. Changing the backend will **NOT** convert
the type of any already-instantiated objects.
Thus, any layers / tensors / etc. already created will no
longer be usable without errors. It is strongly recommended **not**
to keep around **any** Keras-originated objects instances created
before calling `set_backend()`.
This includes any function or class instance that uses any Keras
functionality. All such code needs to be re-executed after calling
`set_backend()` and re-importing all imported `keras` modules.
"""
os.environ["KERAS_BACKEND"] = backend
# Clear module cache.
loaded_modules = [
key for key in sys.modules.keys() if key.startswith("keras")
]
for key in loaded_modules:
del sys.modules[key]
# Reimport Keras with the new backend (set via KERAS_BACKEND).
import keras
# Finally: refresh all imported Keras submodules.
globs = copy.copy(globals())
for key, value in globs.items():
if value.__class__ == keras.__class__:
if str(value).startswith("<module 'keras."):
module_name = str(value)
module_name = module_name[module_name.find("'") + 1 :]
module_name = module_name[: module_name.find("'")]
globals()[key] = importlib.import_module(module_name)
warnings.warn(
"Using `keras.config.set_backend` is dangerous and should be done "
"carefully. Already-instantiated objects will not be converted. Thus, "
"any layers / tensors / etc. already created will no longer be usable "
"without errors. It is strongly recommended not to keep around any "
"Keras-originated objects instances created before calling "
"`set_backend()`. This includes any function or class instance that "
"uses any Keras functionality. All such code needs to be re-executed "
"after calling `set_backend()` and re-importing all imported `keras` "
"modules.",
stacklevel=2,
)
|
DynamicBackend
|
python
|
pytorch__pytorch
|
test/test_complex.py
|
{
"start": 403,
"end": 14571
}
|
class ____(TestCase):
@dtypes(*complex_types())
def test_to_list(self, device, dtype):
# test that the complex float tensor has expected values and
# there's no garbage value in the resultant list
self.assertEqual(
torch.zeros((2, 2), device=device, dtype=dtype).tolist(),
[[0j, 0j], [0j, 0j]],
)
@dtypes(torch.float32, torch.float64, torch.float16)
def test_dtype_inference(self, device, dtype):
# issue: https://github.com/pytorch/pytorch/issues/36834
with set_default_dtype(dtype):
x = torch.tensor([3.0, 3.0 + 5.0j], device=device)
if dtype == torch.float16:
self.assertEqual(x.dtype, torch.chalf)
elif dtype == torch.float32:
self.assertEqual(x.dtype, torch.cfloat)
else:
self.assertEqual(x.dtype, torch.cdouble)
@dtypes(*complex_types())
def test_conj_copy(self, device, dtype):
# issue: https://github.com/pytorch/pytorch/issues/106051
x1 = torch.tensor([5 + 1j, 2 + 2j], device=device, dtype=dtype)
xc1 = torch.conj(x1)
x1.copy_(xc1)
self.assertEqual(x1, torch.tensor([5 - 1j, 2 - 2j], device=device, dtype=dtype))
@dtypes(*complex_types())
def test_all(self, device, dtype):
# issue: https://github.com/pytorch/pytorch/issues/120875
x = torch.tensor([1 + 2j, 3 - 4j, 5j, 6], device=device, dtype=dtype)
self.assertTrue(torch.all(x))
@dtypes(*complex_types())
def test_any(self, device, dtype):
# issue: https://github.com/pytorch/pytorch/issues/120875
x = torch.tensor(
[0, 0j, -0 + 0j, -0 - 0j, 0 + 0j, 0 - 0j], device=device, dtype=dtype
)
self.assertFalse(torch.any(x))
@onlyCPU
@dtypes(*complex_types())
def test_eq(self, device, dtype):
"Test eq on complex types"
nan = float("nan")
# Non-vectorized operations
for a, b in (
(
torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-6.1278 - 8.5019j], device=device, dtype=dtype),
),
(
torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-6.1278 - 2.1172j], device=device, dtype=dtype),
),
(
torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-0.0610 - 8.5019j], device=device, dtype=dtype),
),
):
actual = torch.eq(a, b)
expected = torch.tensor([False], device=device, dtype=torch.bool)
self.assertEqual(
actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}"
)
actual = torch.eq(a, a)
expected = torch.tensor([True], device=device, dtype=torch.bool)
self.assertEqual(
actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}"
)
actual = torch.full_like(b, complex(2, 2))
torch.eq(a, b, out=actual)
expected = torch.tensor([complex(0)], device=device, dtype=dtype)
self.assertEqual(
actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}"
)
actual = torch.full_like(b, complex(2, 2))
torch.eq(a, a, out=actual)
expected = torch.tensor([complex(1)], device=device, dtype=dtype)
self.assertEqual(
actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}"
)
# Vectorized operations
for a, b in (
(
torch.tensor(
[
-0.0610 - 2.1172j,
5.1576 + 5.4775j,
complex(2.8871, nan),
-6.6545 - 3.7655j,
-2.7036 - 1.4470j,
0.3712 + 7.989j,
-0.0610 - 2.1172j,
5.1576 + 5.4775j,
complex(nan, -3.2650),
-6.6545 - 3.7655j,
-2.7036 - 1.4470j,
0.3712 + 7.989j,
],
device=device,
dtype=dtype,
),
torch.tensor(
[
-6.1278 - 8.5019j,
0.5886 + 8.8816j,
complex(2.8871, nan),
6.3505 + 2.2683j,
0.3712 + 7.9659j,
0.3712 + 7.989j,
-6.1278 - 2.1172j,
5.1576 + 8.8816j,
complex(nan, -3.2650),
6.3505 + 2.2683j,
0.3712 + 7.9659j,
0.3712 + 7.989j,
],
device=device,
dtype=dtype,
),
),
):
actual = torch.eq(a, b)
expected = torch.tensor(
[
False,
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
True,
],
device=device,
dtype=torch.bool,
)
self.assertEqual(
actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}"
)
actual = torch.eq(a, a)
expected = torch.tensor(
[
True,
True,
False,
True,
True,
True,
True,
True,
False,
True,
True,
True,
],
device=device,
dtype=torch.bool,
)
self.assertEqual(
actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}"
)
actual = torch.full_like(b, complex(2, 2))
torch.eq(a, b, out=actual)
expected = torch.tensor(
[
complex(0),
complex(0),
complex(0),
complex(0),
complex(0),
complex(1),
complex(0),
complex(0),
complex(0),
complex(0),
complex(0),
complex(1),
],
device=device,
dtype=dtype,
)
self.assertEqual(
actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}"
)
actual = torch.full_like(b, complex(2, 2))
torch.eq(a, a, out=actual)
expected = torch.tensor(
[
complex(1),
complex(1),
complex(0),
complex(1),
complex(1),
complex(1),
complex(1),
complex(1),
complex(0),
complex(1),
complex(1),
complex(1),
],
device=device,
dtype=dtype,
)
self.assertEqual(
actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}"
)
@onlyCPU
@dtypes(*complex_types())
def test_ne(self, device, dtype):
"Test ne on complex types"
nan = float("nan")
# Non-vectorized operations
for a, b in (
(
torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-6.1278 - 8.5019j], device=device, dtype=dtype),
),
(
torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-6.1278 - 2.1172j], device=device, dtype=dtype),
),
(
torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype),
torch.tensor([-0.0610 - 8.5019j], device=device, dtype=dtype),
),
):
actual = torch.ne(a, b)
expected = torch.tensor([True], device=device, dtype=torch.bool)
self.assertEqual(
actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}"
)
actual = torch.ne(a, a)
expected = torch.tensor([False], device=device, dtype=torch.bool)
self.assertEqual(
actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}"
)
actual = torch.full_like(b, complex(2, 2))
torch.ne(a, b, out=actual)
expected = torch.tensor([complex(1)], device=device, dtype=dtype)
self.assertEqual(
actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}"
)
actual = torch.full_like(b, complex(2, 2))
torch.ne(a, a, out=actual)
expected = torch.tensor([complex(0)], device=device, dtype=dtype)
self.assertEqual(
actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}"
)
# Vectorized operations
for a, b in (
(
torch.tensor(
[
-0.0610 - 2.1172j,
5.1576 + 5.4775j,
complex(2.8871, nan),
-6.6545 - 3.7655j,
-2.7036 - 1.4470j,
0.3712 + 7.989j,
-0.0610 - 2.1172j,
5.1576 + 5.4775j,
complex(nan, -3.2650),
-6.6545 - 3.7655j,
-2.7036 - 1.4470j,
0.3712 + 7.989j,
],
device=device,
dtype=dtype,
),
torch.tensor(
[
-6.1278 - 8.5019j,
0.5886 + 8.8816j,
complex(2.8871, nan),
6.3505 + 2.2683j,
0.3712 + 7.9659j,
0.3712 + 7.989j,
-6.1278 - 2.1172j,
5.1576 + 8.8816j,
complex(nan, -3.2650),
6.3505 + 2.2683j,
0.3712 + 7.9659j,
0.3712 + 7.989j,
],
device=device,
dtype=dtype,
),
),
):
actual = torch.ne(a, b)
expected = torch.tensor(
[
True,
True,
True,
True,
True,
False,
True,
True,
True,
True,
True,
False,
],
device=device,
dtype=torch.bool,
)
self.assertEqual(
actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}"
)
actual = torch.ne(a, a)
expected = torch.tensor(
[
False,
False,
True,
False,
False,
False,
False,
False,
True,
False,
False,
False,
],
device=device,
dtype=torch.bool,
)
self.assertEqual(
actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}"
)
actual = torch.full_like(b, complex(2, 2))
torch.ne(a, b, out=actual)
expected = torch.tensor(
[
complex(1),
complex(1),
complex(1),
complex(1),
complex(1),
complex(0),
complex(1),
complex(1),
complex(1),
complex(1),
complex(1),
complex(0),
],
device=device,
dtype=dtype,
)
self.assertEqual(
actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}"
)
actual = torch.full_like(b, complex(2, 2))
torch.ne(a, a, out=actual)
expected = torch.tensor(
[
complex(0),
complex(0),
complex(1),
complex(0),
complex(0),
complex(0),
complex(0),
complex(0),
complex(1),
complex(0),
complex(0),
complex(0),
],
device=device,
dtype=dtype,
)
self.assertEqual(
actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}"
)
instantiate_device_type_tests(TestComplexTensor, globals())
if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests()
|
TestComplexTensor
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 6765,
"end": 6873
}
|
class ____(admin.ModelAdmin):
def has_module_permission(self, request):
return False
|
ArticleAdmin2
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_to_be_lat_lon_coordinates_in_range_of_given_point.py
|
{
"start": 1410,
"end": 6156
}
|
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.coordinates.in_range"
condition_value_keys = ("center_point", "range", "unit", "projection")
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
center_point = kwargs.get("center_point")
unit = kwargs.get("unit")
range = kwargs.get("range")
projection = kwargs.get("projection")
        if projection == "fcc":
            distances = column.apply(lambda x, y=center_point: fcc_projection(x, y))
        elif projection == "pythagorean":
            distances = column.apply(lambda x, y=center_point: pythagorean_projection(x, y))
        if unit == "miles":
            # the projections return kilometers, so convert the range from miles
            # to km rather than rescaling both sides of the comparison
            range = range * 1.609344
        return distances.le(range)
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, **kwargs):
center_point = kwargs.get("center_point")
unit = kwargs.get("unit")
range = kwargs.get("range")
projection = kwargs.get("projection")
        if projection == "fcc":
            distances = F.udf(
                lambda x, y=center_point: fcc_projection(x, y),
                pyspark.types.FloatType(),
            )
        elif projection == "pythagorean":
            distances = F.udf(
                lambda x, y=center_point: pythagorean_projection(x, y),
                pyspark.types.FloatType(),
            )
        if unit == "miles":
            # the projections return kilometers, so convert the range from miles to km
            range = range * 1.609344
        return F.when(distances(column) < range, F.lit(True)).otherwise(F.lit(False))
def fcc_projection(loc1, loc2):
"""
Application of the Pythagorean theorem to calculate the distance in kilometers between two lat/lon points on an ellipsoidal earth
projected to a plane, as prescribed by the FCC for distances not exceeding 475km/295mi.
See https://www.govinfo.gov/content/pkg/CFR-2016-title47-vol4/pdf/CFR-2016-title47-vol4-sec73-208.pdf
:param loc1: A tuple of lat/lon
:param loc2: A tuple of lat/lon
:return distance: Distance between points in km
"""
lat1, lat2 = float(loc1[0]), float(loc2[0])
lon1, lon2 = float(loc1[1]), float(loc2[1])
    mean_lat = (lat1 + lat2) / 2 * (pi / 180)  # math.cos expects radians
delta_lat = lat2 - lat1
delta_lon = lon2 - lon1
k1 = 111.13209 - (0.56605 * cos(2 * mean_lat)) + (0.0012 * cos(4 * mean_lat))
k2 = (111.41513 * cos(mean_lat)) - (0.09455 * cos(3 * mean_lat)) + (0.00012 * cos(5 * mean_lat))
distance = sqrt((k1 * delta_lat) ** 2 + (k2 * delta_lon) ** 2)
return distance
def pythagorean_projection(loc1, loc2):
"""
Application of the pythagorean theorem to calculate the distance in kilometers between two lat/lon points on
a spherical earth projected to a plane.
:param loc1: A tuple of lat/lon
:param loc2: A tuple of lat/lon
:return distance: Distance between points in km
"""
lat1, lat2 = float(loc1[0]), float(loc2[0])
lon1, lon2 = float(loc1[1]), float(loc2[1])
    mean_lat = (lat1 + lat2) / 2 * (pi / 180)  # converting from degree-decimal to radians
delta_lat = (lat2 - lat1) * (pi / 180) # converting from degree-decimal to radians
delta_lon = (lon2 - lon1) * (pi / 180) # converting from degree-decimal to radians
radius = 6371.009
distance = radius * sqrt((delta_lat**2) + (cos(mean_lat) * delta_lon) ** 2)
return distance
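# --- Usage sketch (coordinates are illustrative) ---
# Both planar approximations return kilometers and agree closely at short range.
_p1, _p2 = (40.0, -75.0), (40.1, -75.1)
assert abs(fcc_projection(_p1, _p2) - pythagorean_projection(_p1, _p2)) < 1.0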
# This class defines the Expectation itself
|
ColumnValuesAreLatLonCoordinatesInRange
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/interpreter_test.py
|
{
"start": 19859,
"end": 20988
}
|
class ____(test_util.TensorFlowTestCase):
def setUp(self):
super().setUp()
self.interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'
)
)
self.interpreter.allocate_tensors()
self.input0 = self.interpreter.get_input_details()[0]['index']
self.initial_data = np.array([[-1.0, -2.0, -3.0, -4.0]], np.float32)
def testValidNode(self):
"""Check that tensor returns a reference."""
ops_details = self.interpreter._get_ops_details()
self.assertEqual(ops_details[0]['index'], 0)
self.assertEqual(ops_details[0]['op_name'], 'FULLY_CONNECTED')
self.assertAllEqual(ops_details[0]['inputs'], [0, 1, -1])
self.assertAllEqual(ops_details[0]['outputs'], [2])
self.assertAllEqual(
ops_details[0]['operand_types'], [np.float32, np.float32]
)
self.assertAllEqual(ops_details[0]['result_types'], [np.float32])
def testInvalidNode(self):
with self.assertRaisesRegex(ValueError, 'Invalid node index'):
self.interpreter._get_op_details(4)
|
InterpreterNodeAccessTest
|
python
|
scipy__scipy
|
scipy/stats/tests/test_morestats.py
|
{
"start": 89733,
"end": 91910
}
|
class ____:
def setup_method(self):
self.x = _old_loggamma_rvs(5, size=500, random_state=7654321) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
|
TestPpccPlot
|
python
|
pydantic__pydantic
|
pydantic-core/python/pydantic_core/core_schema.py
|
{
"start": 142428,
"end": 143465
}
|
class ____(TypedDict, total=False):
type: Required[Literal['definitions']]
schema: Required[CoreSchema]
definitions: Required[list[CoreSchema]]
metadata: dict[str, Any]
serialization: SerSchema
def definitions_schema(schema: CoreSchema, definitions: list[CoreSchema]) -> DefinitionsSchema:
"""
Build a schema that contains both an inner schema and a list of definitions which can be used
within the inner schema.
```py
from pydantic_core import SchemaValidator, core_schema
schema = core_schema.definitions_schema(
core_schema.list_schema(core_schema.definition_reference_schema('foobar')),
[core_schema.int_schema(ref='foobar')],
)
v = SchemaValidator(schema)
assert v.validate_python([1, 2, '3']) == [1, 2, 3]
```
Args:
schema: The inner schema
definitions: List of definitions which can be referenced within inner schema
"""
return DefinitionsSchema(type='definitions', schema=schema, definitions=definitions)
|
DefinitionsSchema
|
python
|
falconry__falcon
|
examples/things.py
|
{
"start": 282,
"end": 1210
}
|
class ____:
def on_get(self, req, resp):
"""Handles GET requests"""
resp.status = falcon.HTTP_200 # This is the default status
resp.content_type = falcon.MEDIA_TEXT # Default is JSON, so override
resp.text = (
'\nTwo things awe me most, the starry sky '
'above me and the moral law within me.\n'
'\n'
' ~ Immanuel Kant\n\n'
)
# falcon.App instances are callable WSGI apps
# in larger applications the app is created in a separate file
app = falcon.App()
# Resources are represented by long-lived class instances
things = ThingsResource()
# things will handle all requests to the '/things' URL path
app.add_route('/things', things)
if __name__ == '__main__':
with make_server('', 8000, app) as httpd:
print('Serving on port 8000...')
# Serve until process is killed
httpd.serve_forever()
|
ThingsResource
|
python
|
huggingface__transformers
|
src/transformers/models/parakeet/tokenization_parakeet_fast.py
|
{
"start": 751,
"end": 1952
}
|
class ____(PreTrainedTokenizerFast):
"""
Inherits all methods from [`PreTrainedTokenizerFast`]. Users should refer to this superclass for more information regarding those methods,
except for `_decode` which is overridden to adapt it to CTC decoding:
1. Group consecutive tokens
2. Filter out the blank token
"""
def _decode(
self,
token_ids: Union[int, list[int]],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
group_tokens: bool = True,
**kwargs,
) -> str:
if isinstance(token_ids, int):
token_ids = [token_ids]
if group_tokens:
token_ids = [token_group[0] for token_group in itertools.groupby(token_ids)]
# for CTC we filter out the blank token, which is the pad token
token_ids = [token for token in token_ids if token != self.pad_token_id]
return super()._decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
__all__ = ["ParakeetTokenizerFast"]
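# --- Illustration of the CTC collapse performed in _decode (ids are made up;
# 0 stands in for the pad/blank id) ---
_ids = [5, 5, 5, 0, 7, 7, 0, 5]
_grouped = [key for key, _ in itertools.groupby(_ids)]  # -> [5, 0, 7, 0, 5]
assert [t for t in _grouped if t != 0] == [5, 7, 5]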
|
ParakeetTokenizerFast
|
python
|
Textualize__textual
|
src/textual/drivers/_byte_stream.py
|
{
"start": 830,
"end": 3286
}
|
class ____(Generic[TokenType]):
"""A parser to feed in binary data and generate a sequence of tokens."""
read = _Read
read1 = _Read1
def __init__(self) -> None:
"""Initialize the parser."""
self._buffer = io.BytesIO()
self._eof = False
self._tokens: Deque[TokenType] = deque()
self._gen = self.parse(self._tokens.append)
self._awaiting: Awaitable | TokenType = next(self._gen)
@property
def is_eof(self) -> bool:
"""Is the parser at the end of file?"""
return self._eof
def feed(self, data: bytes) -> Iterable[TokenType]:
"""Feed the parser some data, return an iterable of tokens."""
if self._eof:
raise ParseError("end of file reached") from None
if not data:
self._eof = True
try:
self._gen.send(self._buffer.getvalue())
except StopIteration:
raise ParseError("end of file reached") from None
while self._tokens:
yield self._tokens.popleft()
self._buffer.truncate(0)
return
_buffer = self._buffer
pos = 0
tokens = self._tokens
popleft = tokens.popleft
data_size = len(data)
while tokens:
yield popleft()
while pos < data_size:
_awaiting = self._awaiting
if isinstance(_awaiting, _Read1):
self._awaiting = self._gen.send(data[pos : pos + 1])
pos += 1
elif isinstance(_awaiting, _Read):
remaining = _awaiting.remaining
chunk = data[pos : pos + remaining]
chunk_size = len(chunk)
pos += chunk_size
_buffer.write(chunk)
remaining -= chunk_size
if remaining:
_awaiting.remaining = remaining
else:
self._awaiting = self._gen.send(_buffer.getvalue())
_buffer.seek(0)
_buffer.truncate()
while tokens:
yield popleft()
def parse(
self, on_token: ByteStreamTokenCallback
) -> Generator[Awaitable, bytes, None]:
"""Implement in a sub-class to define parse behavior.
Args:
on_token: A callable which accepts the token type, and returns None.
"""
yield from ()
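# --- Hypothetical subclass sketch. It assumes read1() takes no arguments, as
# the feed() loop above suggests; the token type and names are illustrative. ---
class LineSplitter(ByteStreamParser[bytes]):
    def parse(self, on_token):
        line = b""
        while True:
            byte = yield self.read1()  # suspend until one byte arrives
            line += byte
            if byte == b"\n":
                on_token(line)  # emit each completed line as a token
                line = b""

# tokens = list(LineSplitter().feed(b"ab\ncd\n"))  # expected: [b"ab\n", b"cd\n"]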
|
ByteStreamParser
|
python
|
apache__avro
|
lang/py/avro/test/test_datafile_interop.py
|
{
"start": 1118,
"end": 2169
}
|
class ____(unittest.TestCase):
def test_interop(self) -> None:
"""Test Interop"""
datum: Optional[object] = None
for filename in _INTEROP_DATA_DIR.iterdir():
self.assertGreater(os.stat(filename).st_size, 0)
base_ext = filename.stem.split("_", 1)
if len(base_ext) > 1 and base_ext[1] not in avro.codecs.KNOWN_CODECS:
print(f"SKIPPING {filename} due to an unsupported codec\n")
continue
i = None
with self.subTest(filename=filename), avro.datafile.DataFileReader(filename.open("rb"), avro.io.DatumReader()) as dfr:
user_metadata = dfr.get_meta("user_metadata")
if user_metadata is not None:
self.assertEqual(user_metadata, b"someByteArray")
for i, datum in enumerate(cast(avro.datafile.DataFileReader, dfr), 1):
self.assertIsNotNone(datum)
self.assertIsNotNone(i)
if __name__ == "__main__":
unittest.main()
|
TestDataFileInterop
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 25020,
"end": 25209
}
|
class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("CREATED_AT", "NAME", "POSITION")
|
ProjectV2FieldOrderField
|
python
|
astropy__astropy
|
astropy/units/tests/test_format.py
|
{
"start": 797,
"end": 10902
}
|
class ____(NamedTuple):
string: str
unit: UnitBase
def list_format_string_pairs(*test_cases: tuple[str, str]) -> list[FormatStringPair]:
return [FormatStringPair(format, string) for format, string in test_cases]
def list_string_unit_pairs(
*test_cases: tuple[Iterable[str], UnitBase],
) -> list[StringUnitPair]:
return [
StringUnitPair(string, unit)
for strings, unit in test_cases
for string in strings
]
@pytest.mark.parametrize(
"test_pair",
list_string_unit_pairs(
(["m s", "m*s", "m.s"], u.m * u.s),
(["m/s", "m*s**-1", "m /s", "m / s", "m/ s"], u.m / u.s),
(["m**2", "m2", "m**(2)", "m**+2", "m+2", "m^(+2)"], u.m**2),
(["m**-3", "m-3", "m^(-3)", "/m3"], u.m**-3),
(["m**(1.5)", "m(3/2)", "m**(3/2)", "m^(3/2)"], u.m**1.5),
(["2.54 cm"], u.Unit(u.cm * 2.54)),
(["10+8m"], u.Unit(u.m * 1e8)),
# This is the VOUnits documentation, but doesn't seem to follow the
# unity grammar (["3.45 10**(-4)Jy"], 3.45 * 1e-4 * u.Jy)
(["sqrt(m)"], u.m**0.5),
(["dB(mW)", "dB (mW)"], u.DecibelUnit(u.mW)),
(["mag"], u.mag),
(["mag(ct/s)"], u.MagUnit(u.ct / u.s)),
(["dex"], u.dex),
(["dex(cm s**-2)", "dex(cm/s2)"], u.DexUnit(u.cm / u.s**2)),
),
ids=lambda x: x.string,
)
def test_unit_grammar(test_pair: StringUnitPair):
assert u_format.Generic.parse(test_pair.string) == test_pair.unit
@pytest.mark.parametrize(
"string", ["sin( /pixel /s)", "mag(mag)", "dB(dB(mW))", "dex()"]
)
def test_unit_grammar_fail(string):
with pytest.raises(ValueError):
u_format.Generic.parse(string)
@pytest.mark.parametrize(
"test_pair",
list_string_unit_pairs(
(["0.1nm"], u.AA),
(["mW/m2"], u.Unit(u.erg / u.cm**2 / u.s)),
(["mW/(m2)"], u.Unit(u.erg / u.cm**2 / u.s)),
(["km/s", "km.s-1"], u.km / u.s),
(["km/s/Mpc"], u.km / u.s / u.Mpc),
(["km/(s.Mpc)"], u.km / u.s / u.Mpc),
(["10+3J/m/s/kpc2"], u.Unit(1e3 * u.W / (u.m * u.kpc**2))),
(["10pix/nm"], u.Unit(10 * u.pix / u.nm)),
(["1.5x10+11m"], u.Unit(1.5e11 * u.m)),
(["1.5×10+11/m"], u.Unit(1.5e11 / u.m)),
(["/s"], u.s**-1),
(["m2"], u.m**2),
(["10+21m"], u.Unit(u.m * 1e21)),
(["2.54cm"], u.Unit(u.cm * 2.54)),
(["20%"], 0.20 * u.dimensionless_unscaled),
(["10+9"], 1.0e9 * u.dimensionless_unscaled),
(["2x10-9"], 2.0e-9 * u.dimensionless_unscaled),
(["---"], u.dimensionless_unscaled),
(["ma"], u.ma),
(["mAU"], u.mAU),
(["uarcmin"], u.uarcmin),
(["uarcsec"], u.uarcsec),
(["kbarn"], u.kbarn),
(["Gbit"], u.Gbit),
(["Gibit"], 2**30 * u.bit),
(["kbyte"], u.kbyte),
(["mRy"], 0.001 * u.Ry),
(["mmag"], u.mmag),
(["Mpc"], u.Mpc),
(["Gyr"], u.Gyr),
(["°"], u.degree),
(["°/s"], u.degree / u.s),
(["Å"], u.AA),
(["Å/s"], u.AA / u.s),
(["\\h"], si.h),
(["[cm/s2]"], dex(u.cm / u.s**2)),
(["[K]"], dex(u.K)),
(["[-]"], dex(u.dimensionless_unscaled)),
(["eps0/mu0"], cds.eps0 / cds.mu0),
(["a0.s"], cds.a0 * u.s),
),
ids=lambda x: x.string,
)
def test_cds_grammar(test_pair: StringUnitPair):
assert u_format.CDS.parse(test_pair.string) == test_pair.unit
@pytest.mark.parametrize(
"string",
[
"0.1 nm",
"solMass(3/2)",
"km / s",
"km s-1",
"km/s.Mpc-1",
"/s.Mpc",
"pix0.1nm",
"pix/(0.1nm)",
"km*s",
"km**2",
"5x8+3m",
"0.1---",
"---m",
"m---",
"--",
"0.1-",
"-m",
"m-",
"mag(s-1)",
"dB(mW)",
"dex(cm s-2)",
"[--]",
],
)
def test_cds_grammar_fail(string):
with pytest.raises(ValueError):
u_format.CDS.parse(string)
def test_cds_dimensionless():
assert u.Unit("---", format="cds") == u.dimensionless_unscaled
assert u.dimensionless_unscaled.to_string(format="cds") == "---"
def test_cds_log10_dimensionless():
assert u.Unit("[-]", format="cds") == u.dex(u.dimensionless_unscaled)
assert u.dex(u.dimensionless_unscaled).to_string(format="cds") == "[-]"
def test_cds_angstrom_str():
# Regression test for a problem noticed in
# https://github.com/astropy/astropy/pull/17527#discussion_r1880555481
# that the string representation of the cds version of Angstrom was "AA".
assert str(u.cds.Angstrom) == str(u.Angstrom) == "Angstrom"
# Since this is a NamedUnit, let's check the name for completeness.
assert u.cds.Angstrom.name == "Angstrom"
def test_cds_solMass_str():
# CDS allows writing solar mass as Msun or solMass,
# but cds.solMass and u.solMass should be consistent.
assert u.solMass.to_string("cds") == "solMass"
assert u.cds.solMass.to_string("cds") == "solMass"
# These examples are taken from the EXAMPLES section of
# https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/
@pytest.mark.parametrize(
"test_pair",
list_string_unit_pairs(
(
["count /s", "count/s", "count s**(-1)", "count / s", "count /s "],
u.count / u.s,
),
(
["/pixel /s", "/(pixel * s)"],
(u.pixel * u.s) ** -1,
),
(
[
"count /m**2 /s /eV",
"count m**(-2) * s**(-1) * eV**(-1)",
"count /(m**2 * s * eV)",
],
u.count * u.m**-2 * u.s**-1 * u.eV**-1,
),
(
["erg /pixel /s /GHz", "erg /s /GHz /pixel", "erg /pixel /(s * GHz)"],
u.erg / (u.s * u.GHz * u.pixel),
),
(
["keV**2 /yr /angstrom", "10**(10) keV**2 /yr /m"],
# Though this is given as an example, it seems to violate the rules
# of not raising scales to powers, so I'm just excluding it
# "(10**2 MeV)**2 /yr /m"
u.keV**2 / (u.yr * u.angstrom),
),
(
[
"10**(46) erg /s",
"10**46 erg /s",
"10**(39) J /s",
"10**(39) W",
"10**(15) YW",
"YJ /fs",
],
10**46 * u.erg / u.s,
),
(
[
"10**(-7) J /cm**2 /MeV",
"10**(-9) J m**(-2) eV**(-1)",
"nJ m**(-2) eV**(-1)",
"nJ /m**2 /eV",
],
10**-7 * u.J * u.cm**-2 * u.MeV**-1,
),
(
[
"sqrt(erg /pixel /s /GHz)",
"(erg /pixel /s /GHz)**(0.5)",
"(erg /pixel /s /GHz)**(1/2)",
"erg**(0.5) pixel**(-0.5) s**(-0.5) GHz**(-0.5)",
],
(u.erg * u.pixel**-1 * u.s**-1 * u.GHz**-1) ** 0.5,
),
(
[
"(count /s) (/pixel /s)",
"(count /s) * (/pixel /s)",
"count /pixel /s**2",
],
(u.count / u.s) * (1.0 / (u.pixel * u.s)),
),
),
ids=lambda x: x.string,
)
def test_ogip_grammar(test_pair: StringUnitPair):
assert u_format.OGIP.parse(test_pair.string) == test_pair.unit
@pytest.mark.parametrize(
"string",
[
"log(photon /m**2 /s /Hz)",
"sin( /pixel /s)",
"log(photon /cm**2 /s /Hz) /(sin( /pixel /s))",
"log(photon /cm**2 /s /Hz) (sin( /pixel /s))**(-1)",
"dB(mW)",
"dex(cm/s**2)",
],
)
def test_ogip_grammar_fail(string):
with pytest.raises(ValueError):
u_format.OGIP.parse(string)
@pytest.mark.xfail(reason="'acos' is not understood by astropy", raises=ValueError)
def test_ogip_unusable_function():
u_format.OGIP.parse("acos(m)**2")
@pytest.mark.parametrize("string", ["sqrt(m)**3", "sqrt(m**3)", "(sqrt(m))**3"])
def test_ogip_sqrt(string):
# Regression test for #16743 - sqrt(m)**3 caused a ValueError
assert u_format.OGIP.parse(string) == u.m ** Fraction(3, 2)
@pytest.mark.parametrize(
"string,message,unit",
[
pytest.param(
"m(s)**2",
(
r"^if 'm\(s\)\*\*2' was meant to be a multiplication, "
r"it should have been written as 'm \(s\)\*\*2'.$"
),
u.m * u.s**2,
id="m(s)**2",
),
pytest.param(
"m(s)",
(
r"^if 'm\(s\)' was meant to be a multiplication, "
r"it should have been written as 'm \(s\)'.$"
),
u.m * u.s,
id="m(s)",
),
],
)
def test_ogip_invalid_multiplication(string, message, unit):
# Regression test for #16749
with pytest.warns(UnitParserWarning, match=message):
assert u_format.OGIP.parse(string) == unit
@pytest.mark.parametrize(
"string,unit,power",
[
pytest.param("s**-1", u.s**-1, "-1", id="int_unit_power"),
pytest.param("m**-2.0", u.m**-2, "-2.0", id="float_unit_power"),
pytest.param("10**-3 kg", u.g, "-3", id="int_scale_power"),
],
)
def test_ogip_negative_exponent_parenthesis(string, unit, power):
# Regression test for #16788 - negative powers require parenthesis
with pytest.warns(
UnitParserWarning,
match=(
r"^negative exponents must be enclosed in parenthesis\. "
rf"Expected '\*\*\({power}\)' instead of '\*\*{power}'\.$"
),
):
assert u_format.OGIP.parse(string) == unit
def test_ogip_ohm():
# Regression test for #17200 - OGIP converted u.ohm to 'V / A'
assert u_format.OGIP.to_string(u.ohm) == "ohm"
@pytest.mark.parametrize(
"string",
[pytest.param("m**(-0.5)", id="float"), pytest.param("m**(-1/2)", id="fraction")],
)
def test_ogip_negative_powers(string):
# Regression test for #18776 - negative fractions were not recognized
assert u_format.OGIP.parse(string) == u.m**-0.5
|
StringUnitPair
|
python
|
crytic__slither
|
slither/tools/properties/properties/properties.py
|
{
"start": 265,
"end": 483
}
|
class ____(Enum):
OWNER = 1
SENDER = 2
ATTACKER = 3
ALL = 4 # If all the actors should call the function. Typically if the test uses msg.sender
ANY = 5 # If the caller does not matter
|
PropertyCaller
|
python
|
Textualize__textual
|
src/textual/__init__.py
|
{
"start": 1125,
"end": 5694
}
|
class ____:
"""A [logger class](/guide/devtools/#logging-handler) that logs to the Textual [console](/guide/devtools#console)."""
def __init__(
self,
log_callable: LogCallable | None,
group: LogGroup = LogGroup.INFO,
verbosity: LogVerbosity = LogVerbosity.NORMAL,
app: _App | None = None,
) -> None:
self._log = log_callable
self._group = group
self._verbosity = verbosity
self._app = None if app is None else weakref.ref(app)
@property
def app(self) -> _App | None:
"""The associated application, or `None` if there isn't one."""
return None if self._app is None else self._app()
def __rich_repr__(self) -> rich.repr.Result:
yield self._group, LogGroup.INFO
yield self._verbosity, LogVerbosity.NORMAL
def __call__(self, *args: object, **kwargs) -> None:
if constants.LOG_FILE:
output = " ".join(str(arg) for arg in args)
if kwargs:
key_values = " ".join(
f"{key}={value!r}" for key, value in kwargs.items()
)
output = f"{output} {key_values}" if output else key_values
with open(constants.LOG_FILE, "a", encoding="utf-8") as log_file:
print(output, file=log_file)
app = self.app
if app is None:
try:
app = active_app.get()
except LookupError:
if constants.DEBUG:
print_args = (
*args,
*[f"{key}={value!r}" for key, value in kwargs.items()],
)
print(*print_args)
return
if not app._is_devtools_connected:
return
current_frame = inspect.currentframe()
assert current_frame is not None
previous_frame = current_frame.f_back
assert previous_frame is not None
caller = inspect.getframeinfo(previous_frame)
_log = self._log or app._log
try:
_log(
self._group,
self._verbosity,
caller,
*args,
**kwargs,
)
except LoggerError:
            # If there is no active app, try printing
if constants.DEBUG:
print_args = (
*args,
*[f"{key}={value!r}" for key, value in kwargs.items()],
)
print(*print_args)
def verbosity(self, verbose: bool) -> Logger:
"""Get a new logger with selective verbosity.
Args:
verbose: True to use HIGH verbosity, otherwise NORMAL.
Returns:
New logger.
"""
verbosity = LogVerbosity.HIGH if verbose else LogVerbosity.NORMAL
return Logger(self._log, self._group, verbosity, app=self.app)
@property
def verbose(self) -> Logger:
"""A verbose logger."""
return Logger(self._log, self._group, LogVerbosity.HIGH, app=self.app)
@property
def event(self) -> Logger:
"""Logs events."""
return Logger(self._log, LogGroup.EVENT, app=self.app)
@property
def debug(self) -> Logger:
"""Logs debug messages."""
return Logger(self._log, LogGroup.DEBUG, app=self.app)
@property
def info(self) -> Logger:
"""Logs information."""
return Logger(self._log, LogGroup.INFO, app=self.app)
@property
def warning(self) -> Logger:
"""Logs warnings."""
return Logger(self._log, LogGroup.WARNING, app=self.app)
@property
def error(self) -> Logger:
"""Logs errors."""
return Logger(self._log, LogGroup.ERROR, app=self.app)
@property
def system(self) -> Logger:
"""Logs system information."""
return Logger(self._log, LogGroup.SYSTEM, app=self.app)
@property
def logging(self) -> Logger:
"""Logs from stdlib logging module."""
return Logger(self._log, LogGroup.LOGGING, app=self.app)
@property
def worker(self) -> Logger:
"""Logs worker information."""
return Logger(self._log, LogGroup.WORKER, app=self.app)
log = Logger(None)
"""Global logger that logs to the currently active app.
Example:
```python
from textual import log
log(locals())
```
!!! note
This logger will only work if there is an active app in the current thread.
Use `app.log` to write logs from a thread without an active app.
"""
|
Logger
|
python
|
doocs__leetcode
|
lcof/面试题26. 树的子结构/Solution.py
|
{
"start": 164,
"end": 647
}
|
class ____:
def isSubStructure(self, A: TreeNode, B: TreeNode) -> bool:
def dfs(A, B):
if B is None:
return True
if A is None or A.val != B.val:
return False
return dfs(A.left, B.left) and dfs(A.right, B.right)
if A is None or B is None:
return False
if dfs(A, B):
return True
return self.isSubStructure(A.left, B) or self.isSubStructure(A.right, B)
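# --- Hedged usage sketch (assumes the blank above is `Solution`, and that
# TreeNode is the usual LeetCode node; a (val, left, right) constructor is
# assumed here for brevity) ---
# a = TreeNode(3, TreeNode(4, TreeNode(1), TreeNode(2)), TreeNode(5))
# b = TreeNode(4, TreeNode(1))
# Solution().isSubStructure(a, b)  # True: B matches the subtree at A.left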
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/checkpoint/testdata/generate_checkpoint.py
|
{
"start": 1054,
"end": 1551
}
|
class ____(module.Module):
"""Three vars (one in a sub-module) and compute method."""
def __init__(self):
default_value = -1
empty_key = 0
deleted_key = -1
self.lookup_table = lookup_ops.DenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
initial_num_buckets=32)
self.lookup_table.insert(1, 1)
self.lookup_table.insert(2, 4)
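# --- Hedged usage sketch (not part of the original file; assumes the blank
# above is TableModule) ---
# DenseHashTable.lookup returns the stored value, or default_value (-1)
# for keys that were never inserted:
# m = TableModule()
# m.lookup_table.lookup(constant_op.constant(2, dtype=dtypes.int64))  # -> 4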
|
TableModule
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mssql/pyodbc.py
|
{
"start": 19693,
"end": 19937
}
|
class ____(_MSUnicodeText):
def get_dbapi_type(self, dbapi):
if self.length in (None, "max") or self.length >= 2000:
return (dbapi.SQL_WVARCHAR, 0, 0)
else:
return dbapi.SQL_WVARCHAR
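# --- Hedged note (not part of the original file) ---
# pyodbc accepts either a bare SQL type code or a (type, size, scale)
# triple from get_dbapi_type; the triple form above is used for long or
# unbounded NVARCHAR so the driver binds the parameter as max-length.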
|
_UnicodeText_pyodbc
|
python
|
spack__spack
|
lib/spack/spack/vendor/jinja2/nodes.py
|
{
"start": 14985,
"end": 15744
}
|
class ____(Expr):
"""Baseclass for all binary expressions."""
fields = ("left", "right")
left: Expr
right: Expr
operator: str
abstract = True
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if (
eval_ctx.environment.sandboxed
and self.operator in eval_ctx.environment.intercepted_binops # type: ignore
):
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception as e:
raise Impossible() from e
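# --- Hedged sketch (assumes the blank above is `BinExpr`, as in jinja2) ---
# Concrete binary nodes only pin `operator`; constant folding then flows
# through as_const via the _binop_to_func dispatch table:
# class Add(BinExpr):
#     operator = "+"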
|
BinExpr
|
python
|
scipy__scipy
|
scipy/stats/_discrete_distns.py
|
{
"start": 11193,
"end": 14525
}
|
class ____(rv_discrete):
r"""A beta-negative-binomial discrete random variable.
%(before_notes)s
Notes
-----
The beta-negative-binomial distribution is a negative binomial
distribution with a probability of success `p` that follows a
beta distribution.
The probability mass function for `betanbinom` is:
.. math::
f(k) = \binom{n + k - 1}{k} \frac{B(a + n, b + k)}{B(a, b)}
for :math:`k \ge 0`, :math:`n \geq 0`, :math:`a > 0`,
:math:`b > 0`, where :math:`B(a, b)` is the beta function.
`betanbinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.
%(after_notes)s
References
----------
.. [1] https://en.wikipedia.org/wiki/Beta_negative_binomial_distribution
.. versionadded:: 1.12.0
See Also
--------
betabinom : Beta binomial distribution
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
_ShapeInfo("a", False, (0, np.inf), (False, False)),
_ShapeInfo("b", False, (0, np.inf), (False, False))]
def _rvs(self, n, a, b, size=None, random_state=None):
p = random_state.beta(a, b, size)
return random_state.negative_binomial(n, p, size)
def _argcheck(self, n, a, b):
return (n >= 0) & _isintegral(n) & (a > 0) & (b > 0)
def _logpmf(self, x, n, a, b):
k = floor(x)
combiln = -np.log(n + k) - betaln(n, k + 1)
return combiln + betaln(a + n, b + k) - betaln(a, b)
def _pmf(self, x, n, a, b):
return exp(self._logpmf(x, n, a, b))
def _stats(self, n, a, b, moments='mv'):
# reference: Wolfram Alpha input
# BetaNegativeBinomialDistribution[a, b, n]
def mean(n, a, b):
return n * b / (a - 1.)
mu = xpx.apply_where(a > 1, (n, a, b), mean, fill_value=np.inf)
def var(n, a, b):
return (n * b * (n + a - 1.) * (a + b - 1.)
/ ((a - 2.) * (a - 1.)**2.))
var = xpx.apply_where(a > 2, (n, a, b), var, fill_value=np.inf)
g1, g2 = None, None
def skew(n, a, b):
return ((2 * n + a - 1.) * (2 * b + a - 1.)
/ (a - 3.) / sqrt(n * b * (n + a - 1.) * (b + a - 1.)
/ (a - 2.)))
if 's' in moments:
g1 = xpx.apply_where(a > 3, (n, a, b), skew, fill_value=np.inf)
def kurtosis(n, a, b):
term = (a - 2.)
term_2 = ((a - 1.)**2. * (a**2. + a * (6 * b - 1.)
+ 6. * (b - 1.) * b)
+ 3. * n**2. * ((a + 5.) * b**2. + (a + 5.)
* (a - 1.) * b + 2. * (a - 1.)**2)
+ 3 * (a - 1.) * n
* ((a + 5.) * b**2. + (a + 5.) * (a - 1.) * b
+ 2. * (a - 1.)**2.))
denominator = ((a - 4.) * (a - 3.) * b * n
* (a + b - 1.) * (a + n - 1.))
# Wolfram Alpha uses Pearson kurtosis, so we subtract 3 to get
# scipy's Fisher kurtosis
return term * term_2 / denominator - 3.
if 'k' in moments:
g2 = xpx.apply_where(a > 4, (n, a, b), kurtosis, fill_value=np.inf)
return mu, var, g1, g2
betanbinom = betanbinom_gen(name='betanbinom')
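# --- Hedged usage sketch (not part of the original module) ---
# The instance created above follows the standard rv_discrete API, e.g.:
# betanbinom.pmf(3, n=5, a=2.5, b=1.5)   # P(X = 3)
# betanbinom.mean(5, 2.5, 1.5)           # equals n*b/(a-1) = 5.0 here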
|
betanbinom_gen
|
python
|
pypa__warehouse
|
tests/unit/packaging/test_views.py
|
{
"start": 4410,
"end": 12256
}
|
class ____:
def test_normalizing_name_redirects(self, db_request):
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="3.0")
db_request.matchdict = {"name": project.name.swapcase()}
db_request.current_route_path = pretend.call_recorder(
lambda name: "/project/the-redirect/3.0/"
)
resp = views.release_detail(release, db_request)
assert isinstance(resp, HTTPMovedPermanently)
assert resp.headers["Location"] == "/project/the-redirect/3.0/"
assert db_request.current_route_path.calls == [
pretend.call(name=release.project.name)
]
def test_normalizing_version_redirects(self, db_request):
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="3.0")
db_request.matchdict = {"name": project.name, "version": "3.0.0.0.0"}
db_request.current_route_path = pretend.call_recorder(
lambda **kw: "/project/the-redirect/3.0/"
)
resp = views.release_detail(release, db_request)
assert isinstance(resp, HTTPMovedPermanently)
assert resp.headers["Location"] == "/project/the-redirect/3.0/"
assert db_request.current_route_path.calls == [
pretend.call(name=release.project.name, version=release.version)
]
def test_detail_rendered(self, db_request):
users = [UserFactory.create(), UserFactory.create(), UserFactory.create()]
project = ProjectFactory.create()
releases = [
ReleaseFactory.create(
project=project,
version=v,
description=DescriptionFactory.create(
raw="unrendered description",
html="rendered description",
content_type="text/html",
),
)
for v in ["1.0", "2.0", "3.0", "4.0.dev0"]
] + [
ReleaseFactory.create(
project=project,
version="5.0",
description=DescriptionFactory.create(
raw="plaintext description",
html="",
content_type="text/plain",
),
yanked=True,
yanked_reason="plaintext yanked reason",
)
]
files = [
FileFactory.create(
release=r,
filename=f"{project.name}-{r.version}.tar.gz",
python_version="source",
packagetype="sdist",
)
for r in releases
]
# Create a role for each user
for user in users:
RoleFactory.create(user=user, project=project)
result = views.release_detail(releases[1], db_request)
assert result == {
"project": project,
"release": releases[1],
"files": [files[1]],
"sdists": [files[1]],
"bdists": [],
"description": "rendered description",
"latest_version": project.latest_version,
"all_versions": [
(r.version, r.created, r.is_prerelease, r.yanked, r.yanked_reason)
for r in reversed(releases)
],
"maintainers": sorted(users, key=lambda u: u.username.lower()),
"license": None,
"PEP740AttestationViewer": views.PEP740AttestationViewer,
"wheel_filters_all": {"interpreters": [], "abis": [], "platforms": []},
"wheel_filters_params": {
"filename": "",
"interpreters": "",
"abis": "",
"platforms": "",
},
}
def test_detail_renders_files_natural_sort(self, db_request):
"""Tests that when a release has multiple versions of Python,
the sort order is most recent Python version first."""
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="3.0")
files = [
FileFactory.create(
release=release,
filename="-".join(
[project.name, release.version, py_ver, py_abi, py_platform]
)
+ ".whl",
python_version="py2.py3",
packagetype="bdist_wheel",
)
for py_ver in ["cp27", "cp310", "cp39"] # intentionally out of order
for py_abi in ["none"]
for py_platform in ["any"]
]
sorted_files = natsorted(files, reverse=True, key=lambda f: f.filename)
result = views.release_detail(release, db_request)
assert result["files"] == sorted_files
assert [file.wheel_filters for file in result["files"]] == [
{"interpreters": ["cp310"], "abis": ["none"], "platforms": ["any"]},
{"interpreters": ["cp39"], "abis": ["none"], "platforms": ["any"]},
{"interpreters": ["cp27"], "abis": ["none"], "platforms": ["any"]},
]
def test_license_from_classifier(self, db_request):
"""A license label is added when a license classifier exists."""
other_classifier = ClassifierFactory.create(
classifier="Some :: Random :: Classifier"
)
classifier = ClassifierFactory.create(
classifier="License :: OSI Approved :: BSD License"
)
release = ReleaseFactory.create(
_classifiers=[other_classifier, classifier],
license="Will be added at the end",
)
result = views.release_detail(release, db_request)
assert result["license"] == "BSD License (Will be added at the end)"
def test_license_with_no_classifier(self, db_request):
"""With no classifier, a license is used from metadata."""
release = ReleaseFactory.create(license="MIT License")
result = views.release_detail(release, db_request)
assert result["license"] == "MIT License"
def test_multiline_license(self, db_request):
"""When license metadata is longer than one line, the first is used."""
release = ReleaseFactory.create(license="Multiline License\nhow terrible")
result = views.release_detail(release, db_request)
assert result["license"] == "Multiline License"
def test_no_license(self, db_request):
"""With no license classifier or metadata, no license is in context."""
release = ReleaseFactory.create()
result = views.release_detail(release, db_request)
assert result["license"] is None
def test_multiple_licenses_from_classifiers(self, db_request):
"""A license label is added when multiple license classifiers exist."""
license_1 = ClassifierFactory.create(
classifier="License :: OSI Approved :: BSD License"
)
license_2 = ClassifierFactory.create(
classifier="License :: OSI Approved :: MIT License"
)
release = ReleaseFactory.create(_classifiers=[license_1, license_2])
result = views.release_detail(release, db_request)
assert result["license"] == "BSD License, MIT License"
def test_long_singleline_license(self, db_request):
"""When license metadata contains no newlines, it gets truncated"""
release = ReleaseFactory.create(
license="Multiline License is very long, so long that it is far longer than"
" 100 characters, it's really so long, how terrible"
)
result = views.release_detail(release, db_request)
assert result["license"] == (
"Multiline License is very long, so long that it is far longer than 100 "
"characters, it's really so lo..."
)
|
TestReleaseDetail
|
python
|
google__jax
|
tests/lax_scipy_test.py
|
{
"start": 2970,
"end": 27045
}
|
class ____(jtu.JaxTestCase):
"""Tests for LAX-backed Scipy implementation."""
@jtu.sample_product(
[dict(shapes=shapes, axis=axis, use_b=use_b)
for shape_group in compatible_shapes
for use_b in [False, True]
for shapes in itertools.product(*(
(shape_group, shape_group) if use_b else (shape_group,)))
for axis in range(-max(len(shape) for shape in shapes),
max(len(shape) for shape in shapes))
],
dtype=float_dtypes + complex_dtypes + int_dtypes,
keepdims=[False, True],
return_sign=[False, True],
)
@jtu.ignore_warning(category=RuntimeWarning, message="invalid value encountered in .*")
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testLogSumExp(self, shapes, dtype, axis,
keepdims, return_sign, use_b):
if jnp.issubdtype(dtype, jnp.complexfloating) and scipy_version < (1, 13, 0):
self.skipTest("logsumexp of complex input uses scipy 1.13.0 semantics.")
if use_b and scipy_version >= (1, 15) and scipy_version < (1, 15, 3):
self.skipTest(
"TODO(https://github.com/scipy/scipy/issues/22903): logsumexp with a"
" b scale array is buggy in scipy 1.15"
)
rng = jtu.rand_default(self.rng())
# TODO(mattjj): test autodiff
if use_b:
def scipy_fun(array_to_reduce, scale_array):
res = osp_special.logsumexp(array_to_reduce, axis, keepdims=keepdims,
return_sign=return_sign, b=scale_array)
if dtype == np.int32:
res = jax.tree.map(lambda x: x.astype('float32'), res)
return res
def lax_fun(array_to_reduce, scale_array):
return lsp_special.logsumexp(array_to_reduce, axis, keepdims=keepdims,
return_sign=return_sign, b=scale_array)
args_maker = lambda: [rng(shapes[0], dtype), rng(shapes[1], dtype)]
else:
def scipy_fun(array_to_reduce):
res = osp_special.logsumexp(array_to_reduce, axis, keepdims=keepdims,
return_sign=return_sign)
if dtype == np.int32:
res = jax.tree.map(lambda x: x.astype('float32'), res)
return res
def lax_fun(array_to_reduce):
return lsp_special.logsumexp(array_to_reduce, axis, keepdims=keepdims,
return_sign=return_sign)
args_maker = lambda: [rng(shapes[0], dtype)]
tol = (
{np.float32: 2e-4, np.complex64: 2e-4}
if jtu.test_device_matches(["tpu"])
else None
)
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, rtol=tol, atol=tol)
tol = {np.float32: 1E-6, np.float64: 1E-14}
self._CompileAndCheck(lax_fun, args_maker, rtol=tol, atol=tol)
def testLogSumExpComplexSign(self):
# Tests behavior of complex sign, which changed in SciPy 1.13
x = jnp.array([1 + 1j, 2 - 1j, -2 + 3j])
logsumexp, sign = lsp_special.logsumexp(x, return_sign=True)
expected_sumexp = jnp.exp(x).sum()
expected_sign = expected_sumexp / abs(expected_sumexp).astype(x.dtype)
self.assertEqual(logsumexp.dtype, sign.real.dtype)
tol = 1E-4 if jtu.test_device_matches(['tpu']) else 1E-6
self.assertAllClose(sign, expected_sign, rtol=tol)
self.assertAllClose(sign * np.exp(logsumexp).astype(x.dtype), expected_sumexp, rtol=tol)
def testLogSumExpZeros(self):
# Regression test for https://github.com/jax-ml/jax/issues/5370
scipy_fun = lambda a, b: osp_special.logsumexp(a, b=b)
lax_fun = lambda a, b: lsp_special.logsumexp(a, b=b)
args_maker = lambda: [np.array([-1000, -2]), np.array([1, 0])]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker)
self._CompileAndCheck(lax_fun, args_maker)
def testLogSumExpOnes(self):
# Regression test for https://github.com/jax-ml/jax/issues/7390
args_maker = lambda: [np.ones(4, dtype='float32')]
with jax.debug_infs(True):
self._CheckAgainstNumpy(osp_special.logsumexp, lsp_special.logsumexp, args_maker)
self._CompileAndCheck(lsp_special.logsumexp, args_maker)
def testLogSumExpNans(self):
# Regression test for https://github.com/jax-ml/jax/issues/7634
with jax.debug_nans(True):
with jax.disable_jit():
result = lsp_special.logsumexp(1.0)
self.assertEqual(result, 1.0)
result = lsp_special.logsumexp(1.0, b=1.0)
self.assertEqual(result, 1.0)
def testLogSumExpInfs(self):
out, sign = lsp_special.logsumexp(jnp.array([1.0, np.inf]), return_sign=True)
self.assertEqual(out, np.inf)
self.assertEqual(sign, 1.0)
@jtu.sample_product(
shape=[(0,), (1,), (2,), (3,), (4,), (5,)],
dtype=float_dtypes,
)
def testLogSumExpWhere(self, shape, dtype):
rng = jtu.rand_default(self.rng())
x = rng(shape, dtype)
rng = jtu.rand_bool(self.rng())
mask = rng(shape, bool)
y_expected = osp_special.logsumexp(x[mask]) if mask.any() else -jnp.inf
y_actual = lsp_special.logsumexp(x, where=mask)
self.assertAllClose(y_expected, y_actual, check_dtypes=False)
def testLogSumExpWhereGrad(self):
x = jnp.array([0., 0., 0., 0., 100.])
g = jax.grad(lambda x: lsp_special.logsumexp(x, where=jnp.arange(5) < 4))(x)
self.assertAllClose(g, jnp.array([0.25, 0.25, 0.25, 0.25, 0.]))
@jtu.sample_product(
shape=all_shapes,
dtype=float_dtypes,
d=[1, 2, 5],
)
@jax.numpy_rank_promotion('raise')
def testMultigammaln(self, shape, dtype, d):
def scipy_fun(a):
return osp_special.multigammaln(a, d)
def lax_fun(a):
return lsp_special.multigammaln(a, d)
rng = jtu.rand_positive(self.rng())
args_maker = lambda: [rng(shape, dtype) + (d - 1) / 2.]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker,
tol={np.float32: 1e-3, np.float64: 1e-14},
check_dtypes=False)
self._CompileAndCheck(
lax_fun, args_maker, rtol={
np.float32: 5e-5 if jtu.test_device_matches(["tpu"]) else 1e-05,
np.float64: 4e-15
})
def testIssue980(self):
x = np.full((4,), -1e20, dtype=np.float32)
self.assertAllClose(np.zeros((4,), dtype=np.float32),
lsp_special.expit(x))
def testIssue13267(self):
"""Tests betaln(x, 1) across wide range of x."""
xs = jnp.geomspace(1, 1e30, 1000)
primals_out, tangents_out = jax.jvp(lsp_special.betaln, primals=[xs, 1.0], tangents=[jnp.ones_like(xs), 0.0])
# Check that betaln(x, 1) = -log(x).
# Betaln is still not perfect for small values, hence the atol (but it's close)
atol = 1e-3 if jtu.test_device_matches(["tpu"]) else 1e-5
self.assertAllClose(primals_out, -jnp.log(xs), atol=atol)
# Check that d/dx betaln(x, 1) = d/dx -log(x) = -1/x.
self.assertAllClose(tangents_out, -1 / xs, atol=atol)
def testXlogyShouldReturnZero(self):
self.assertAllClose(lsp_special.xlogy(0., 0.), 0., check_dtypes=False)
def testGradOfXlogyAtZero(self):
# https://github.com/jax-ml/jax/issues/15598
x0, y0 = 0.0, 3.0
d_xlog1py_dx = jax.grad(lsp_special.xlogy, argnums=0)(x0, y0)
self.assertAllClose(d_xlog1py_dx, lax.log(y0))
d_xlog1py_dy = jax.grad(lsp_special.xlogy, argnums=1)(x0, y0)
self.assertAllClose(d_xlog1py_dy, 0.0)
jtu.check_grads(lsp_special.xlogy, (x0, y0), order=2)
def testXlog1pyShouldReturnZero(self):
self.assertAllClose(lsp_special.xlog1py(0., -1.), 0., check_dtypes=False)
def testGradOfXlog1pyAtZero(self):
# https://github.com/jax-ml/jax/issues/15598
x0, y0 = 0.0, 3.0
d_xlog1py_dx = jax.grad(lsp_special.xlog1py, argnums=0)(x0, y0)
self.assertAllClose(d_xlog1py_dx, lax.log1p(y0))
d_xlog1py_dy = jax.grad(lsp_special.xlog1py, argnums=1)(x0, y0)
self.assertAllClose(d_xlog1py_dy, 0.0)
jtu.check_grads(lsp_special.xlog1py, (x0, y0), order=2)
def testXLogX(self):
scipy_op = lambda x: osp_special.xlogy(x, x)
lax_op = lsp_special_internal._xlogx
rng = jtu.rand_positive(self.rng())
args_maker = lambda: [rng((2, 3, 4), np.float32)]
self._CheckAgainstNumpy(
scipy_op, lax_op, args_maker,
rtol=5e-4 if jtu.test_device_matches(["tpu"]) else None)
self._CompileAndCheck(lax_op, args_maker)
jtu.check_grads(lax_op, args_maker(), order=1,
atol=.1 if jtu.test_device_matches(["tpu"]) else 1e-3,
rtol=.1, eps=1e-3)
def testGradOfEntrAtZero(self):
# https://github.com/jax-ml/jax/issues/15709
self.assertEqual(jax.jacfwd(lsp_special.entr)(0.0), jnp.inf)
self.assertEqual(jax.jacrev(lsp_special.entr)(0.0), jnp.inf)
@jtu.sample_product(
[dict(order=order, z=z, n_iter=n_iter)
for order, z, n_iter in zip(
[0, 1, 2, 3, 6], [0.01, 1.1, 11.4, 30.0, 100.6], [5, 20, 50, 80, 200]
)],
)
def testBesselJn(self, order, z, n_iter):
def lax_fun(z):
return lsp_special.bessel_jn(z, v=order, n_iter=n_iter)
def scipy_fun(z):
vals = [osp_special.jv(v, z) for v in range(order+1)]
return np.array(vals)
args_maker = lambda : [z]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, rtol=1E-6)
self._CompileAndCheck(lax_fun, args_maker, rtol=1E-8)
@jtu.sample_product(
order=[3, 4],
shape=[(2,), (3,), (4,), (3, 5), (2, 2, 3)],
dtype=float_dtypes,
)
def testBesselJnRandomPositiveZ(self, order, shape, dtype):
rng = jtu.rand_default(self.rng(), scale=1)
points = jnp.abs(rng(shape, dtype))
args_maker = lambda: [points]
def lax_fun(z):
return lsp_special.bessel_jn(z, v=order, n_iter=15)
def scipy_fun(z):
vals = [osp_special.jv(v, z) for v in range(order+1)]
return np.stack(vals, axis=0)
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, rtol=1E-6)
self._CompileAndCheck(lax_fun, args_maker, rtol=1E-8)
@jtu.sample_product(
l_max=[1, 2, 3, 6],
shape=[(5,), (10,)],
dtype=float_dtypes,
)
@jtu.ignore_warning(category=DeprecationWarning, message=".*scipy.special.lpmn.*")
@unittest.skipIf(scipy_version >= (1, 17, 0), "scipy.special.lpmn has been removed.")
def testLpmn(self, l_max, shape, dtype):
if jtu.is_device_tpu_at_least(6):
self.skipTest("TODO(b/364258243): fails on TPU v6+")
rng = jtu.rand_uniform(self.rng(), low=-0.2, high=0.9)
args_maker = lambda: [rng(shape, dtype)]
lax_fun = partial(lsp_special.lpmn, l_max, l_max)
def scipy_fun(z, m=l_max, n=l_max):
# scipy only supports scalar inputs for z, so we must loop here.
vals, derivs = zip(*(osp_special.lpmn(m, n, zi) for zi in z.astype('float64')))
return np.dstack(vals).astype(z.dtype), np.dstack(derivs).astype(z.dtype)
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, rtol=1e-5,
atol=3e-3, check_dtypes=False)
self._CompileAndCheck(lax_fun, args_maker, rtol=1E-5, atol=3e-3)
@jtu.sample_product(
l_max=[3, 4, 6, 32],
shape=[(2,), (3,), (4,), (64,)],
dtype=float_dtypes,
)
@jtu.ignore_warning(category=DeprecationWarning, message=".*scipy.special.lpmn.*")
@unittest.skipIf(scipy_version >= (1, 17, 0), "scipy.special.lpmn_values has been removed.")
def testNormalizedLpmnValues(self, l_max, shape, dtype):
rng = jtu.rand_uniform(self.rng(), low=-0.2, high=0.9)
args_maker = lambda: [rng(shape, dtype)]
# Note: we test only the normalized values, not the derivatives.
lax_fun = partial(lsp_special.lpmn_values, l_max, l_max, is_normalized=True)
def scipy_fun(z, m=l_max, n=l_max):
# scipy only supports scalar inputs for z, so we must loop here.
vals, _ = zip(*(osp_special.lpmn(m, n, zi) for zi in z.astype('float64')))
a = np.dstack(vals)
# apply the normalization
num_m, num_l, _ = a.shape
a_normalized = np.zeros_like(a)
for m in range(num_m):
for l in range(num_l):
c0 = (2.0 * l + 1.0) * osp_special.factorial(l - m)
c1 = (4.0 * np.pi) * osp_special.factorial(l + m)
c2 = np.sqrt(c0 / c1)
a_normalized[m, l] = c2 * a[m, l]
return a_normalized.astype(z.dtype)
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker,
rtol=1e-5, atol=1e-5, check_dtypes=False)
self._CompileAndCheck(lax_fun, args_maker, rtol=1E-6, atol=1E-6)
@jtu.ignore_warning(category=DeprecationWarning,
message=".*scipy.special.sph_harm.*")
@unittest.skipIf(scipy_version >= (1, 17, 0), "scipy.special.sph_harm has been removed.")
@jax.numpy_dtype_promotion('standard') # This test explicitly exercises dtype promotion
def testSphHarmAccuracy(self):
if not hasattr(lsp_special, 'sph_harm'):
self.skipTest("jax.scipy.special.sph_harm has been removed.")
m = jnp.arange(-3, 3)[:, None]
n = jnp.arange(3, 6)
n_max = 5
theta = 0.0
phi = jnp.pi
expected = lsp_special.sph_harm(m, n, theta, phi, n_max)
actual = osp_special.sph_harm(m, n, theta, phi)
self.assertAllClose(actual, expected, rtol=1e-8, atol=9e-5)
@jtu.ignore_warning(category=DeprecationWarning,
message=".*scipy.special.sph_harm.*")
@unittest.skipIf(scipy_version >= (1, 17, 0), "scipy.special.sph_harm has been removed.")
@jax.numpy_dtype_promotion('standard') # This test explicitly exercises dtype promotion
def testSphHarmOrderZeroDegreeZero(self):
"""Tests the spherical harmonics of order zero and degree zero."""
if not hasattr(lsp_special, 'sph_harm'):
self.skipTest("jax.scipy.special.sph_harm has been removed.")
theta = jnp.array([0.3])
phi = jnp.array([2.3])
n_max = 0
expected = jnp.array([1.0 / jnp.sqrt(4.0 * np.pi)])
actual = jnp.real(
lsp_special.sph_harm(jnp.array([0]), jnp.array([0]), theta, phi, n_max))
self.assertAllClose(actual, expected, rtol=1.1e-7, atol=3e-8)
@jtu.ignore_warning(category=DeprecationWarning,
message=".*scipy.special.sph_harm.*")
@unittest.skipIf(scipy_version >= (1, 17, 0), "scipy.special.sph_harm has been removed.")
@jax.numpy_dtype_promotion('standard') # This test explicitly exercises dtype promotion
def testSphHarmOrderZeroDegreeOne(self):
"""Tests the spherical harmonics of order one and degree zero."""
if not hasattr(lsp_special, 'sph_harm'):
self.skipTest("jax.scipy.special.sph_harm has been removed.")
theta = jnp.array([2.0])
phi = jnp.array([3.1])
n_max = 1
expected = jnp.sqrt(3.0 / (4.0 * np.pi)) * jnp.cos(phi)
actual = jnp.real(
lsp_special.sph_harm(jnp.array([0]), jnp.array([1]), theta, phi, n_max))
self.assertAllClose(actual, expected, rtol=2e-7, atol=6e-8)
@jtu.ignore_warning(category=DeprecationWarning,
message=".*scipy.special.sph_harm.*")
@unittest.skipIf(scipy_version >= (1, 17, 0), "scipy.special.sph_harm has been removed.")
@jax.numpy_dtype_promotion('standard') # This test explicitly exercises dtype promotion
def testSphHarmOrderOneDegreeOne(self):
"""Tests the spherical harmonics of order one and degree one."""
if not hasattr(lsp_special, 'sph_harm'):
self.skipTest("jax.scipy.special.sph_harm has been removed.")
theta = jnp.array([2.0])
phi = jnp.array([2.5])
n_max = 1
expected = (-1.0 / 2.0 * jnp.sqrt(3.0 / (2.0 * np.pi)) *
jnp.sin(phi) * jnp.exp(1j * theta))
actual = lsp_special.sph_harm(
jnp.array([1]), jnp.array([1]), theta, phi, n_max)
self.assertAllClose(actual, expected, rtol=1e-8, atol=6e-8)
@jtu.sample_product(
[dict(l_max=l_max, num_z=num_z)
for l_max, num_z in zip([1, 3, 8, 10], [2, 6, 7, 8])
],
dtype=jtu.dtypes.all_integer,
)
@jtu.ignore_warning(category=DeprecationWarning,
message=".*scipy.special.sph_harm.*")
@unittest.skipIf(scipy_version >= (1, 17, 0), "scipy.special.sph_harm has been removed.")
@jax.numpy_dtype_promotion('standard') # This test explicitly exercises dtype promotion
def testSphHarmForJitAndAgainstNumpy(self, l_max, num_z, dtype):
"""Tests against JIT compatibility and Numpy."""
if not hasattr(lsp_special, 'sph_harm'):
self.skipTest("jax.scipy.special.sph_harm has been removed.")
if jtu.is_device_tpu_at_least(6):
self.skipTest("TODO(b/364258243): fails on TPU v6+")
n_max = l_max
shape = (num_z,)
rng = jtu.rand_int(self.rng(), -l_max, l_max + 1)
lsp_special_fn = partial(lsp_special.sph_harm, n_max=n_max)
def args_maker():
m = rng(shape, dtype)
n = abs(m)
theta = np.linspace(-4.0, 5.0, num_z)
phi = np.linspace(-2.0, 1.0, num_z)
return m, n, theta, phi
with self.subTest('Test JIT compatibility'):
self._CompileAndCheck(lsp_special_fn, args_maker)
with self.subTest('Test against numpy.'):
self._CheckAgainstNumpy(osp_special.sph_harm, lsp_special_fn, args_maker)
@jtu.ignore_warning(category=DeprecationWarning,
message=".*scipy.special.sph_harm.*")
@unittest.skipIf(scipy_version >= (1, 17, 0), "scipy.special.sph_harm has been removed.")
@jax.numpy_dtype_promotion('standard') # This test explicitly exercises dtype promotion
def testSphHarmCornerCaseWithWrongNmax(self):
"""Tests the corner case where `n_max` is not the maximum value of `n`."""
if not hasattr(lsp_special, 'sph_harm'):
self.skipTest("jax.scipy.special.sph_harm has been removed.")
m = jnp.array([2])
n = jnp.array([10])
n_clipped = jnp.array([6])
n_max = 6
theta = jnp.array([0.9])
phi = jnp.array([0.2])
expected = lsp_special.sph_harm(m, n, theta, phi, n_max)
actual = lsp_special.sph_harm(m, n_clipped, theta, phi, n_max)
self.assertAllClose(actual, expected, rtol=1e-8, atol=9e-5)
@jtu.sample_product(
[dict(l_max=l_max, num_z=num_z)
for l_max, num_z in zip([1, 3, 8, 10], [2, 6, 7, 8])
],
dtype=jtu.dtypes.all_integer,
)
@jax.numpy_dtype_promotion('standard') # This test explicitly exercises dtype promotion
def testSphHarmY(self, l_max, num_z, dtype):
if jtu.is_device_tpu_at_least(6):
self.skipTest("TODO(b/364258243): fails on TPU v6+")
n_max = l_max
shape = (num_z,)
rng = jtu.rand_int(self.rng(), -l_max, l_max + 1)
def args_maker():
m = rng(shape, dtype)
n = abs(m)
theta = np.linspace(-2.0, 1.0, num_z)
phi = np.linspace(-4.0, 5.0, num_z)
return n, m, theta, phi
lsp_special_fn = partial(lsp_special.sph_harm_y, n_max=n_max)
self._CompileAndCheck(lsp_special_fn, args_maker)
if scipy_version < (1, 15, 0):
osp_special_fn = lambda n, m, theta, phi: osp_special.sph_harm(m, n, phi, theta)
else:
osp_special_fn = osp_special.sph_harm_y
self._CheckAgainstNumpy(osp_special_fn, lsp_special_fn, args_maker)
@jtu.sample_product(
n_zero_sv=n_zero_svs,
degeneracy=degeneracies,
geometric_spectrum=geometric_spectra,
max_sv=max_svs,
shape=polar_shapes,
method=methods,
side=sides,
nonzero_condition_number=nonzero_condition_numbers,
dtype=jtu.dtypes.inexact,
seed=seeds,
)
def testPolar(
self, n_zero_sv, degeneracy, geometric_spectrum, max_sv, shape, method,
side, nonzero_condition_number, dtype, seed):
""" Tests jax.scipy.linalg.polar."""
if not jtu.test_device_matches(["cpu"]):
if jnp.dtype(dtype).name in ("bfloat16", "float16"):
raise unittest.SkipTest("Skip half precision off CPU.")
m, n = shape
if (method == "qdwh" and ((side == "left" and m >= n) or
(side == "right" and m < n))):
raise unittest.SkipTest("method=qdwh does not support these sizes")
matrix, _ = _initialize_polar_test(self.rng(),
shape, n_zero_sv, degeneracy, geometric_spectrum, max_sv,
nonzero_condition_number, dtype)
if jnp.dtype(dtype).name in ("bfloat16", "float16"):
self.assertRaises(
NotImplementedError, jsp.linalg.polar, matrix, method=method,
side=side)
return
unitary, posdef = jsp.linalg.polar(matrix, method=method, side=side)
if shape[0] >= shape[1]:
should_be_eye = np.matmul(unitary.conj().T, unitary)
else:
should_be_eye = np.matmul(unitary, unitary.conj().T)
tol = 650 * float(jnp.finfo(matrix.dtype).eps)
eye_mat = np.eye(should_be_eye.shape[0], dtype=should_be_eye.dtype)
with self.subTest('Test unitarity.'):
self.assertAllClose(
eye_mat, should_be_eye, atol=tol * 1000 * min(shape))
with self.subTest('Test Hermiticity.'):
self.assertAllClose(
posdef, posdef.conj().T, atol=tol * jnp.linalg.norm(posdef))
ev, _ = np.linalg.eigh(posdef)
ev = ev[np.abs(ev) > tol * np.linalg.norm(posdef)]
negative_ev = jnp.sum(ev < 0.)
with self.subTest('Test positive definiteness.'):
self.assertEqual(negative_ev, 0)
if side == "right":
recon = jnp.matmul(unitary, posdef, precision=lax.Precision.HIGHEST)
elif side == "left":
recon = jnp.matmul(posdef, unitary, precision=lax.Precision.HIGHEST)
with self.subTest('Test reconstruction.'):
self.assertAllClose(
matrix, recon, atol=tol * jnp.linalg.norm(matrix))
@jtu.sample_product(
n_obs=[1, 3, 5],
n_codes=[1, 2, 4],
n_feats=[()] + [(i,) for i in range(1, 3)],
dtype=float_dtypes + int_dtypes, # scipy doesn't support complex
)
def test_vq(self, n_obs, n_codes, n_feats, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng((n_obs, *n_feats), dtype), rng((n_codes, *n_feats), dtype)]
self._CheckAgainstNumpy(osp_cluster.vq.vq, lsp_cluster.vq.vq, args_maker, check_dtypes=False)
self._CompileAndCheck(lsp_cluster.vq.vq, args_maker)
@jtu.sample_product(
shape=all_shapes,
dtype=float_dtypes,
)
def test_spence(self, shape, dtype):
rng = jtu.rand_positive(self.rng())
args_maker = lambda: [rng(shape, dtype)]
with self.subTest('Test against SciPy'):
rtol = 1e-4 if jtu.test_device_matches(["tpu"]) else 1e-8
self._CheckAgainstNumpy(osp_special.spence, lsp_special.spence, args_maker,
rtol=rtol, check_dtypes=False)
with self.subTest('Test JIT compatibility'):
self._CompileAndCheck(lsp_special.spence, args_maker)
# This function is not defined for negative values; this makes sure they are NaN
with self.subTest('Test Negative Values'):
x = -rng(shape, dtype)
nan_array = jnp.nan * jnp.ones_like(x)
actual = lsp_special.spence(x)
self.assertArraysEqual(actual, nan_array, check_dtypes=False)
@jtu.sample_product(
[dict(yshape=yshape, xshape=xshape, dx=dx, axis=axis)
for yshape, xshape, dx, axis in [
((10,), None, 1.0, -1),
((3, 10), None, 2.0, -1),
((3, 10), None, 3.0, -0),
((10, 3), (10,), 1.0, -2),
((3, 10), (10,), 1.0, -1),
((3, 10), (3, 10), 1.0, -1),
((2, 3, 10), (3, 10), 1.0, -2),
]
],
dtype=float_dtypes + int_dtypes,
)
@jtu.skip_on_devices("tpu") # TODO(jakevdp): fix and re-enable this test.
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testIntegrateTrapezoid(self, yshape, xshape, dtype, dx, axis):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(yshape, dtype), rng(xshape, dtype) if xshape is not None else None]
np_fun = partial(scipy.integrate.trapezoid, dx=dx, axis=axis)
jnp_fun = partial(jax.scipy.integrate.trapezoid, dx=dx, axis=axis)
tol = jtu.tolerance(dtype, {np.float16: 2e-3, np.float64: 1e-12,
jax.dtypes.bfloat16: 4e-2})
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol,
check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol,
check_dtypes=False)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
LaxBackedScipyTests
|
python
|
encode__django-rest-framework
|
tests/browsable_api/test_form_rendering.py
|
{
"start": 866,
"end": 1810
}
|
class ____(TestCase):
"""
POSTing a list of data to a regular view should not cause the browsable
API to fail during rendering.
Regression test for https://github.com/encode/django-rest-framework/issues/5637
"""
def test_json_response(self):
# sanity check for non-browsable API responses
view = StandardPostView.as_view()
request = factory.post('/', [{}], format='json')
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('non_field_errors' in response.data)
def test_browsable_api(self):
view = StandardPostView.as_view()
request = factory.post('/?format=api', [{}], format='json')
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('non_field_errors' in response.data)
|
TestPostingListData
|
python
|
django__django
|
tests/admin_changelist/models.py
|
{
"start": 3345,
"end": 3440
}
|
class ____(models.Model):
char_pk = models.CharField(max_length=100, primary_key=True)
|
CharPK
|
python
|
spack__spack
|
lib/spack/spack/spec.py
|
{
"start": 208000,
"end": 210671
}
|
class ____(SpecfileReaderBase):
SPEC_VERSION = 1
@classmethod
def load(cls, data):
"""Construct a spec from JSON/YAML using the format version 1.
Note: Version 1 format has no notion of a build_spec, and names are
guaranteed to be unique. This function is guaranteed to read specs as
old as v0.10; it has not been checked for older formats.
Args:
data: a nested dict/list data structure read from YAML or JSON.
"""
nodes = data["spec"]
# Read nodes out of list. Root spec is the first element;
# dependencies are the following elements.
dep_list = [cls.from_node_dict(node) for node in nodes]
if not dep_list:
raise spack.error.SpecError("specfile contains no nodes.")
deps = {spec.name: spec for spec in dep_list}
result = dep_list[0]
for node in nodes:
# get dependency dict from the node.
name, data = cls.name_and_data(node)
for dname, _, dtypes, _, virtuals, direct in cls.dependencies_from_node_dict(data):
deps[name]._add_dependency(
deps[dname], depflag=dt.canonicalize(dtypes), virtuals=virtuals, direct=direct
)
reconstruct_virtuals_on_edges(result)
return result
@classmethod
def name_and_data(cls, node):
name = next(iter(node))
node = node[name]
return name, node
@classmethod
def dependencies_from_node_dict(cls, node):
if "dependencies" not in node:
return []
for t in cls.read_specfile_dep_specs(node["dependencies"]):
yield t
@classmethod
def read_specfile_dep_specs(cls, deps, hash_type=ht.dag_hash.name):
"""Read the DependencySpec portion of a YAML-formatted Spec.
This needs to be backward-compatible with older spack spec
formats so that reindex will work on old specs/databases.
"""
for dep_name, elt in deps.items():
if isinstance(elt, dict):
for h in ht.HASHES:
if h.name in elt:
dep_hash, deptypes = elt[h.name], elt["type"]
hash_type = h.name
virtuals = []
break
else: # We never determined a hash type...
raise spack.error.SpecError("Couldn't parse dependency spec.")
else:
raise spack.error.SpecError("Couldn't parse dependency types in spec.")
yield dep_name, dep_hash, list(deptypes), hash_type, list(virtuals), True
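# --- Hedged usage sketch (assumes the blank above is SpecfileV1) ---
# `data` is the parsed YAML/JSON mapping; its "spec" key must hold a
# non-empty node list with the root spec first:
# spec = SpecfileV1.load(data)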
|
SpecfileV1
|
python
|
scikit-learn__scikit-learn
|
sklearn/linear_model/_glm/glm.py
|
{
"start": 1009,
"end": 17729
}
|
class ____(RegressorMixin, BaseEstimator):
"""Regression via a penalized Generalized Linear Model (GLM).
GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at fitting and
predicting the mean of the target y as y_pred=h(X*w) with coefficients w.
Therefore, the fit minimizes the following objective function with L2 priors as
regularizer::
1/(2*sum(s_i)) * sum(s_i * deviance(y_i, h(x_i*w))) + 1/2 * alpha * ||w||_2^2
with inverse link function h, s=sample_weight and per observation (unit) deviance
deviance(y_i, h(x_i*w)). Note that for an EDM, 1/2 * deviance is the negative
log-likelihood up to a constant (in w) term.
The parameter ``alpha`` corresponds to the lambda parameter in glmnet.
Instead of implementing the EDM family and a link function separately, we directly
use the loss functions `from sklearn._loss` which have the link functions included
in them for performance reasons. We pick the loss functions that implement
(1/2 times) EDM deviances.
Read more in the :ref:`User Guide <Generalized_linear_models>`.
.. versionadded:: 0.23
Parameters
----------
alpha : float, default=1
Constant that multiplies the penalty term and thus determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
Values must be in the range `[0.0, inf)`.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor (X @ coef + intercept).
solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
Algorithm to use in the optimization problem:
'lbfgs'
Calls scipy's L-BFGS-B optimizer.
'newton-cholesky'
Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
iterated reweighted least squares) with an inner Cholesky based solver.
This solver is a good choice for `n_samples` >> `n_features`, especially
with one-hot encoded categorical features with rare categories. Be aware
that the memory usage of this solver has a quadratic dependency on
`n_features` because it explicitly computes the Hessian matrix.
.. versionadded:: 1.2
max_iter : int, default=100
The maximal number of iterations for the solver.
Values must be in the range `[1, inf)`.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
Values must be in the range `(0.0, inf)`.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for ``coef_`` and ``intercept_``.
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Values must be in the range `[0, inf)`.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X @ coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_iter_ : int
Actual number of iterations used in the solver.
_base_loss : BaseLoss, default=HalfSquaredError()
This is set during fit via `self._get_loss()`.
A `_base_loss` contains a specific loss function as well as the link
function. The loss to be minimized specifies the distributional assumption of
the GLM, i.e. the distribution from the EDM. Here are some examples:
======================= ======== ==========================
_base_loss Link Target Domain
======================= ======== ==========================
HalfSquaredError identity y any real number
HalfPoissonLoss log 0 <= y
HalfGammaLoss log 0 < y
HalfTweedieLoss log dependent on tweedie power
HalfTweedieLossIdentity identity dependent on tweedie power
======================= ======== ==========================
The link function of the GLM, i.e. mapping from linear predictor
`X @ coeff + intercept` to prediction `y_pred`. For instance, with a log link,
we have `y_pred = exp(X @ coeff + intercept)`.
"""
# We allow for NewtonSolver classes for the "solver" parameter but do not
# make them public in the docstrings. This facilitates testing and
# benchmarking.
_parameter_constraints: dict = {
"alpha": [Interval(Real, 0.0, None, closed="left")],
"fit_intercept": ["boolean"],
"solver": [
StrOptions({"lbfgs", "newton-cholesky"}),
Hidden(type),
],
"max_iter": [Interval(Integral, 1, None, closed="left")],
"tol": [Interval(Real, 0.0, None, closed="neither")],
"warm_start": ["boolean"],
"verbose": ["verbose"],
}
def __init__(
self,
*,
alpha=1.0,
fit_intercept=True,
solver="lbfgs",
max_iter=100,
tol=1e-4,
warm_start=False,
verbose=0,
):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.solver = solver
self.max_iter = max_iter
self.tol = tol
self.warm_start = warm_start
self.verbose = verbose
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit a Generalized Linear Model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted model.
"""
X, y = validate_data(
self,
X,
y,
accept_sparse=["csc", "csr"],
dtype=[np.float64, np.float32],
y_numeric=True,
multi_output=False,
)
# required by losses
if self.solver == "lbfgs":
# lbfgs will force coef and therefore raw_prediction to be float64. The
# base_loss needs y, X @ coef and sample_weight all of same dtype
# (and contiguous).
loss_dtype = np.float64
else:
loss_dtype = min(max(y.dtype, X.dtype), np.float64)
y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False)
if sample_weight is not None:
# Note that _check_sample_weight calls check_array(order="C") required by
# losses.
sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype)
n_samples, n_features = X.shape
self._base_loss = self._get_loss()
linear_loss = LinearModelLoss(
base_loss=self._base_loss,
fit_intercept=self.fit_intercept,
)
if not linear_loss.base_loss.in_y_true_range(y):
raise ValueError(
"Some value(s) of y are out of the valid range of the loss"
f" {self._base_loss.__class__.__name__!r}."
)
# TODO: if alpha=0 check that X is not rank deficient
# NOTE: Rescaling of sample_weight:
# We want to minimize
# obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance)
# + 1/2 * alpha * L2,
# with
# deviance = 2 * loss.
# The objective is invariant to multiplying sample_weight by a constant. We
# could choose this constant such that sum(sample_weight) = 1 in order to end
# up with
# obj = sum(sample_weight * loss) + 1/2 * alpha * L2.
# But LinearModelLoss.loss() already computes
# average(loss, weights=sample_weight)
# Thus, without rescaling, we have
# obj = LinearModelLoss.loss(...)
if self.warm_start and hasattr(self, "coef_"):
if self.fit_intercept:
# LinearModelLoss needs intercept at the end of coefficient array.
coef = np.concatenate((self.coef_, np.array([self.intercept_])))
else:
coef = self.coef_
coef = coef.astype(loss_dtype, copy=False)
else:
coef = linear_loss.init_zero_coef(X, dtype=loss_dtype)
if self.fit_intercept:
coef[-1] = linear_loss.base_loss.link.link(
np.average(y, weights=sample_weight)
)
l2_reg_strength = self.alpha
n_threads = _openmp_effective_n_threads()
# Algorithms for optimization:
# Note again that our losses implement 1/2 * deviance.
if self.solver == "lbfgs":
func = linear_loss.loss_gradient
opt_res = scipy.optimize.minimize(
func,
coef,
method="L-BFGS-B",
jac=True,
options={
"maxiter": self.max_iter,
"maxls": 50, # default is 20
"gtol": self.tol,
# The constant 64 was found empirically to pass the test suite.
# The point is that ftol is very small, but a bit larger than
# machine precision for float64, which is the dtype used by lbfgs.
"ftol": 64 * np.finfo(float).eps,
**_get_additional_lbfgs_options_dict("iprint", self.verbose - 1),
},
args=(X, y, sample_weight, l2_reg_strength, n_threads),
)
self.n_iter_ = _check_optimize_result(
"lbfgs", opt_res, max_iter=self.max_iter
)
coef = opt_res.x
elif self.solver == "newton-cholesky":
sol = NewtonCholeskySolver(
coef=coef,
linear_loss=linear_loss,
l2_reg_strength=l2_reg_strength,
tol=self.tol,
max_iter=self.max_iter,
n_threads=n_threads,
verbose=self.verbose,
)
coef = sol.solve(X, y, sample_weight)
self.n_iter_ = sol.iteration
elif issubclass(self.solver, NewtonSolver):
sol = self.solver(
coef=coef,
linear_loss=linear_loss,
l2_reg_strength=l2_reg_strength,
tol=self.tol,
max_iter=self.max_iter,
n_threads=n_threads,
)
coef = sol.solve(X, y, sample_weight)
self.n_iter_ = sol.iteration
else:
raise ValueError(f"Invalid solver={self.solver}.")
if self.fit_intercept:
self.intercept_ = coef[-1]
self.coef_ = coef[:-1]
else:
# set intercept to zero as the other linear models do
self.intercept_ = 0.0
self.coef_ = coef
return self
def _linear_predictor(self, X):
"""Compute the linear_predictor = `X @ coef_ + intercept_`.
Note that we often use the term raw_prediction instead of linear predictor.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
Returns
-------
y_pred : array of shape (n_samples,)
Returns predicted values of linear predictor.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
accept_sparse=["csr", "csc", "coo"],
dtype=[np.float64, np.float32],
ensure_2d=True,
allow_nd=False,
reset=False,
)
return X @ self.coef_ + self.intercept_
def predict(self, X):
"""Predict using GLM with feature matrix X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
Returns
-------
y_pred : array of shape (n_samples,)
Returns predicted values.
"""
# check_array is done in _linear_predictor
raw_prediction = self._linear_predictor(X)
y_pred = self._base_loss.link.inverse(raw_prediction)
return y_pred
def score(self, X, y, sample_weight=None):
"""Compute D^2, the percentage of deviance explained.
D^2 is a generalization of the coefficient of determination R^2.
R^2 uses squared error and D^2 uses the deviance of this GLM, see the
:ref:`User Guide <regression_metrics>`.
D^2 is defined as
:math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
:math:`D_{null}` is the null deviance, i.e. the deviance of a model
with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
The mean :math:`\\bar{y}` is averaged by sample_weight.
Best possible score is 1.0 and it can be negative (because the model
can be arbitrarily worse).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True values of target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
D^2 of self.predict(X) w.r.t. y.
"""
# TODO: Adapt link to User Guide in the docstring, once
# https://github.com/scikit-learn/scikit-learn/pull/22118 is merged.
#
# Note, default score defined in RegressorMixin is R^2 score.
# TODO: make D^2 a score function in module metrics (and thereby get
# input validation and so on)
raw_prediction = self._linear_predictor(X) # validates X
# required by losses
y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False)
if sample_weight is not None:
# Note that _check_sample_weight calls check_array(order="C") required by
# losses.
sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)
base_loss = self._base_loss
if not base_loss.in_y_true_range(y):
raise ValueError(
"Some value(s) of y are out of the valid range of the loss"
f" {base_loss.__name__}."
)
constant = np.average(
base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None),
weights=sample_weight,
)
# Missing factor of 2 in deviance cancels out.
deviance = base_loss(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=1,
)
y_mean = base_loss.link.link(np.average(y, weights=sample_weight))
deviance_null = base_loss(
y_true=y,
raw_prediction=np.tile(y_mean, y.shape[0]),
sample_weight=sample_weight,
n_threads=1,
)
return 1 - (deviance + constant) / (deviance_null + constant)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
try:
# Create instance of BaseLoss if fit wasn't called yet. This is necessary as
# TweedieRegressor might set the used loss during fit different from
# self._base_loss.
base_loss = self._get_loss()
tags.target_tags.positive_only = not base_loss.in_y_true_range(-1.0)
except (ValueError, AttributeError, TypeError):
# This happens when the link or power parameter of TweedieRegressor is
# invalid. We fallback on the default tags in that case.
pass # pragma: no cover
return tags
def _get_loss(self):
"""This is only necessary because of the link and power arguments of the
TweedieRegressor.
Note that we do not need to pass sample_weight to the loss class as this is
only needed to set loss.constant_hessian on which GLMs do not rely.
"""
return HalfSquaredError()
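# --- Hedged sketch (illustrative, not from the original file; assumes the
# blank above is _GeneralizedLinearRegressor) ---
# Public regressors specialize this base class chiefly via _get_loss();
# a Poisson GLM, for instance, would return the half Poisson deviance:
# class PoissonRegressor(_GeneralizedLinearRegressor):
#     def _get_loss(self):
#         return HalfPoissonLoss()  # from sklearn._loss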
|
_GeneralizedLinearRegressor
|
python
|
run-llama__llama_index
|
llama-index-integrations/embeddings/llama-index-embeddings-gemini/llama_index/embeddings/gemini/base.py
|
{
"start": 606,
"end": 5050
}
|
class ____(BaseEmbedding):
"""
Google Gemini embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
api_base (Optional[str]): API base to access the model. Defaults to Official Base.
transport (Optional[str]): Transport to access the model.
"""
_model: gemini = PrivateAttr()
_request_options: Optional[gemini.types.RequestOptions] = PrivateAttr()
title: Optional[str] = Field(
default="",
description="Title is only applicable for retrieval_document tasks, and is used to represent a document title. For other tasks, title is invalid.",
)
task_type: Optional[str] = Field(
default="retrieval_document",
description="The task for embedding model.",
)
api_key: Optional[str] = Field(
default=None,
description="API key to access the model. Defaults to None.",
)
def __init__(
self,
model_name: str = "models/embedding-001",
task_type: Optional[str] = "retrieval_document",
api_key: Optional[str] = None,
api_base: Optional[str] = None,
transport: Optional[str] = None,
title: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
request_options: Optional[gemini.types.RequestOptions] = None,
**kwargs: Any,
):
# API keys are optional. The API can be authorised via OAuth (detected
# environmentally) or by the GOOGLE_API_KEY environment variable.
config_params: Dict[str, Any] = {
"api_key": api_key or os.getenv("GOOGLE_API_KEY"),
}
if api_base:
config_params["client_options"] = {"api_endpoint": api_base}
if transport:
config_params["transport"] = transport
# transport: A string, one of: [`rest`, `grpc`, `grpc_asyncio`].
super().__init__(
api_key=api_key,
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
title=title,
task_type=task_type,
**kwargs,
)
gemini.configure(**config_params)
self._model = gemini
self._request_options = request_options
@classmethod
def class_name(cls) -> str:
return "GeminiEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.embed_content(
model=self.model_name,
content=query,
title=self.title,
task_type=self.task_type,
request_options=self._request_options,
)["embedding"]
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
request_options=self._request_options,
)["embedding"]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return [
self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
request_options=self._request_options,
)["embedding"]
for text in texts
]
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return (await self._aget_text_embeddings([query]))[0]
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return (await self._aget_text_embeddings([text]))[0]
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
response = await self._model.embed_content_async(
model=self.model_name,
content=texts,
title=self.title,
task_type=self.task_type,
request_options=self._request_options,
)
return response["embedding"]
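# --- Hedged usage sketch (assumes the blank above is GeminiEmbedding) ---
# get_text_embedding/get_query_embedding are inherited from BaseEmbedding
# and delegate to the private methods above; GOOGLE_API_KEY can come from
# the environment instead of being passed explicitly:
# embed_model = GeminiEmbedding(model_name="models/embedding-001")
# vector = embed_model.get_text_embedding("hello world")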
|
GeminiEmbedding
|
python
|
TheAlgorithms__Python
|
other/lru_cache.py
|
{
"start": 137,
"end": 752
}
|
class ____[T, U]:
"""
Double Linked List Node built specifically for LRU Cache
>>> DoubleLinkedListNode(1,1)
Node: key: 1, val: 1, has next: False, has prev: False
"""
def __init__(self, key: T | None, val: U | None):
self.key = key
self.val = val
self.next: DoubleLinkedListNode[T, U] | None = None
self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__(self) -> str:
return (
f"Node: key: {self.key}, val: {self.val}, "
f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
)
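# --- Hedged usage sketch (assumes the blank above is DoubleLinkedListNode) ---
# Linking two nodes by hand; the LRU cache's list class normally does this:
# first = DoubleLinkedListNode(1, 1)
# second = DoubleLinkedListNode(2, 4)
# first.next, second.prev = second, first
# repr(first)  # 'Node: key: 1, val: 1, has next: True, has prev: False'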
|
DoubleLinkedListNode
|
python
|
openai__openai-python
|
src/openai/types/responses/response.py
|
{
"start": 1642,
"end": 12152
}
|
class ____(BaseModel):
id: str
"""Unique identifier for this Response."""
created_at: float
"""Unix timestamp (in seconds) of when this Response was created."""
error: Optional[ResponseError] = None
"""An error object returned when the model fails to generate a Response."""
incomplete_details: Optional[IncompleteDetails] = None
"""Details about why the response is incomplete."""
instructions: Union[str, List[ResponseInputItem], None] = None
"""A system (or developer) message inserted into the model's context.
When using along with `previous_response_id`, the instructions from a previous
response will not be carried over to the next response. This makes it simple to
swap out system (or developer) messages in new responses.
"""
metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
model: ResponsesModel
"""Model ID used to generate the response, like `gpt-4o` or `o3`.
OpenAI offers a wide range of models with different capabilities, performance
characteristics, and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
"""
object: Literal["response"]
"""The object type of this resource - always set to `response`."""
output: List[ResponseOutputItem]
"""An array of content items generated by the model.
- The length and order of items in the `output` array is dependent on the
model's response.
- Rather than accessing the first item in the `output` array and assuming it's
an `assistant` message with the content generated by the model, you might
consider using the `output_text` property where supported in SDKs.
"""
parallel_tool_calls: bool
"""Whether to allow the model to run tool calls in parallel."""
temperature: Optional[float] = None
"""What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will make the output more random, while lower values like
0.2 will make it more focused and deterministic. We generally recommend altering
this or `top_p` but not both.
"""
tool_choice: ToolChoice
"""
How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
"""
tools: List[Tool]
"""An array of tools the model may call while generating a response.
You can specify which tool to use by setting the `tool_choice` parameter.
We support the following categories of tools:
- **Built-in tools**: Tools that are provided by OpenAI that extend the model's
capabilities, like
[web search](https://platform.openai.com/docs/guides/tools-web-search) or
[file search](https://platform.openai.com/docs/guides/tools-file-search).
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
Learn more about
[function calling](https://platform.openai.com/docs/guides/function-calling).
You can also use custom tools to call your own code.
"""
top_p: Optional[float] = None
"""
An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
"""
background: Optional[bool] = None
"""
Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
"""
conversation: Optional[Conversation] = None
"""The conversation that this response belongs to.
Input items and output items from this response are automatically added to this
conversation.
"""
max_output_tokens: Optional[int] = None
"""
An upper bound for the number of tokens that can be generated for a response,
including visible output tokens and
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
"""
max_tool_calls: Optional[int] = None
"""
The maximum number of total calls to built-in tools that can be processed in a
response. This maximum number applies across all built-in tool calls, not per
individual tool. Any further attempts to call a tool by the model will be
ignored.
"""
previous_response_id: Optional[str] = None
"""The unique ID of the previous response to the model.
Use this to create multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Cannot be used in conjunction with `conversation`.
"""
prompt: Optional[ResponsePrompt] = None
"""
Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""
prompt_cache_key: Optional[str] = None
"""
Used by OpenAI to cache responses for similar requests to optimize your cache
hit rates. Replaces the `user` field.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
"""
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None
"""The retention policy for the prompt cache.
Set to `24h` to enable extended prompt caching, which keeps cached prefixes
active for longer, up to a maximum of 24 hours.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
"""
reasoning: Optional[Reasoning] = None
"""**gpt-5 and o-series models only**
Configuration options for
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
"""
safety_identifier: Optional[str] = None
"""
A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. Each ID should be a string that uniquely
identifies a user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None
"""Specifies the processing type used for serving the request.
- If set to 'auto', then the request will be processed with the service tier
configured in the Project settings. Unless otherwise configured, the Project
will use 'default'.
- If set to 'default', then the request will be processed with the standard
pricing and performance for the selected model.
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
'[priority](https://openai.com/api-priority-processing/)', then the request
will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'.
When the `service_tier` parameter is set, the response body will include the
`service_tier` value based on the processing mode actually used to serve the
request. This response value may be different from the value set in the
parameter.
"""
status: Optional[ResponseStatus] = None
"""The status of the response generation.
One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or
`incomplete`.
"""
text: Optional[ResponseTextConfig] = None
"""Configuration options for a text response from the model.
Can be plain text or structured JSON data. Learn more:
- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
- [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
"""
top_logprobs: Optional[int] = None
"""
An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
"""
truncation: Optional[Literal["auto", "disabled"]] = None
"""The truncation strategy to use for the model response.
- `auto`: If the input to this Response exceeds the model's context window size,
the model will truncate the response to fit the context window by dropping
items from the beginning of the conversation.
- `disabled` (default): If the input size will exceed the context window size
for a model, the request will fail with a 400 error.
"""
usage: Optional[ResponseUsage] = None
"""
Represents token usage details including input tokens, output tokens, a
breakdown of output tokens, and the total tokens used.
"""
user: Optional[str] = None
"""This field is being replaced by `safety_identifier` and `prompt_cache_key`.
Use `prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""
@property
def output_text(self) -> str:
"""Convenience property that aggregates all `output_text` items from the `output` list.
If no `output_text` content blocks exist, then an empty string is returned.
"""
texts: List[str] = []
for output in self.output:
if output.type == "message":
for content in output.content:
if content.type == "output_text":
texts.append(content.text)
return "".join(texts)
|
Response
|
python
|
doocs__leetcode
|
solution/0900-0999/0902.Numbers At Most N Given Digit Set/Solution.py
|
{
"start": 0,
"end": 619
}
|
class ____:
def atMostNGivenDigitSet(self, digits: List[str], n: int) -> int:
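# Digit DP over s = str(n):
#   i     - current digit position in s
#   lead  - 1 while still inside a run of leading zeros
#   limit - True while every digit chosen so far matches n's prefix, capping
#           the current digit at int(s[i]) instead of 9
# A path that stayed all leading zeros (lead == 1 at the end) encodes no
# number, hence the base case returns lead ^ 1.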
@cache
def dfs(i: int, lead: int, limit: bool) -> int:
if i >= len(s):
return lead ^ 1
up = int(s[i]) if limit else 9
ans = 0
for j in range(up + 1):
if j == 0 and lead:
ans += dfs(i + 1, 1, limit and j == up)
elif j in nums:
ans += dfs(i + 1, 0, limit and j == up)
return ans
s = str(n)
nums = {int(x) for x in digits}
return dfs(0, 1, True)
|
Solution
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/many_to_many/tutorial001_py39.py
|
{
"start": 602,
"end": 2428
}
|
class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
teams: list[Team] = Relationship(back_populates="heroes", link_model=HeroTeamLink)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond",
secret_name="Dive Wilson",
teams=[team_z_force, team_preventers],
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
teams=[team_preventers],
)
hero_spider_boy = Hero(
name="Spider-Boy", secret_name="Pedro Parqueador", teams=[team_preventers]
)
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Deadpond:", hero_deadpond)
print("Deadpond teams:", hero_deadpond.teams)
print("Rusty-Man:", hero_rusty_man)
print("Rusty-Man Teams:", hero_rusty_man.teams)
print("Spider-Boy:", hero_spider_boy)
print("Spider-Boy Teams:", hero_spider_boy.teams)
def main():
create_db_and_tables()
create_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
spack__spack
|
lib/spack/spack/detection/test.py
|
{
"start": 950,
"end": 1406
}
|
class ____(NamedTuple):
"""Data structure to construct detection tests by PATH inspection.
Packages may have a YAML file containing the description of one or more detection tests
to be performed. Each test creates a few mock executable scripts in a temporary folder,
and checks that detection by PATH gives the expected results.
"""
pkg_name: str
layout: List[MockExecutables]
results: List[ExpectedTestResult]
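# A hypothetical construction, assuming MockExecutables and ExpectedTestResult
# instances from the surrounding module:
#   DetectionTest(
#       pkg_name="gcc",
#       layout=[MockExecutables(...)],
#       results=[ExpectedTestResult(...)],
#   )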
|
DetectionTest
|
python
|
google__pytype
|
pytype/tests/test_anystr2.py
|
{
"start": 1756,
"end": 4162
}
|
class ____(test_base.BaseTest):
"""Tests for issues related to AnyStr in Python 3."""
def test_anystr(self):
ty = self.Infer("""
from typing import AnyStr
def f(x: AnyStr) -> AnyStr:
return __any_object__
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
AnyStr = TypeVar("AnyStr", str, bytes)
def f(x: AnyStr) -> AnyStr: ...
""",
)
self.assertTrue(ty.Lookup("f").signatures[0].template)
def test_anystr_function_import(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import AnyStr
def f(x: AnyStr) -> AnyStr: ...
""",
)
ty = self.Infer(
"""
from a import f
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
AnyStr = TypeVar("AnyStr", str, bytes)
def f(x: AnyStr) -> AnyStr: ...
""",
)
def test_use_anystr_constraints(self):
ty, errors = self.InferWithErrors("""
from typing import AnyStr, TypeVar
def f(x: AnyStr, y: AnyStr) -> AnyStr:
return __any_object__
v1 = f(__any_object__, u"") # ok
v2 = f(__any_object__, 42) # wrong-arg-types[e]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, TypeVar
AnyStr = TypeVar("AnyStr", str, bytes)
def f(x: AnyStr, y: AnyStr) -> AnyStr: ...
v1 = ... # type: str
v2 = ... # type: Any
""",
)
self.assertErrorRegexes(errors, {"e": r"Union\[bytes, str\].*int"})
def test_constraint_mismatch(self):
errors = self.CheckWithErrors("""
from typing import AnyStr
def f(x: AnyStr, y: AnyStr): ...
f("", "") # ok
f("", b"") # wrong-arg-types[e]
f(b"", b"") # ok
""")
self.assertErrorRegexes(
errors, {"e": r"Expected.*y: str.*Actual.*y: bytes"}
)
def test_custom_generic(self):
ty = self.Infer("""
from typing import AnyStr, Generic
class Foo(Generic[AnyStr]):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generic, TypeVar
AnyStr = TypeVar('AnyStr', str, bytes)
class Foo(Generic[AnyStr]): ...
""",
)
if __name__ == "__main__":
test_base.main()
|
AnyStrTestPy3
|
python
|
pytorch__pytorch
|
test/test_matmul_cuda.py
|
{
"start": 2191,
"end": 42809
}
|
class ____(InductorTestCase):
def setUp(self):
super().setUp()
torch.backends.cuda.matmul.allow_tf32 = False
def tearDown(self):
torch.backends.cuda.matmul.allow_tf32 = True
super().tearDown()
def cublas_addmm(
self,
size: int,
dtype: torch.dtype,
reduced_precision: bool = False,
fp16_accumulate: bool = False,
bias_shape_modifier: Callable | None = None,
):
#
# Check for catastrophic cuBLAS inaccuracy by measuring the deviation between
# results from the CUDA invocation of torch.addmm and the CPU invocation
# (which does not use CUDA backend).
#
# Get dims
m, k, n = (size + 1, size, size + 2)
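# distinct m/k/n extents make swapped or transposed operands fail fast with
# shape errors instead of silently matching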
# Disable reduced precision reductions in BFloat16 to bypass some kernels
# which fail the threshold check
orig_bf16 = torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction
orig_fp16 = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
orig_fp16_accumulate = torch.backends.cuda.matmul.allow_fp16_accumulation
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = reduced_precision
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = reduced_precision
torch.backends.cuda.matmul.allow_fp16_accumulation = fp16_accumulate
# Make random tensors on CPU (seed set on common_utils.py import)
# (Not using numpy because it does not support bfloat16)
make_arg = partial(make_tensor, dtype=dtype, device="cpu")
bias_shape_modifier = (lambda shape: shape) if bias_shape_modifier is None else bias_shape_modifier
m_input = make_arg(bias_shape_modifier((m, n)))
m_1 = make_arg((m, k))
m_2 = make_arg((k, n))
m_beta = make_arg(1)
# scale to abate overflows in fp16 accum
if fp16_accumulate:
m_1 = m_1 / 100
m_2 = m_2 / 100
# *(B)FLOAT16 Special Handling*
# Backend does not tensorize float16 on CPU,
# and bfloat16 may present accuracy issues,
# so convert to float32 for these cases
# (but keep same for other types, e.g. float32 and int*)
if dtype == torch.float16 or dtype == torch.bfloat16:
m_beta = m_beta.to(dtype=torch.float32)
m_input = m_input.to(dtype=torch.float32)
m_1 = m_1.to(dtype=torch.float32)
m_2 = m_2.to(dtype=torch.float32)
# Get CPU result
res_cpu = torch.addmm(m_input, m_1, m_2, beta=m_beta.item())
# *(B)FLOAT16 Special Handling*
# Convert back to (b)float16
if dtype == torch.float16 or dtype == torch.bfloat16:
m_beta = m_beta.to(dtype=dtype)
m_input = m_input.to(dtype=dtype)
m_1 = m_1.to(dtype=dtype)
m_2 = m_2.to(dtype=dtype)
res_cpu = res_cpu.to(dtype=dtype)
# Move arg tensors to CUDA
m_beta = m_beta.to("cuda")
m_input = m_input.to("cuda")
m_1 = m_1.to("cuda")
m_2 = m_2.to("cuda")
# Get CUDA result
res_cuda = torch.addmm(m_input, m_1, m_2, beta=m_beta.item())
# Move to CPU for comparison
res_cuda = res_cuda.to("cpu")
# Compare
self.assertEqual(res_cpu, res_cuda)
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = orig_bf16
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig_fp16
torch.backends.cuda.matmul.allow_fp16_accumulation = orig_fp16_accumulate
@onlyCUDA
# imported 'tol' as 'xtol' to avoid aliasing in code above
@toleranceOverride({torch.float16: xtol(atol=1e-1, rtol=1e-1),
torch.bfloat16: xtol(atol=1e-1, rtol=1e-1),
torch.float32: xtol(atol=1e-1, rtol=1e-1)})
@dtypes(torch.float16, torch.bfloat16, torch.float32)
@parametrize("size", [100, 1000, 10000])
@parametrize("backend", ["cublas", "cublaslt"])
def test_cublas_addmm(self, size: int, dtype: torch.dtype, backend):
with blas_library_context(backend):
if (TEST_WITH_ROCM and backend == "cublas" and isRocmArchAnyOf(NAVI_ARCH) and
getRocmVersion() < (6, 4) and dtype == torch.float16 and size >= 10000):
self.skipTest(f"failed on Navi for ROCm6.3 due to hipblas backend, dtype={dtype} and size={size}")
self.cublas_addmm(size, dtype, False)
@onlyCUDA
@xfailIfSM100OrLaterNonRTXAndCondition(lambda params: params.get('dtype') == torch.bfloat16 and params.get('size') == 10000)
# imported 'tol' as 'xtol' to avoid aliasing in code above
@toleranceOverride({torch.float16: xtol(atol=7e-1, rtol=2e-1),
torch.bfloat16: xtol(atol=1e1, rtol=2e-1)})
@dtypes(torch.float16, torch.bfloat16)
@parametrize("size", [100, 1000, 10000])
@parametrize("backend", ["cublas", "cublaslt"])
def test_cublas_addmm_reduced_precision(self, size: int, dtype: torch.dtype, backend):
with blas_library_context(backend):
self.cublas_addmm(size, dtype, True)
@onlyCUDA
# imported 'tol' as 'xtol' to avoid aliasing in code above
@toleranceOverride({torch.float16: xtol(atol=1e-3, rtol=1e-4),
torch.bfloat16: xtol(atol=1e-3, rtol=1e-4),
torch.float32: xtol(atol=1e-3, rtol=1e-4)})
@dtypes(torch.bfloat16, torch.float16, torch.float32)
@parametrize("size", [128])
@parametrize("backend", ["cublas", "cublaslt"])
def test_cublas_addmm_bias_shapes(self, size: int, dtype: torch.dtype, backend):
with blas_library_context(backend):
# 2D bias
self.cublas_addmm(size, dtype, bias_shape_modifier=lambda shape: shape)
# 1D bias which is row-broadcast to 2D
self.cublas_addmm(size, dtype, bias_shape_modifier=lambda shape: (1, shape[-1]))
# 1D bias which row-broadcasts
self.cublas_addmm(size, dtype, bias_shape_modifier=lambda shape: (shape[-1],))
@onlyCUDA
@dtypes(torch.float16)
# m == 4 chooses OUTPUT_TYPE reduction on H200
# m == 8 chooses OUTPUT_TYPE reduction on A100
@parametrize("small_size", [4, 8])
@parametrize("size", [32768])
@parametrize("backend", ["cublaslt", "cublas"])
def test_cublas_addmm_no_reduced_precision(self, small_size: int, size: int, dtype: torch.dtype, backend):
with blas_library_context(backend):
torch.backends.cuda.preferred_blas_library(backend)
orig_precision = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
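# 65504 is the largest finite fp16 value; half of m2's rows are +1 and half
# are -1, so the exact accumulation cancels to 0. With fp32 accumulation the
# result is exactly 0, whereas fp16 accumulation would overflow to inf before
# the cancellation happens.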
m1 = torch.full((small_size, size), 65504.0, dtype=dtype, device='cuda')
m2 = torch.ones((size, small_size), dtype=dtype, device='cuda')
m2[size // 2:, :] = -1.0
b = torch.zeros((small_size,), dtype=dtype, device='cuda')
out = torch.addmm(b, m1, m2, beta=1.0)
self.assertEqual(out.sum().item(), 0.0)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig_precision
@onlyCUDA
# imported 'tol' as 'xtol' to avoid aliasing in code above
@toleranceOverride({torch.float16: xtol(atol=7e-1, rtol=2e-1),
torch.bfloat16: xtol(atol=1e1, rtol=2e-1)})
@dtypes(torch.float16, torch.bfloat16)
@parametrize("size", [100, 1000, 10000])
@parametrize("backend", ["cublas", "cublaslt"])
def test_cublas_addmm_reduced_precision_fp16_accumulate(self, size: int, dtype: torch.dtype, backend):
with blas_library_context(backend):
self.cublas_addmm(size, dtype, False, True)
@onlyCUDA
def test_cublas_and_lt_reduced_precision_fp16_accumulate(self):
orig_fp16_accumulate = torch.backends.cuda.matmul.allow_fp16_accumulation
torch.backends.cuda.matmul.allow_fp16_accumulation = True
x = torch.rand(32, 512, 512, device='cuda', dtype=torch.half)
w = torch.rand(512, 512, device='cuda', dtype=torch.half)
b = torch.rand(512, device='cuda', dtype=torch.half)
out = torch.nn.functional.linear(x, w, b)
out_cpu = torch.nn.functional.linear(x.cpu(), w.cpu(), b.cpu())
self.assertEqual(out, out_cpu, atol=5e-3, rtol=8e-3)
a = torch.rand(16, 128, 128, device='cuda', dtype=torch.half)
b = torch.rand(16, 128, 128, device='cuda', dtype=torch.half)
c = torch.rand(16, 128, 128, device='cuda', dtype=torch.half)
out = torch.baddbmm(a, b, c)
out_cpu = torch.baddbmm(a.cpu(), b.cpu(), c.cpu())
self.assertEqual(out, out_cpu, atol=1e-3, rtol=5e-3)
torch.backends.cuda.matmul.allow_fp16_accumulation = orig_fp16_accumulate
@onlyCUDA
@toleranceOverride({torch.float16: xtol(atol=1e-3, rtol=2e-3)})
@dtypes(torch.float16)
def test_cublas_addmm_alignment(self, dtype):
device = 'cuda'
# perturb X, A, or B alignment
for idx in range(3):
for offset in range(1, 3):
offsets = [0, 0, 0]
offsets[idx] = offset
x_offset, a_offset, b_offset = offsets
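# slicing `offset` elements off the front shifts each tensor's data pointer
# so its storage is no longer 16-byte aligned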
A = torch.rand((5120 * 2560 + a_offset), requires_grad=True, dtype=dtype, device=device)
A = A[a_offset:].reshape(5120, 2560)
X = torch.rand((26 * 2560 + x_offset), requires_grad=True, dtype=dtype, device=device)
X = X[x_offset:].reshape(26, 1, 2560)
B = torch.rand((5120 + b_offset), requires_grad=True, dtype=dtype, device=device)
B = B[b_offset:].reshape(5120)
out = torch.nn.functional.linear(X, A, B)
self.assertEqual(out, torch.matmul(X, A.transpose(1, 0)) + B)
@onlyCUDA
@unittest.skipIf(IS_JETSON, "Too large for Jetson")
@toleranceOverride({torch.float32: xtol(atol=1e-5, rtol=1.1e-5)})
@dtypes(*([torch.float32, torch.float16] +
([torch.bfloat16] if TEST_WITH_ROCM or SM53OrLater else [])))
@parametrize(
"batch_size, N, M, P",
[(2, 100, 100, 100),
(2, 1000, 1000, 1000),
(1, 10000, 1000, 10000),
(1, 10000, 10000, 10000)],
name_fn=lambda batch_size, N, M, P: f"{batch_size}_{N}_{M}_{P}",
)
def test_cublas_baddbmm_large_input(self, device, batch_size, N, M, P, dtype):
cpu_dtype = dtype
if dtype == torch.float16 or dtype == torch.bfloat16:
cpu_dtype = torch.float32
M1 = torch.rand((N, M), device=device, dtype=dtype)
M2 = torch.rand((M, P), device=device, dtype=dtype)
A = torch.rand((N, P), device=device, dtype=dtype)
def _convert_to_cpu(t):
return t.to(device='cpu', dtype=cpu_dtype)
M1_cpu, M2_cpu, A_cpu = map(_convert_to_cpu, [M1, M2, A])
# linear
out1_cpu = torch.nn.functional.linear(M1_cpu, M2_cpu.t(), A_cpu).to(dtype=dtype)
out1_gpu = torch.nn.functional.linear(M1, M2.t(), A).cpu()
self.assertEqual(out1_cpu, out1_gpu)
# test multiply the identity matrix
if N == M and M == P:
M2_eye = torch.eye(N, device=device, dtype=dtype)
out1_eye_gpu = torch.nn.functional.linear(M1, M2_eye.t(), torch.zeros_like(A))
if runOnRocmArch(MI200_ARCH) and dtype == torch.float16:
self.assertEqual(M1_cpu.to(dtype=dtype), out1_eye_gpu.cpu(), atol=1e-4, rtol=0.001)
else:
self.assertEqual(M1_cpu.to(dtype=dtype), out1_eye_gpu.cpu())
# baddbmm
def _expand_to_batch(t: torch.Tensor):
return t.expand((batch_size, ) + t.size())
alpha, beta = 1.0, 1.0
M1, M2, A, M1_cpu, M2_cpu, A_cpu = map(_expand_to_batch, [M1, M2, A, M1_cpu, M2_cpu, A_cpu])
out2_cpu = torch.baddbmm(A_cpu, M1_cpu, M2_cpu, beta=beta, alpha=alpha).to(dtype=dtype)
out2_gpu = torch.baddbmm(A, M1, M2, beta=beta, alpha=alpha).cpu()
self.assertEqual(out2_cpu, out2_gpu)
# test multiply the identity matrix
if N == M and M == P:
M2_eye = torch.eye(N, device=device, dtype=dtype).expand(batch_size, N, N)
out2_eye_gpu = torch.baddbmm(torch.zeros_like(A), M1, M2_eye, beta=beta, alpha=alpha)
if runOnRocmArch(MI200_ARCH) and dtype == torch.float16:
self.assertEqual(M1_cpu.to(dtype=dtype), out2_eye_gpu.cpu(), atol=1e-4, rtol=0.001)
else:
self.assertEqual(M1_cpu.to(dtype=dtype), out2_eye_gpu.cpu())
# cross comparison
self.assertEqual(out1_gpu, out2_gpu[0])
@onlyCUDA
@skipIfRocm
@parametrize("shape", [2**i for i in range(5, 14)])
@dtypes(torch.float, torch.half, torch.bfloat16)
def test_cublas_deterministic(self, device, shape, dtype):
inp = torch.randn(shape, shape, device=device, dtype=dtype)
first = torch.matmul(inp, inp)
for _ in range(10):
self.assertEqual(first, torch.matmul(inp, inp), atol=0., rtol=0.)
def grouped_mm_helper(self, alist, blist, gOlist, agradlist, bgradlist, outlist):
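# For each group, recompute out = a @ b.T with autograd as a reference and
# compare the grouped-mm forward result and (when present) the gradients.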
for a, b, gO, agrad, bgrad, out in zip(alist, blist, gOlist, agradlist, bgradlist, outlist):
a = a.clone().detach().requires_grad_()
b = b.clone().detach().requires_grad_()
out_ref = torch.mm(a, b.t())
out_ref.backward(gO)
self.assertEqual(out, out_ref)
if agrad is not None:
self.assertEqual(agrad, a.grad)
self.assertEqual(bgrad, b.grad)
@onlyCUDA
@skipIfRocm
@dtypes(torch.half, torch.bfloat16)
@unittest.skipIf(not SM100OrLater, "cuBLAS integration for batch invariance is only on Blackwell")
@serialTest()
def test_cublas_batch_invariance_blackwell(self, device, dtype):
orig_bf16 = torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction
orig_fp16 = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = (False, False)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = (False, False)
with blas_library_context('cublaslt'):
N = 2048
K = 6144
M_max = 32
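# Batch invariance: row 0 of the full (M_max x K) @ (K x N) product must match
# the (1 x K) @ (K x N) product of just that row, bit for bit (atol=0, rtol=0).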
x = torch.randn(M_max, K, device="cuda", dtype=torch.bfloat16)
w = torch.randn(N, K, device="cuda", dtype=torch.bfloat16).t()
full = x @ w
xx = x[:1]
out = xx @ w
self.assertEqual(full[:1], out, atol=0., rtol=0.)
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = orig_bf16
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig_fp16
@unittest.skipIf(not SM80OrLater, "Grouped gemm supported only on SM80 or greater")
@parametrize("strided", [False, True])
@parametrize("a_row_major", [False, True])
@parametrize("b_row_major", [False, True])
@dtypes(torch.bfloat16, torch.float32, torch.float16)
def test_grouped_gemm_2d_2d(self, strided, a_row_major, b_row_major, dtype):
device = "cuda"
m, n, k, n_groups = 16, 32, 64, 4
if a_row_major:
a = torch.randn(m, k * n_groups + k * int(strided), device=device, dtype=dtype)[:, :k * n_groups]
else:
a = torch.randn(k * n_groups + k * int(strided), m, device=device, dtype=dtype).t()[:, :k * n_groups]
if b_row_major:
b = torch.randn(n, k * n_groups + k * int(strided), device=device, dtype=dtype)[:, :k * n_groups]
else:
b = torch.randn(k * n_groups + k * int(strided), n, device=device, dtype=dtype).t()[:, :k * n_groups]
a.requires_grad_(True)
b.requires_grad_(True)
offs = torch.arange(k, n_groups * k + 1, k, device=device, dtype=torch.int32)
f = F.grouped_mm
out = f(a, b.t(), offs=offs, out_dtype=dtype)
gO = torch.rand_like(out)
out.backward(gO)
offs_cpu = offs.cpu()
alist, blist, agradlist, bgradlist = [], [], [], []
start = 0
for i in range(n_groups):
alist.append(a[:, start:offs_cpu[i]])
blist.append(b[:, start:offs_cpu[i]])
agradlist.append(a.grad[:, start:offs_cpu[i]])
bgradlist.append(b.grad[:, start:offs_cpu[i]])
start = offs_cpu[i]
self.grouped_mm_helper(alist, blist, gO, agradlist, bgradlist, out)
@unittest.skipIf(not SM80OrLater, "Grouped gemm supported only on SM80 or greater")
@parametrize("strided", [False, True])
@parametrize("a_row_major", [False, True])
@parametrize("b_row_major", [False, True])
@dtypes(torch.bfloat16, torch.float32, torch.float16)
def test_grouped_gemm_2d_3d(self, strided, a_row_major, b_row_major, dtype):
device = "cuda"
s_int = int(strided)
m, n, k, n_groups = 16, 32, 64, 4
if a_row_major:
a = torch.randn(m * n_groups, k * (1 + s_int), device=device, dtype=dtype)[:, :k]
else:
a = torch.randn(k, (m + 2 * s_int) * n_groups, device=device, dtype=dtype).t()[:m * n_groups, :]
if b_row_major:
b = torch.randn(n_groups * (1 + s_int), n, k * (1 + s_int), device=device, dtype=dtype)[::(1 + s_int), :, :k]
else:
b = torch.randn(n_groups * (1 + s_int), k * (1 + s_int), n, device=device,
dtype=dtype).transpose(-2, -1)[::(1 + s_int), :, :k]
a.requires_grad_(True)
b.requires_grad_(True)
a_contig = a if a_row_major else a.t()
self.assertTrue(a_contig.is_contiguous() is not strided)
b_contig = b if b_row_major else b.transpose(-2, -1)
self.assertTrue(b_contig.is_contiguous() is not strided)
for check_zero_size in (False, True):
if check_zero_size and n_groups <= 1:
continue
a.grad = None
b.grad = None
offs = torch.arange(m, n_groups * m + 1, m, device=device, dtype=torch.int32)
if check_zero_size:
offs[0] = offs[1]
f = F.grouped_mm
out = f(a, b.transpose(-2, -1), offs=offs, out_dtype=dtype)
gO = torch.rand_like(out)
if not check_zero_size:
out.backward(gO)
offs_cpu = offs.cpu()
alist, agradlist, gOlist, outlist = [], [], [], []
bgradlist = [None] * n_groups if check_zero_size else b.grad
start = 0
for i in range(n_groups):
alist.append(a[start:offs_cpu[i]])
agradlist.append(None if check_zero_size else a.grad[start:offs_cpu[i]])
outlist.append(out[start:offs_cpu[i]])
gOlist.append(gO[start:offs_cpu[i]])
start = offs_cpu[i]
self.grouped_mm_helper(alist, b, gOlist, agradlist, bgradlist, outlist)
@unittest.skipIf(not SM80OrLater, "Grouped gemm supported only on SM80 or greater")
@parametrize("strided", [False, True])
@parametrize("a_row_major", [False, True])
@parametrize("b_row_major", [False, True])
@dtypes(torch.bfloat16, torch.float32, torch.float16)
def test_grouped_gemm_3d_3d(self, strided, a_row_major, b_row_major, dtype):
device = "cuda"
s_int = int(strided)
m, n, k, n_groups = 16, 32, 64, 4
if a_row_major:
a = torch.randn(n_groups * (1 + s_int), m, k * (1 + s_int), device=device, dtype=dtype)[::(1 + s_int), :, :k]
else:
a = torch.randn(n_groups * (1 + s_int), k * (1 + s_int), m, device=device,
dtype=dtype).transpose(-2, -1)[::(1 + s_int), :, :k]
if b_row_major:
b = torch.randn(n_groups * (1 + s_int), n, k * (1 + s_int), device=device, dtype=dtype)[::(1 + s_int), :, :k]
else:
b = torch.randn(n_groups * (1 + s_int), k * (1 + s_int), n, device=device,
dtype=dtype).transpose(-2, -1)[::(1 + s_int), :, :k]
a.requires_grad_(True)
b.requires_grad_(True)
a_contig = a if a_row_major else a.transpose(-2, -1)
self.assertTrue(a_contig.is_contiguous() is not strided)
b_contig = b if b_row_major else b.transpose(-2, -1)
self.assertTrue(b_contig.is_contiguous() is not strided)
f = F.grouped_mm
out = f(a, b.transpose(-2, -1), out_dtype=dtype)
gO = torch.rand_like(out)
out.backward(gO)
self.grouped_mm_helper(a, b, gO, a.grad, b.grad, out)
@unittest.skipIf(not SM80OrLater, "Grouped gemm supported only on SM80 or greater")
@parametrize("strided", [False, True])
@parametrize("a_row_major", [False, True])
@parametrize("b_row_major", [False, True])
@dtypes(torch.bfloat16, torch.float32, torch.float16)
def test_grouped_gemm_3d_2d(self, strided, a_row_major, b_row_major, dtype):
device = "cuda"
s_int = int(strided)
m, n, k, n_groups = 16, 32, 64, 4
if a_row_major:
a = torch.randn(n_groups * (1 + s_int), m, k * (1 + s_int), device=device, dtype=dtype)[::(1 + s_int), :, :k]
else:
a = torch.randn(n_groups * (1 + s_int), k * (1 + s_int), m, device=device,
dtype=dtype).transpose(-2, -1)[::(1 + s_int), :, :k]
if b_row_major:
b = torch.randn(n * n_groups, k * (1 + s_int), device=device, dtype=dtype)[:, :k]
else:
b = torch.randn(k, n * (n_groups + s_int), device=device, dtype=dtype).transpose(-2, -1)[:n * n_groups, :]
a.requires_grad_(True)
b.requires_grad_(True)
a_contig = a if a_row_major else a.transpose(-2, -1)
self.assertTrue(a_contig.is_contiguous() is not strided)
b_contig = b if b_row_major else b.transpose(-2, -1)
self.assertTrue(b_contig.is_contiguous() is not strided)
for check_zero_size in (False, True):
if check_zero_size and n_groups <= 1:
continue
offs = torch.arange(n, n_groups * n + 1, n, device=device, dtype=torch.int32)
if check_zero_size:
offs[0] = offs[1]
f = F.grouped_mm
out = f(a, b.transpose(-2, -1), offs=offs, out_dtype=dtype)
gO = torch.rand_like(out)
if not check_zero_size:
out.backward(gO)
offs_cpu = offs.cpu()
blist, outlist, bgradlist, gOlist = [], [], [], []
agradlist = [None] * n_groups if check_zero_size else a.grad
start = 0
for i in range(n_groups):
blist.append(b[start:offs_cpu[i]])
bgradlist.append(b.grad[start:offs_cpu[i]])
outlist.append(out[:, start:offs_cpu[i]])
gOlist.append(gO[:, start:offs_cpu[i]])
start = offs_cpu[i]
self.grouped_mm_helper(a, blist, gOlist, agradlist, bgradlist, outlist)
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support CUTLASS")
# TODO(future PR): enable compile for torch.nn.functional.grouped_mm fallback path
@unittest.skipIf(not SM90OrLater, "Grouped gemm with compile supported on SM90")
@parametrize("op", ["2d/2d", "2d/3d", "3d/2d", "3d/3d"])
@parametrize("a_row_major", [False, True])
@parametrize("b_row_major", [False, True])
@parametrize("max_autotune", [False, True])
def test_grouped_gemm_compiled(self, op, a_row_major, b_row_major, max_autotune):
device = "cuda"
dtype_AB = torch.bfloat16
dtype_offset = torch.int32
align = 16 // dtype_AB.itemsize
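# `align` is the number of elements per 16 bytes; padded dims below are
# rounded up to a multiple of it so strides stay 16-byte aligned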
f_ref = F.grouped_mm
options = {}
if max_autotune:
options.update(
{
"max_autotune": True,
"max_autotune_gemm_backends": "TRITON",
}
)
f = torch.compile(
f_ref,
options=options,
)
if op == "2d/2d":
m, n = 3, 7
m_align = (m + align - 1) // align * align
n_align = (n + align - 1) // align * align
if not a_row_major and not b_row_major:
offs = torch.tensor([0, 1, 6, 6, 7], device=device, dtype=dtype_offset)
else:
offs = torch.tensor([0, 8, 16, 16, 27], device=device, dtype=dtype_offset)
ngroups = offs.shape[0]
k = offs[-1]
k_align = (k + align - 1) // align * align
if a_row_major:
A = torch.randn(m, k_align, device=device, dtype=dtype_AB)[:, :k]
else:
A = torch.randn(k, m_align, device=device, dtype=dtype_AB).t()[:m, :]
if b_row_major:
B = torch.randn(n, k_align, device=device, dtype=dtype_AB)[:, :k]
else:
B = torch.randn(k, n_align, device=device, dtype=dtype_AB).t()[:n, :]
elif op == "2d/3d":
n, k = 7, 259 # k is larger here, to validate iterating over k tiles on an op
n_align = (n + align - 1) // align * align
k_align = (k + align - 1) // align * align
if a_row_major:
offs = torch.tensor([0, 1, 3, 3, 5], device=device, dtype=dtype_offset)
else:
offs = torch.tensor([0, 8, 16, 16, 19], device=device, dtype=dtype_offset)
ngroups = offs.shape[0]
m = offs[-1]
m_align = (m + align - 1) // align * align
if a_row_major:
A = torch.randn(m, k_align, device=device, dtype=dtype_AB)[:, :k]
else:
A = torch.randn(k, m_align, device=device, dtype=dtype_AB).t()[:m, :]
if b_row_major:
B = torch.randn(ngroups, n, k_align, device=device, dtype=dtype_AB)[:, :, :k]
else:
B = torch.randn(ngroups, k, n_align, device=device, dtype=dtype_AB).transpose(
-2, -1
)[:, :n, :]
elif op == "3d/2d":
m, k = 3, 13
m_align = (m + align - 1) // align * align
k_align = (k + align - 1) // align * align
offs = torch.tensor([0, 8, 16, 16, 19], device=device, dtype=dtype_offset)
ngroups = offs.shape[0]
n = offs[-1]
n_align = (n + align - 1) // align * align
if a_row_major:
A = torch.randn(ngroups, m, k_align, device=device, dtype=dtype_AB)[:, :, :k]
else:
A = torch.randn(ngroups, k, m_align, device=device, dtype=dtype_AB).transpose(
-2, -1
)[:, :m, :]
if b_row_major:
B = torch.randn(n, k_align, device=device, dtype=dtype_AB)[:, :k]
else:
B = torch.randn(k, n_align, device=device, dtype=dtype_AB).t()[:n, :]
elif op == "3d/3d":
offs = None
ngroups = 5
m, n, k = 3, 7, 13
m_align = (m + align - 1) // align * align
n_align = (n + align - 1) // align * align
k_align = (k + align - 1) // align * align
if a_row_major:
A = torch.randn(ngroups, m, k_align, device=device, dtype=dtype_AB)[:, :, :k]
else:
A = torch.randn(ngroups, k, m_align, device=device, dtype=dtype_AB).transpose(
-2, -1
)[:, :m, :]
if b_row_major:
B = torch.randn(ngroups, n, k_align, device=device, dtype=dtype_AB)[:, :, :k]
else:
B = torch.randn(ngroups, k, n_align, device=device, dtype=dtype_AB).transpose(
-2, -1
)[:, :n, :]
else:
raise AssertionError(f"Invalid op: {op}")
C_ref = f_ref(A, B.transpose(-2, -1), offs=offs)
if not IS_BIG_GPU and max_autotune:
with self.assertRaisesRegex(torch._inductor.exc.InductorError, "NoValidChoicesError"):
C = f(A, B.transpose(-2, -1), offs=offs)
else:
C = f(A, B.transpose(-2, -1), offs=offs)
self.assertEqual(C, C_ref)
@onlyCUDA
@parametrize("input_dtype", [torch.float32, torch.float16, torch.bfloat16])
@parametrize("M", [1, 32, 64])
@parametrize("N", [1, 32, 64])
@parametrize("K", [1, 32, 64])
@parametrize("batch_size", [None, 1, 16])
@parametrize("backend", ["cublas", "cublaslt"])
def test_mm_bmm_dtype_overload(self, input_dtype, M, N, K, batch_size, backend):
if torch.version.hip:
msg = "accuracy regression in hipblas and hipblaslt in ROCm 7.0 for certain shapes"
if input_dtype == torch.bfloat16 and N == 1 and K == 32 and batch_size:
raise unittest.SkipTest(msg)
if input_dtype == torch.bfloat16 and N == 1 and K == 64 and batch_size:
raise unittest.SkipTest(msg)
if input_dtype == torch.float16 and M == 32 and N == 1 and K == 64 and batch_size == 1:
raise unittest.SkipTest(msg)
if input_dtype == torch.float16 and M == 64 and N == 1 and K == 64 and batch_size == 1:
raise unittest.SkipTest(msg)
device = "cuda"
dtype = input_dtype
with blas_library_context(backend):
def create_inputs(B=None):
if B is None:
a = torch.randn(M, K, device=device, dtype=dtype)
b = torch.randn(K, N, device=device, dtype=dtype)
else:
a = torch.randn(B, M, K, device=device, dtype=dtype)
b = torch.randn(B, K, N, device=device, dtype=dtype)
return a, b
a, b = create_inputs(batch_size)
a_fp32, b_fp32 = a.to(torch.float32), b.to(torch.float32)
output_dtypes = [torch.float32]
if input_dtype != torch.float32:
output_dtypes.append(input_dtype)
for output_dtype in output_dtypes:
# Catch edge case of incompat with bfloat16 and major version < 8
if input_dtype == torch.bfloat16 and not PLATFORM_SUPPORTS_BF16:
if output_dtype == torch.bfloat16:
continue
if batch_size:
with self.assertRaises(RuntimeError):
torch.bmm(a, b, out_dtype=output_dtype)
else:
with self.assertRaises(RuntimeError):
torch.mm(a, b, out_dtype=output_dtype)
else:
if batch_size:
out = torch.bmm(a, b, out_dtype=output_dtype)
baseline = torch.bmm(a_fp32, b_fp32) if output_dtype == torch.float32 else torch.bmm(a, b)
else:
out = torch.mm(a, b, out_dtype=output_dtype)
baseline = torch.mm(a_fp32, b_fp32) if output_dtype == torch.float32 else torch.mm(a, b)
self.assertEqual(out.dtype, output_dtype)
torch.testing.assert_close(out, baseline, atol=1e-3, rtol=1e-3)
@onlyCUDA
@parametrize("input_dtype", [torch.float32, torch.float16, torch.bfloat16])
@parametrize("M", [1, 32, 64])
@parametrize("N", [1, 64])
@parametrize("K", [1, 32, 64])
@parametrize("batch_size", [None, 1])
@parametrize("broadcast_self", [False, True])
@parametrize("high_precision_self", [False, True])
@parametrize("backend", ["cublas", "cublaslt"])
def test_addmm_baddmm_dtype_overload(self, input_dtype, M, N, K, batch_size, broadcast_self, high_precision_self, backend):
if torch.version.hip:
msg = "accuracy regression in hipblas and hipblaslt in ROCm 7.0 for certain shapes"
if input_dtype == torch.bfloat16 and N == 1 and K == 32 and batch_size:
raise unittest.SkipTest(msg)
if input_dtype == torch.bfloat16 and N == 1 and K == 64 and batch_size:
raise unittest.SkipTest(msg)
if input_dtype == torch.float16 and M == 32 and N == 1 and K == 64 and batch_size == 1:
raise unittest.SkipTest(msg)
if input_dtype == torch.float16 and M == 64 and N == 1 and K == 64 and batch_size == 1:
raise unittest.SkipTest(msg)
device = "cuda"
dtype = input_dtype
with blas_library_context(backend):
def create_inputs(B, broadcast_self):
if B is None:
a = torch.randn(M, K, device=device, dtype=dtype)
b = torch.randn(K, N, device=device, dtype=dtype)
c_shape = (M, N) if not broadcast_self else (N,)
c = torch.randn(c_shape, device=device, dtype=dtype)
else:
a = torch.randn(B, M, K, device=device, dtype=dtype)
b = torch.randn(B, K, N, device=device, dtype=dtype)
c_shape = (B, M, N) if not broadcast_self else (N,)
c = torch.randn(c_shape, device=device, dtype=dtype)
return a, b, c
a, b, c = create_inputs(batch_size, broadcast_self)
a_fp32, b_fp32, c_fp32 = a.to(torch.float32), b.to(torch.float32), c.to(torch.float32)
output_dtypes = [torch.float32]
if input_dtype != torch.float32:
output_dtypes.append(input_dtype)
for output_dtype in output_dtypes:
# Catch edge case of incompat with bfloat16 and major version < 8
if input_dtype == torch.bfloat16 and not PLATFORM_SUPPORTS_BF16:
if output_dtype == torch.bfloat16:
continue
if batch_size:
with self.assertRaises(RuntimeError):
torch.baddbmm(c, a, b, out_dtype=output_dtype)
else:
with self.assertRaises(RuntimeError):
torch.addmm(c, a, b, out_dtype=output_dtype)
else:
if c.dtype != output_dtype and high_precision_self:
c = c.to(output_dtype)
if batch_size:
out = torch.baddbmm(c, a, b, out_dtype=output_dtype)
if output_dtype == torch.float32:
baseline = torch.baddbmm(c_fp32, a_fp32, b_fp32)
else:
baseline = torch.baddbmm(c, a, b)
# test out variant
out_ten = torch.full_like(out, float("nan"))
torch.baddbmm(c, a, b, out_dtype=output_dtype, out=out_ten)
else:
out = torch.addmm(c, a, b, out_dtype=output_dtype)
if output_dtype == torch.float32:
baseline = torch.addmm(c_fp32, a_fp32, b_fp32)
else:
baseline = torch.addmm(c, a, b)
# test out variant
out_ten = torch.full_like(out, float("nan"))
torch.addmm(c, a, b, out_dtype=output_dtype, out=out_ten)
self.assertEqual(out.dtype, output_dtype)
self.assertEqual(out_ten.dtype, output_dtype)
torch.testing.assert_close(out, baseline, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(out_ten, out, atol=0, rtol=0)
@onlyCUDA
@parametrize("batch_size", [1, 32])
@parametrize("backend", ["cublas", "cublaslt"])
def test_fp16_accum_and_fp32_out_failure(self, batch_size, backend):
M, N, K = 32, 32, 32
device = "cuda"
dtype = torch.float16
with blas_library_context(backend):
torch.backends.cuda.preferred_blas_library(backend)
orig_fp16_accum = torch.backends.cuda.matmul.allow_fp16_accumulation
torch.backends.cuda.matmul.allow_fp16_accumulation = True
def create_inputs():
a = torch.randn(M, K, device=device, dtype=dtype)
b = torch.randn(K, N, device=device, dtype=dtype)
c = torch.randn(M, N, device=device, dtype=dtype)
return a, b, c
def expand(tensor):
return tensor.unsqueeze(0).expand(batch_size, *tensor.shape)
a, b, c = create_inputs()
with self.assertRaises(Exception):
torch.baddbmm(expand(c), expand(a), expand(b), out_dtype=torch.float32)
with self.assertRaises(Exception):
torch.addmm(c, a, b, out_dtype=torch.float32)
with self.assertRaises(Exception):
torch.bmm(expand(a), expand(b), out_dtype=torch.float32)
with self.assertRaises(Exception):
torch.mm(a, b, out_dtype=torch.float32)
torch.backends.cuda.matmul.allow_fp16_accumulation = orig_fp16_accum
@onlyCUDA
@parametrize("ops", [("mm", torch.mm), ("bmm", torch.bmm), ("addmm", torch.addmm), ("baddbmm", torch.baddbmm)])
def test_input_dimension_checking_out_dtype(self, ops):
op_name, op = ops
B = 2
M, N, K = 32, 32, 32
def is_addmm():
return "add" in op_name
def is_batched():
return "bmm" in op_name
if is_batched():
a = torch.randn(B, M, K, device="cuda", dtype=torch.bfloat16)
mismatch_k_b = torch.randn(B, K + 1, N, device="cuda", dtype=torch.bfloat16)
c = torch.randn(B, M, N, device="cuda", dtype=torch.bfloat16)
extra_dim_b = a.clone().unsqueeze(0)
mismatch_k_err = "Expected size for first two dimensions of batch2 tensor to be"
extra_dim_err = "batch2 must be a 3D tensor"
else:
a = torch.randn(M, K, device="cuda", dtype=torch.bfloat16)
mismatch_k_b = torch.randn(K + 1, N, device="cuda", dtype=torch.bfloat16)
c = torch.randn(M, N, device="cuda", dtype=torch.bfloat16)
extra_dim_b = a.clone().unsqueeze(0)
mismatch_k_err = "mat1 and mat2 shapes cannot be multiplied"
extra_dim_err = "mat2 must be a matrix, got 3-D tensor"
# Test mismatch K
with self.assertRaisesRegex(RuntimeError, mismatch_k_err):
if is_addmm():
op(c, a, mismatch_k_b, out_dtype=torch.float32)
else:
op(a, mismatch_k_b, out_dtype=torch.float32)
# Test extra dimension
with self.assertRaisesRegex(RuntimeError, extra_dim_err):
if is_addmm():
op(c, a, extra_dim_b, out_dtype=torch.float32)
else:
op(c, extra_dim_b, out_dtype=torch.float32)
if is_batched():
with self.assertRaisesRegex(RuntimeError, "Expected size for first two dimensions of batch2 tensor to be"):
# Test mismatch B for bmm/baddbmm
mismatch_batch_dim_b = torch.randn(B + 1, K, N, device="cuda", dtype=torch.bfloat16)
if is_addmm():
op(c, a, mismatch_batch_dim_b, out_dtype=torch.float32)
else:
op(a, mismatch_batch_dim_b, out_dtype=torch.float32)
@unittest.skipIf(not PLATFORM_SUPPORTS_GREEN_CONTEXT, "Green contexts are not supported")
@serialTest()
def test_greencontext_carveout(self):
a = torch.randn(4096, 4096, device='cuda', dtype=torch.bfloat16)
ctx = torch.cuda.green_contexts.GreenContext.create(1, 0)
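# A green context carves out a subset of the device's SMs (assumed semantics
# of create(1, 0)), so the matmul timed inside it should run slower than the
# same matmul on the full device, as asserted below.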
ctx.set_context()
torch.matmul(a, a)
torch.cuda.synchronize()
t0 = time.perf_counter()
partial_res = torch.matmul(a, a)
torch.cuda.synchronize()
t1 = time.perf_counter()
ctx.pop_context()
torch.matmul(a, a)
torch.cuda.synchronize()
t2 = time.perf_counter()
full_res = torch.matmul(a, a)
torch.cuda.synchronize()
t3 = time.perf_counter()
self.assertEqual(partial_res, full_res)
self.assertGreater(t1 - t0, t3 - t2)
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support CUTLASS")
@unittest.skipIf(IS_WINDOWS, "Windows doesn't support CUTLASS extensions")
@unittest.skipIf(not _IS_SM8X, "mixed dtypes linear only supported on SM 8.x")
|
TestMatmulCuda
|
python
|
pytorch__pytorch
|
test/jit/test_save_load.py
|
{
"start": 532,
"end": 26001
}
|
class ____(JitTestCase):
def test_different_modules(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.bar = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
return x
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
torch.jit.save(first_script_module, first_saved_module)
first_saved_module.seek(0)
clear_class_registry()
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.foo(x)
return x
second_script_module = torch.jit.script(Foo())
second_saved_module = io.BytesIO()
torch.jit.save(torch.jit.script(Foo()), second_saved_module)
second_saved_module.seek(0)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = io.BytesIO()
torch.jit.save(sm, contains_both)
contains_both.seek(0)
sm = torch.jit.load(contains_both)
def test_different_functions(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
def lol(x):
return x
class Foo(torch.nn.Module):
def forward(self, x):
return lol(x)
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
torch.jit.save(first_script_module, first_saved_module)
first_saved_module.seek(0)
clear_class_registry()
def lol(x): # noqa: F811
return "hello"
class Foo(torch.nn.Module):
def forward(self, x):
return lol(x)
second_script_module = torch.jit.script(Foo())
second_saved_module = io.BytesIO()
torch.jit.save(torch.jit.script(Foo()), second_saved_module)
second_saved_module.seek(0)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = io.BytesIO()
torch.jit.save(sm, contains_both)
contains_both.seek(0)
sm = torch.jit.load(contains_both)
def test_different_interfaces(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
@torch.jit.interface
class MyInterface:
def bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script
class ImplementInterface:
def __init__(self) -> None:
pass
def bar(self, x):
return x
class Foo(torch.nn.Module):
__annotations__ = {"interface": MyInterface}
def __init__(self) -> None:
super().__init__()
self.interface = ImplementInterface()
def forward(self, x):
return self.interface.bar(x)
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
torch.jit.save(first_script_module, first_saved_module)
first_saved_module.seek(0)
clear_class_registry()
@torch.jit.interface
class MyInterface:
def not_bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script # noqa: F811
class ImplementInterface: # noqa: F811
def __init__(self) -> None:
pass
def not_bar(self, x):
return x
class Foo(torch.nn.Module):
__annotations__ = {"interface": MyInterface}
def __init__(self) -> None:
super().__init__()
self.interface = ImplementInterface()
def forward(self, x):
return self.interface.not_bar(x)
second_script_module = torch.jit.script(Foo())
second_saved_module = io.BytesIO()
torch.jit.save(torch.jit.script(Foo()), second_saved_module)
second_saved_module.seek(0)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = io.BytesIO()
torch.jit.save(sm, contains_both)
contains_both.seek(0)
sm = torch.jit.load(contains_both)
def test_many_collisions(self):
class MyCoolNamedTuple(NamedTuple):
a: int
@torch.jit.interface
class MyInterface:
def bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script
class ImplementInterface:
def __init__(self) -> None:
pass
def bar(self, x):
return x
def lol(x):
return x
class Foo(torch.nn.Module):
interface: MyInterface
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.bar = torch.nn.Linear(2, 2)
self.interface = ImplementInterface()
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
x = lol(x)
x = self.interface.bar(x)
return x, MyCoolNamedTuple(a=5)
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
torch.jit.save(first_script_module, first_saved_module)
first_saved_module.seek(0)
clear_class_registry()
@torch.jit.interface
class MyInterface:
def not_bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script # noqa: F811
class ImplementInterface: # noqa: F811
def __init__(self) -> None:
pass
def not_bar(self, x):
return x
def lol(x): # noqa: F811
return "asdofij"
class MyCoolNamedTuple(NamedTuple): # noqa: F811
a: str
class Foo(torch.nn.Module):
interface: MyInterface
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.interface = ImplementInterface()
def forward(self, x):
x = self.foo(x)
self.interface.not_bar(x)
x = lol(x)
return x, MyCoolNamedTuple(a="hello")
second_script_module = torch.jit.script(Foo())
second_saved_module = io.BytesIO()
torch.jit.save(second_script_module, second_saved_module)
second_saved_module.seek(0)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x, named_tuple_1 = self.first(x)
x, named_tuple_2 = self.second(x)
return len(x + named_tuple_2.a) + named_tuple_1.a
sm = torch.jit.script(ContainsBoth())
contains_both = io.BytesIO()
torch.jit.save(sm, contains_both)
contains_both.seek(0)
sm = torch.jit.load(contains_both)
def test_save_load_with_extra_files(self):
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return a
# specifically test binary data
value = b"bar\x00\xffbaz"
expected_extra_files = {}
expected_extra_files["foo"] = value
# verify that str to bytes conversion also works
expected_extra_files["foo2"] = "bar"
m = MyMod()
# Save to file.
with TemporaryFileName() as fname:
m.save(fname, _extra_files=expected_extra_files)
# values don't matter
extra_files = {"foo": "", "foo2": None}
torch.jit.load(fname, _extra_files=extra_files)
self.assertEqual(value, extra_files["foo"])
# results come back always as bytes
self.assertEqual(b"bar", extra_files["foo2"])
# Use torch.jit API
torch.jit.save(m, fname, _extra_files=expected_extra_files)
extra_files["foo"] = ""
torch.jit.load(fname, _extra_files=extra_files)
self.assertEqual(value, extra_files["foo"])
# Save to buffer.
buffer = io.BytesIO(m.save_to_buffer(_extra_files=expected_extra_files))
extra_files = {"foo": ""}
torch.jit.load(buffer, _extra_files=extra_files)
self.assertEqual(value, extra_files["foo"])
# Use torch.jit API
buffer = io.BytesIO()
torch.jit.save(m, buffer, _extra_files=expected_extra_files)
buffer.seek(0)
extra_files = {"foo": ""}
torch.jit.load(buffer, _extra_files=extra_files)
self.assertEqual(value, extra_files["foo"])
# Non-existent file 'bar'
with self.assertRaises(RuntimeError):
extra_files["bar"] = ""
torch.jit.load(buffer, _extra_files=extra_files)
def test_save_load_using_pathlib(self):
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return 2 * a
m = MyMod()
# Save then load.
with TemporaryFileName() as fname:
path = Path(fname)
m.save(path)
m2 = torch.jit.load(path)
x = torch.tensor([1.0, 2.0, 3.0, 4.0])
self.assertTrue(torch.equal(m(x), m2(x)))
def test_save_nonexit_file(self):
class Foo(torch.nn.Module):
def forward(self, x):
return 2 * x
script_module = torch.jit.script(Foo())
with self.assertRaises(RuntimeError):
script_module.save("NonExist/path/test.pt")
def test_save_namedtuple_input_only(self):
"""
Even if a NamedTuple is only used as an input argument, saving and
loading should work correctly.
"""
global FooTuple # see [local resolution in python]
class FooTuple(NamedTuple):
a: int
class MyModule(torch.nn.Module):
def forward(self, x: FooTuple) -> torch.Tensor:
return torch.tensor(3)
m_loaded = self.getExportImportCopy(torch.jit.script(MyModule()))
output = m_loaded(FooTuple(a=5))
self.assertEqual(output, torch.tensor(3))
def test_save_namedtuple_input_only_forwardref(self):
"""
Even if a NamedTuple is only used as an input argument, saving and
loading should work correctly.
"""
global FooTuple # see [local resolution in python]
class FooTuple(NamedTuple):
a: "int"
class MyModule(torch.nn.Module):
def forward(self, x: FooTuple) -> torch.Tensor:
return torch.tensor(3)
m_loaded = self.getExportImportCopy(torch.jit.script(MyModule()))
output = m_loaded(FooTuple(a=5))
self.assertEqual(output, torch.tensor(3))
def test_save_namedtuple_output_only(self):
"""
Even if a NamedTuple is only used as an output argument, saving and
loading should work correctly.
"""
global FooTuple # see [local resolution in python]
class FooTuple(NamedTuple):
a: int
class MyModule(torch.nn.Module):
def forward(self) -> Optional[FooTuple]:
return None
m_loaded = self.getExportImportCopy(torch.jit.script(MyModule()))
output = m_loaded()
self.assertEqual(output, None)
def test_save_load_params_buffers_submodules(self):
"""
Check that parameters, buffers, and submodules are the same after loading.
"""
class Submodule(torch.nn.Module):
pass
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.add_module("submodule_a", Submodule())
self.register_parameter(
"parameter_a", torch.nn.Parameter(torch.randn(4))
)
self.buffer = torch.nn.Buffer(torch.randn(4))
self.t = torch.rand(4) # not buffer
self.parameter_b = torch.nn.Parameter(torch.randn(4))
self.submodule_b = Submodule()
self.buffer_b = torch.nn.Buffer(torch.randn(4))
m = TestModule()
m_loaded = self.getExportImportCopy(torch.jit.script(m))
# Check submodules.
self.assertEqual(
len(list(m.named_modules())), len(list(m_loaded.named_modules()))
)
for m_s, loaded_s in zip(m.named_modules(), m_loaded.named_modules()):
m_name, _ = m_s
loaded_name, _ = loaded_s
self.assertEqual(m_name, loaded_name)
# Check parameters.
self.assertEqual(len(list(m.parameters())), len(list(m_loaded.parameters())))
for m_p, loaded_p in zip(m.parameters(), m_loaded.parameters()):
self.assertEqual(m_p, loaded_p)
# Check buffers.
self.assertEqual(
len(list(m.named_buffers())), len(list(m_loaded.named_buffers()))
)
for m_b, loaded_b in zip(m.named_buffers(), m_loaded.named_buffers()):
m_name, m_buffer = m_b
loaded_name, loaded_buffer = loaded_b
self.assertEqual(m_name, loaded_name)
self.assertEqual(m_buffer, loaded_buffer)
def test_save_load_meta_tensors(self):
"""
Check that parameters, buffers, and submodules are the same after loading
for a module with parameters and buffers that are meta tensors
"""
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 3, device="meta")
self.bar = torch.nn.Linear(3, 4)
self.buffer = torch.nn.Buffer(torch.randn(4, device="meta"))
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
return x
m = Foo()
m_loaded = self.getExportImportCopy(torch.jit.script(m))
# Check submodules.
self.assertEqual(
len(list(m.named_modules())), len(list(m_loaded.named_modules()))
)
self.assertEqual(
{name for name, _ in m.named_modules()},
{name for name, _ in m_loaded.named_modules()},
)
# Check parameters.
m_params = dict(m.named_parameters())
m_loaded_params = dict(m_loaded.named_parameters())
self.assertEqual(len(m_params), len(m_loaded_params))
self.assertEqual(m_params, m_loaded_params)
# Check buffers.
m_buffers = dict(m.named_buffers())
m_loaded_buffers = dict(m_loaded.named_buffers())
self.assertEqual(len(m_buffers), len(m_loaded_buffers))
self.assertEqual(m_buffers, m_loaded_buffers)
# Check params and buffers that are/are not meta tensors
self.assertTrue(m_params["foo.weight"].is_meta)
self.assertTrue(m_loaded_params["foo.weight"].is_meta)
self.assertTrue(m_params["foo.bias"].is_meta)
self.assertTrue(m_loaded_params["foo.bias"].is_meta)
self.assertFalse(m_params["bar.weight"].is_meta)
self.assertFalse(m_loaded_params["bar.weight"].is_meta)
self.assertFalse(m_params["bar.bias"].is_meta)
self.assertFalse(m_loaded_params["bar.bias"].is_meta)
self.assertTrue(m_buffers["buffer"].is_meta)
self.assertTrue(m_loaded_buffers["buffer"].is_meta)
def test_save_load_meta_tensors_to_device(self):
"""
Check that when loading a module with meta tensors to device, the meta tensors
stay on meta, but non-meta tensors are set to the indicated device.
"""
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 3, device="meta")
self.bar = torch.nn.Linear(3, 4)
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
return x
m = Foo()
m_loaded = self.getExportImportCopy(torch.jit.script(m), map_location="cpu")
# Check submodules.
self.assertEqual(
len(list(m.named_modules())), len(list(m_loaded.named_modules()))
)
self.assertEqual(
{name for name, _ in m.named_modules()},
{name for name, _ in m_loaded.named_modules()},
)
# Check parameters.
m_params = dict(m.named_parameters())
m_loaded_params = dict(m_loaded.named_parameters())
self.assertEqual(len(m_params), len(m_loaded_params))
self.assertEqual(m_params, m_loaded_params)
# Check params and buffers that are/are not meta tensors
self.assertTrue(m_params["foo.weight"].is_meta)
self.assertTrue(m_loaded_params["foo.weight"].is_meta)
self.assertTrue(m_params["foo.bias"].is_meta)
self.assertTrue(m_loaded_params["foo.bias"].is_meta)
self.assertTrue(m_params["bar.weight"].is_cpu)
self.assertTrue(m_loaded_params["bar.weight"].is_cpu)
self.assertTrue(m_params["bar.bias"].is_cpu)
self.assertTrue(m_loaded_params["bar.bias"].is_cpu)
def test_save_load_with_saved_traced_inputs(self):
"""
Check that saving and loading with traced inputs works as expected
"""
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
return torch.ones(1)
def get_loaded_inputs(inputs):
            traced_module = torch.jit.trace(module, inputs)
traced_inputs = list(traced_module.graph.inputs())
with TemporaryFileName() as fname:
path = Path(fname)
traced_module.save(path)
                loaded_module = torch.jit.load(path, _restore_shapes=True)
return traced_inputs, list(loaded_module.graph.inputs())
module = Module()
input_tensor = torch.rand(1, 3, 24, 24)
# Validate that with no input specified the traced inputs are stored
traced_module = torch.jit.trace(module, input_tensor)
traced_inputs = list(traced_module.graph.inputs())
self.assertEqual(
traced_module._c._retrieve_traced_inputs()["forward"], [input_tensor]
)
with TemporaryFileName() as fname:
path = Path(fname)
traced_module.save(path)
loaded_module = torch.jit.load(path, _restore_shapes=True)
loaded_inputs = list(loaded_module.graph.inputs())
self.assertEqual(traced_inputs[1].type(), loaded_inputs[1].type())
self.assertEqual(
traced_inputs[1].type().sizes(), loaded_inputs[1].type().sizes()
)
# Validate that if no shapes are requested previous functionality remains
loaded_module = torch.jit.load(path)
loaded_inputs = list(loaded_module.graph.inputs())
self.assertEqual(loaded_inputs[1].type().sizes(), None)
# Validate that inputs aren't saved when requested not to
traced_module = torch.jit.trace(module, input_tensor, _store_inputs=False)
traced_inputs = list(traced_module.graph.inputs())
self.assertEqual(len(traced_module._c._retrieve_traced_inputs()), 0)
with TemporaryFileName() as fname:
path = Path(fname)
traced_module.save(path)
loaded_module = torch.jit.load(path, _restore_shapes=True)
loaded_inputs = list(loaded_module.graph.inputs())
self.assertEqual(loaded_inputs[1].type().sizes(), None)
# Validate that if no shapes are requested previous functionality remains
loaded_module = torch.jit.load(path)
loaded_inputs = list(loaded_module.graph.inputs())
self.assertEqual(loaded_inputs[1].type().sizes(), None)
# Validate that complex inputs work
# Testing dict of list with empty tensors
input1 = {
"1000": (
torch.tensor([0]),
torch.tensor([], dtype=torch.int64),
torch.tensor([]),
)
}
traced_inputs, loaded_inputs = get_loaded_inputs(input1)
self.assertEqual(traced_inputs[1].type(), loaded_inputs[1].type())
# Testing dict of list
input2 = {
"1000": (
torch.tensor([0]),
torch.tensor([1500000, 1500004], dtype=torch.int64),
torch.tensor([2.0, 3.0]),
)
}
traced_inputs, loaded_inputs = get_loaded_inputs(input2)
self.assertEqual(traced_inputs[1].type(), loaded_inputs[1].type())
# Testing list
input3 = [
torch.tensor([0]),
torch.tensor([1500000, 1500004], dtype=torch.int64),
torch.tensor([2.0, 3.0]),
]
traced_inputs, loaded_inputs = get_loaded_inputs(input3)
self.assertEqual(traced_inputs[1].type(), loaded_inputs[1].type())
# Testing list of dict of list
input4 = [
{
"1000": (
torch.tensor([0]),
torch.tensor([1500000, 1500004], dtype=torch.int64),
torch.tensor([2.0, 3.0]),
)
}
]
traced_inputs, loaded_inputs = get_loaded_inputs(input4)
self.assertEqual(traced_inputs[1].type(), loaded_inputs[1].type())
@skipIfTorchDynamo("too slow")
def test_save_load_large_string_attribute(self):
"""
Check if the model with string > 4GB can be loaded.
"""
import psutil
if psutil.virtual_memory().available < 60 * 1024 * 1024 * 1024:
            # Profiling showed roughly 60 GiB of available memory is needed to run this test safely
self.skipTest(
"Doesn't have enough memory to run test_save_load_large_string_attribute"
)
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
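                # 2**32 + 1 characters is just over 4 GiB, so the string's
                # length cannot fit in a 32-bit size field when serialized.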
self.x = "x" * (2**32 + 1)
def forward(self, i) -> int:
return len(self.x) + i.numel()
inp = torch.ones(0)
ts = torch.jit.script(Model())
ts_output = ts(inp)
b = io.BytesIO(ts.save_to_buffer())
del ts
loaded_ts = torch.jit.load(b)
del b
loaded_output = loaded_ts(inp)
self.assertEqual(ts_output, loaded_output)
def script_module_to_buffer(script_module):
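    # Serialize for the lite interpreter in flatbuffer format, then rewind the
    # buffer so callers can read it from the beginning.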
module_buffer = io.BytesIO(
script_module._save_to_buffer_for_lite_interpreter(_use_flatbuffer=True)
)
module_buffer.seek(0)
return module_buffer
|
TestSaveLoad
|
python
|
allegroai__clearml
|
clearml/utilities/parallel.py
|
{
"start": 3665,
"end": 7659
}
|
class ____(object):
"""
    FutureTaskCaller runs a function asynchronously, in another thread, and
    proxies attribute access to the function's result once it is ready.
    For example:
    .. code-block:: py
        future = FutureTaskCaller(max, None, int, 1, 2)
        print('Running other code')
        print(future.__result__())  # will print '2'
"""
__slots__ = ("__object", "__object_cls", "__executor", "__deferred_bkg_class")
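    # Exposing the wrapped class via __class__ makes isinstance()/type() checks
    # on this proxy report override_cls rather than the future itself.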
@property
def __class__(self) -> Type:
return self.__object_cls
def __init__(
self,
func: Callable,
func_cb: Optional[Callable],
override_cls: Type,
*args: Any,
**kwargs: Any,
) -> None:
"""
        Start func(*args, **kwargs) on a background daemon thread; func_cb,
        if provided, is called with the result once it is ready.
"""
self.__object = None
self.__object_cls = override_cls
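        # Buffer get_logger() calls made before initialization finishes; they
        # are flushed into the real object by __submit__().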
self.__deferred_bkg_class = _DeferredClass(self, "get_logger")
self.__executor = Thread(target=self.__submit__, args=(func, func_cb, args, kwargs))
self.__executor.daemon = True
self.__executor.start()
def __submit__(
self,
fn: Callable,
fn_cb: Optional[Callable],
args: Any,
kwargs: Any,
) -> None:
# background initialization call
_object = fn(*args, **kwargs)
# push all background calls (now that the initialization is complete)
if self.__deferred_bkg_class:
_deferred_bkg_class = self.__deferred_bkg_class
self.__deferred_bkg_class = None
# noinspection PyProtectedMember
_deferred_bkg_class._flush_into_logger(a_future_object=_object)
# store the initialized object
self.__object = _object
# callback function
if fn_cb is not None:
fn_cb(self.__object)
def __getattr__(self, item: str) -> Any:
# if we get here, by definition this is not a __slot__ entry, pass to the object
return getattr(self.__result__(), item)
def __setattr__(self, item: str, value: Any) -> None:
# make sure we can set the slots
if item in [
"_FutureTaskCaller__executor",
"_FutureTaskCaller__object",
"_FutureTaskCaller__object_cls",
"_FutureTaskCaller__deferred_bkg_class",
]:
return super(FutureTaskCaller, self).__setattr__(item, value)
setattr(self.__result__(), item, value)
def __result__(self, timeout: Optional[float] = None) -> Any:
"""
Wait and get the result of the function called with self.call()
:param timeout: The maximum number of seconds to wait for the result. If None,
there is no limit for the wait time.
:return: The result of the called function
"""
if self.__executor:
# since the test is not atomic, we assume that if we failed joining
# it is because someone else joined before us
# noinspection PyBroadException
try:
self.__executor.join(timeout=timeout)
except RuntimeError:
# this is probably calling ourselves from the same thread
raise
except Exception:
# wait until that someone else updated the __object
while self.__object is None:
sleep(1)
self.__executor = None
return self.__object
# This is the part where we are no longer generic, but __slots__
# inheritance is too cumbersome to actually inherit and make sure it works optimally
def get_logger(self) -> Union[LoggerRoot, _DeferredClass]:
if self.__object is not None:
return self.__object.get_logger()
if self.__deferred_bkg_class is None:
# we are shutting down, wait until object is available
return self.__result__().get_logger()
return self.__deferred_bkg_class
|
FutureTaskCaller
|
python
|
instagram__MonkeyType
|
tests/test_cli.py
|
{
"start": 1217,
"end": 15361
}
|
class ____(DefaultConfig):
@contextmanager
def cli_context(self, command: str) -> Iterator[None]:
print(f"IN SETUP: {command}")
yield
print(f"IN TEARDOWN: {command}")
@pytest.fixture
def store_data():
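    # Each test gets a fresh on-disk SQLite trace store, exposed to the CLI
    # through the DefaultConfig.DB_PATH_VAR environment variable.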
with tempfile.NamedTemporaryFile(prefix='monkeytype_tests') as db_file:
conn = sqlite3.connect(db_file.name)
create_call_trace_table(conn)
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
yield SQLiteStore(conn), db_file
@pytest.fixture
def store(store_data):
store, __ = store_data
yield store
@pytest.fixture
def db_file(store_data):
__, db_file = store_data
yield db_file
@pytest.fixture
def stdout():
return io.StringIO()
@pytest.fixture
def stderr():
return io.StringIO()
def test_generate_stub(store, db_file, stdout, stderr):
traces = [
CallTrace(func, {'a': int, 'b': str}, NoneType),
CallTrace(func2, {'a': int, 'b': int}, NoneType),
]
store.add(traces)
ret = cli.main(['stub', func.__module__], stdout, stderr)
expected = """def func(a: int, b: str) -> None: ...
def func2(a: int, b: int) -> None: ...
"""
assert stdout.getvalue() == expected
assert stderr.getvalue() == ''
assert ret == 0
def test_print_stub_ignore_existing_annotations(store, db_file, stdout, stderr):
traces = [
CallTrace(func_anno, {'a': int, 'b': int}, int),
]
store.add(traces)
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
ret = cli.main(['stub', func.__module__, '--ignore-existing-annotations'],
stdout, stderr)
expected = """def func_anno(a: int, b: int) -> int: ...
"""
assert stdout.getvalue() == expected
assert stderr.getvalue() == ''
assert ret == 0
def test_get_diff(store, db_file, stdout, stderr):
traces = [
CallTrace(func_anno, {'a': int, 'b': int}, int),
CallTrace(func_anno2, {'a': str, 'b': str}, None),
]
store.add(traces)
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
ret = cli.main(['stub', func.__module__, '--diff'], stdout, stderr)
expected = """- def func_anno(a: int, b: str) -> None: ...
? ^ - ^^ ^
+ def func_anno(a: int, b: int) -> int: ...
? ^^ ^ ^
"""
assert stdout.getvalue() == expected
assert stderr.getvalue() == ''
assert ret == 0
def test_get_diff2(store, db_file, stdout, stderr):
traces = [
CallTrace(super_long_function_with_long_params, {
'long_param1': str,
'long_param2': str,
'long_param3': int,
'long_param4': str,
'long_param5': int,
}, None),
CallTrace(func_anno, {'a': int, 'b': int}, int),
]
store.add(traces)
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
ret = cli.main(['stub', func.__module__, '--diff'], stdout, stderr)
expected = """- def func_anno(a: int, b: str) -> None: ...
? ^ - ^^ ^
+ def func_anno(a: int, b: int) -> int: ...
? ^^ ^ ^
def super_long_function_with_long_params(
long_param1: str,
long_param2: str,
- long_param3: str,
? ^ -
+ long_param3: int,
? ^^
long_param4: str,
- long_param5: str
? ^ -
+ long_param5: int
? ^^
) -> None: ...
"""
assert stdout.getvalue() == expected
assert stderr.getvalue() == ''
assert ret == 0
@pytest.mark.parametrize('arg, error', [
(func.__module__, f"No traces found for module {func.__module__}\n"),
(func.__module__ + ':foo', f"No traces found for specifier {func.__module__}:foo\n"),
])
def test_no_traces(store, db_file, stdout, stderr, arg, error):
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
ret = cli.main(['stub', arg], stdout, stderr)
assert stderr.getvalue() == error
assert stdout.getvalue() == ''
assert ret == 0
def test_display_list_of_modules(store, db_file, stdout, stderr):
traces = [
CallTrace(func, {'a': int, 'b': str}, NoneType),
]
store.add(traces)
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
ret = cli.main(['list-modules'], stdout, stderr)
expected = ""
assert stderr.getvalue() == expected
expected = "tests.test_cli\n"
assert stdout.getvalue() == expected
assert ret == 0
def test_display_list_of_modules_no_modules(store, db_file, stdout, stderr):
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
ret = cli.main(['list-modules'], stdout, stderr)
expected = ""
assert stderr.getvalue() == expected
expected = "\n"
assert stdout.getvalue() == expected
assert ret == 0
def test_display_sample_count(stderr):
traces = [
CallTrace(func, {'a': int, 'b': str}, NoneType),
CallTrace(func, {'a': str, 'b': str}, NoneType),
CallTrace(func2, {'a': str, 'b': int}, NoneType),
CallTrace(func2, {'a': int, 'b': str}, NoneType),
CallTrace(func2, {'a': str, 'b': int}, NoneType)
]
cli.display_sample_count(traces, stderr)
expected = """Annotation for tests.test_cli.func based on 2 call trace(s).
Annotation for tests.test_cli.func2 based on 3 call trace(s).
"""
assert stderr.getvalue() == expected
def test_display_sample_count_from_cli(store, db_file, stdout, stderr):
traces = [
CallTrace(func, {'a': int, 'b': str}, NoneType),
CallTrace(func2, {'a': int, 'b': int}, NoneType),
]
store.add(traces)
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
ret = cli.main(['stub', func.__module__, '--sample-count'], stdout, stderr)
expected = """Annotation for tests.test_cli.func based on 1 call trace(s).
Annotation for tests.test_cli.func2 based on 1 call trace(s).
"""
assert stderr.getvalue() == expected
assert ret == 0
def test_quiet_failed_traces(store, db_file, stdout, stderr):
traces = [
CallTrace(func, {'a': int, 'b': str}, NoneType),
CallTrace(func2, {'a': int, 'b': int}, NoneType),
]
store.add(traces)
with mock.patch("monkeytype.encoding.CallTraceRow.to_trace", side_effect=MonkeyTypeError("the-trace")):
ret = cli.main(['stub', func.__module__], stdout, stderr)
assert "2 traces failed to decode" in stderr.getvalue()
assert ret == 0
def test_verbose_failed_traces(store, db_file, stdout, stderr):
traces = [
CallTrace(func, {'a': int, 'b': str}, NoneType),
CallTrace(func2, {'a': int, 'b': int}, NoneType),
]
store.add(traces)
with mock.patch("monkeytype.encoding.CallTraceRow.to_trace", side_effect=MonkeyTypeError("the-trace")):
ret = cli.main(['-v', 'stub', func.__module__], stdout, stderr)
assert "WARNING: Failed decoding trace: the-trace" in stderr.getvalue()
assert ret == 0
def test_cli_context_manager_activated(capsys, stdout, stderr):
ret = cli.main(['-c', f'{__name__}:LoudContextConfig()', 'stub', 'some.module'], stdout, stderr)
out, err = capsys.readouterr()
assert out == "IN SETUP: stub\nIN TEARDOWN: stub\n"
assert err == ""
assert ret == 0
def test_pathlike_parameter(store, db_file, capsys, stdout, stderr):
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
with pytest.raises(SystemExit):
cli.main(['stub', 'test/foo.py:bar'], stdout, stderr)
out, err = capsys.readouterr()
assert "test/foo.py does not look like a valid Python import path" in err
def test_toplevel_filename_parameter(store, db_file, stdout, stderr):
filename = 'foo.py'
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
orig_exists = os.path.exists
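        # Pretend that only `filename` exists on disk so the CLI's
        # filename-instead-of-module hint is triggered.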
def side_effect(x):
return True if x == filename else orig_exists(x)
with mock.patch('os.path.exists', side_effect=side_effect) as mock_exists:
ret = cli.main(['stub', filename], stdout, stderr)
mock_exists.assert_called_with(filename)
err_msg = f"No traces found for {filename}; did you pass a filename instead of a module name? " \
f"Maybe try just '{os.path.splitext(filename)[0]}'.\n"
assert stderr.getvalue() == err_msg
assert stdout.getvalue() == ''
assert ret == 0
@pytest.mark.usefixtures("collector")
def test_apply_stub_init(store, db_file, stdout, stderr, collector):
"""Regression test for applying stubs to testmodule/__init__.py style module layout"""
with trace_calls(collector, max_typed_dict_size=0):
func_foo()
store.add(collector.traces)
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
ret = cli.main(['apply', Foo.__module__], stdout, stderr)
assert ret == 0
assert 'def __init__(self, arg1: str, arg2: int) -> None:' in stdout.getvalue()
def test_apply_stub_file_with_spaces(store, db_file, stdout, stderr):
"""Regression test for applying a stub to a filename containing spaces"""
src = """
def my_test_function(a, b):
return a + b
"""
with tempfile.TemporaryDirectory(prefix='monkey type') as tempdir:
module = 'my_test_module'
src_path = os.path.join(tempdir, module + '.py')
with open(src_path, 'w+') as f:
f.write(src)
with mock.patch('sys.path', sys.path + [tempdir]):
import my_test_module as mtm
traces = [CallTrace(mtm.my_test_function, {'a': int, 'b': str}, NoneType)]
store.add(traces)
with mock.patch.dict(os.environ, {DefaultConfig.DB_PATH_VAR: db_file.name}):
ret = cli.main(['apply', 'my_test_module'], stdout, stderr)
assert ret == 0
assert 'warning:' not in stdout.getvalue()
def test_apply_stub_using_libcst():
source = """
def my_test_function(a, b):
return True
def has_return_type(a, b) -> bool:
return True
def uses_forward_ref(d):
return None
def no_stub(a):
return True
def uses_union(d):
return None
"""
stub = """
from mypy_extensions import TypedDict
from typing import Union
def my_test_function(a: int, b: str) -> bool: ...
def has_return_type(a: int, b: int) -> bool: ...
def uses_forward_ref(d: 'Foo') -> None: ...
def uses_union(d: Union[int, bool]) -> None: ...
class Foo: ...
class Movie(TypedDict):
name: str
year: int
"""
expected = """
from mypy_extensions import TypedDict
from typing import Union
class Foo: ...
class Movie(TypedDict):
name: str
year: int
def my_test_function(a: int, b: str) -> bool:
return True
def has_return_type(a: int, b: int) -> bool:
return True
def uses_forward_ref(d: 'Foo') -> None:
return None
def no_stub(a):
return True
def uses_union(d: Union[int, bool]) -> None:
return None
"""
assert cli.apply_stub_using_libcst(
textwrap.dedent(stub),
textwrap.dedent(source),
overwrite_existing_annotations=False,
) == textwrap.dedent(expected)
def test_apply_stub_using_libcst__exception(stdout, stderr):
erroneous_source = """
def my_test_function(
"""
stub = """
def my_test_function(a: int, b: str) -> bool: ...
"""
with pytest.raises(cli.HandlerError):
cli.apply_stub_using_libcst(
textwrap.dedent(stub),
textwrap.dedent(erroneous_source),
overwrite_existing_annotations=False,
)
def test_apply_stub_using_libcst__overwrite_existing_annotations():
source = """
def has_annotations(x: int) -> str:
return 1 in x
"""
stub = """
from typing import List
def has_annotations(x: List[int]) -> bool: ...
"""
expected = """
from typing import List
def has_annotations(x: List[int]) -> bool:
return 1 in x
"""
assert cli.apply_stub_using_libcst(
textwrap.dedent(stub),
textwrap.dedent(source),
overwrite_existing_annotations=True,
) == textwrap.dedent(expected)
def test_apply_stub_using_libcst__confine_new_imports_in_type_checking_block():
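    # New imports are confined to an `if TYPE_CHECKING:` block, and
    # `from __future__ import annotations` is added so the annotations
    # are not evaluated at runtime.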
source = """
def spoof(x):
return x.get_some_object()
"""
stub = """
from some.module import (
AnotherObject,
SomeObject,
)
def spoof(x: AnotherObject) -> SomeObject: ...
"""
expected = """
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from some.module import AnotherObject, SomeObject
def spoof(x: AnotherObject) -> SomeObject:
return x.get_some_object()
"""
assert cli.apply_stub_using_libcst(
textwrap.dedent(stub),
textwrap.dedent(source),
overwrite_existing_annotations=True,
confine_new_imports_in_type_checking_block=True,
) == textwrap.dedent(expected)
def test_get_newly_imported_items():
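    # Only imports present in the stub but missing from the source count as
    # new; `import q` and `from x import Y` already exist in the source.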
source = """
import q
from x import Y
"""
stub = """
from a import (
B,
C,
)
import d
import q, w, e
from x import (
Y,
Z,
)
import z as t
"""
expected = {
ImportItem('a', 'B'),
ImportItem('a', 'C'),
ImportItem('d'),
ImportItem('w'),
ImportItem('e'),
ImportItem('x', 'Z'),
ImportItem('z', None, 't'),
}
assert expected == set(cli.get_newly_imported_items(
parse_module(textwrap.dedent(stub)),
parse_module(textwrap.dedent(source)),
))
|
LoudContextConfig
|
python
|
pytorch__pytorch
|
torch/_inductor/template_heuristics/triton.py
|
{
"start": 76625,
"end": 76947
}
|
class ____(AddMMConfigMixin, CUDAMMTemplateConfigHeuristic):
"""Addmm specific mixin for CUDA"""
# TODO(coconutruben): deprecate once autoheuristic is deprecated
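# Register for CUDA only: torch.version.hip is None on non-ROCm builds.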
@register_template_heuristic(
mm_template.uid,
"cuda",
register=torch.version.hip is None,
op_name="mm-ah",
)
|
CUDAAddMMTemplateConfigHeuristic
|
python
|
spack__spack
|
lib/spack/spack/buildcache_prune.py
|
{
"start": 19830,
"end": 19958
}
|
class ____(spack.error.SpackError):
"""
Raised when pruning fails irrevocably
"""
pass
|
BuildcachePruningException
|