| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python
|
apache__airflow
|
dev/breeze/src/airflow_breeze/utils/parallel.py
|
{
"start": 3692,
"end": 3958
}
|
class ____(metaclass=ABCMeta):
    @abstractmethod
    def get_best_matching_lines(self, output: Output) -> list[str] | None:
        """
        Return best matching lines of the output.

        :return: array of lines to print
        """
|
AbstractProgressInfoMatcher
|
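The sample above (target: `AbstractProgressInfoMatcher`) is a pure abstract base, so a concrete subclass is the natural usage illustration. A minimal sketch, assuming a stand-in `Output` type (the real one lives in `airflow_breeze.utils`) and a hypothetical `LastLinesMatcher`:

```python
from abc import ABCMeta, abstractmethod


class Output:
    """Stand-in for airflow_breeze's Output type (an assumption for this sketch)."""

    def __init__(self, text: str) -> None:
        self.text = text


class ProgressMatcher(metaclass=ABCMeta):
    @abstractmethod
    def get_best_matching_lines(self, output: Output) -> list[str] | None:
        """Return best matching lines of the output."""


class LastLinesMatcher(ProgressMatcher):
    """Hypothetical matcher: surface the last few non-empty lines."""

    def __init__(self, n: int = 3) -> None:
        self.n = n

    def get_best_matching_lines(self, output: Output) -> list[str] | None:
        lines = [line for line in output.text.splitlines() if line.strip()]
        return lines[-self.n:] or None


assert LastLinesMatcher(2).get_best_matching_lines(Output("a\n\nb\nc")) == ["b", "c"]
```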
python
|
walkccc__LeetCode
|
solutions/1476. Subrectangle Queries/1476.py
|
{
"start": 0,
"end": 517
}
|
class ____:
    def __init__(self, rectangle: list[list[int]]):
        self.rectangle = rectangle
        self.updates = []

    def updateSubrectangle(self, row1: int, col1: int, row2: int, col2: int,
                           newValue: int) -> None:
        self.updates.append((row1, col1, row2, col2, newValue))

    def getValue(self, row: int, col: int) -> int:
        for r1, c1, r2, c2, v in reversed(self.updates):
            if r1 <= row <= r2 and c1 <= col <= c2:
                return v
        return self.rectangle[row][col]
|
SubrectangleQueries
|
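This sample trades write cost for read cost: updates are O(1) appends to a log, and reads scan the log in reverse so the newest overlapping update wins. A quick check, assuming `____` is replaced with its target, `SubrectangleQueries`:

```python
# Assumes the sample above with `____` substituted by SubrectangleQueries.
rect = SubrectangleQueries([[1, 2, 1], [4, 3, 4], [3, 2, 1]])
assert rect.getValue(0, 2) == 1           # no updates yet: falls through to the grid
rect.updateSubrectangle(0, 0, 2, 2, 5)    # O(1) append, no O(rows*cols) rewrite
rect.updateSubrectangle(1, 1, 2, 2, 10)
assert rect.getValue(1, 1) == 10          # newest overlapping update wins (reverse scan)
assert rect.getValue(0, 0) == 5           # older blanket update still covers this cell
```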
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_size04.py
|
{
"start": 315,
"end": 1392
}
|
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename("chart_size04.xlsx")

    def test_create_file(self):
        """Test XlsxWriter chartarea properties."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({"type": "column"})

        chart.axis_ids = [73773440, 73774976]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column("A1", data[0])
        worksheet.write_column("B1", data[1])
        worksheet.write_column("C1", data[2])

        chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
        chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
        chart.add_series({"values": "=Sheet1!$C$1:$C$5"})

        chart.set_size({"x_offset": 8, "y_offset": 9})

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
mlflow__mlflow
|
mlflow/spark/__init__.py
|
{
"start": 44357,
"end": 54631
}
|
class ____:
"""
Wrapper around Spark MLlib PipelineModel providing interface for scoring pandas DataFrame.
"""
def __init__(self, spark, spark_model, signature):
self.spark = spark
self.spark_model = spark_model
self.signature = signature
def get_raw_model(self):
"""
Returns the underlying model.
"""
return self.spark_model
def predict(
self,
pandas_df,
params: dict[str, Any] | None = None,
):
"""
Generate predictions given input data in a pandas DataFrame.
Args:
pandas_df: pandas DataFrame containing input data.
params: Additional parameters to pass to the model for inference.
Returns:
List with model predictions.
"""
if _is_spark_connect_model(self.spark_model):
# Spark connect ML model directly appends prediction result column to input pandas
# dataframe. To make input dataframe intact, make a copy first.
# TODO: apache/spark master has made a change to do shallow copy before
# calling `spark_model.transform`, so once spark 4.0 releases, we can
# remove this line.
pandas_df = pandas_df.copy(deep=False)
# Assuming the model output column name is "prediction".
# Spark model uses "prediction" as default model inference output column name.
return self.spark_model.transform(pandas_df)["prediction"]
# Convert List[np.float64] / np.array[np.float64] type to List[float] type,
# otherwise it will break `spark.createDataFrame` column type inferring.
if self.signature and self.signature.inputs:
for col_spec in self.signature.inputs.inputs:
if isinstance(col_spec.type, SparkMLVector):
col_name = col_spec.name or pandas_df.columns[0]
pandas_df[col_name] = pandas_df[col_name].map(
lambda array: [float(elem) for elem in array]
)
spark_df = self.spark.createDataFrame(pandas_df)
# Convert Array[Double] column to spark ML vector type according to signature
if self.signature and self.signature.inputs:
for col_spec in self.signature.inputs.inputs:
if isinstance(col_spec.type, SparkMLVector):
from pyspark.ml.functions import array_to_vector
col_name = col_spec.name or spark_df.columns[0]
spark_df = spark_df.withColumn(col_name, array_to_vector(col_name))
# For the case of no signature or signature logged by old version MLflow,
# the signature does not support spark ML vector type, in this case,
# automatically infer vector type input columns and do the conversion
# using `_find_and_set_features_col_as_vector_if_needed` utility function.
spark_df = _find_and_set_features_col_as_vector_if_needed(spark_df, self.spark_model)
prediction_column = mlflow.pyspark.ml._check_or_set_model_prediction_column(
self.spark_model, spark_df
)
prediction_df = self.spark_model.transform(spark_df).select(prediction_column)
# If signature output schema exists and it contains vector type columns,
# Convert spark ML vector type column to Array[Double] otherwise it will
# break enforce_schema checking
if self.signature and self.signature.outputs:
for col_spec in self.signature.outputs.inputs:
if isinstance(col_spec.type, SparkMLVector):
from pyspark.ml.functions import vector_to_array
col_name = col_spec.name or prediction_df.columns[0]
prediction_df = prediction_df.withColumn(col_name, vector_to_array(col_name))
return [x.prediction for x in prediction_df.collect()]
@autologging_integration(FLAVOR_NAME)
def autolog(disable=False, silent=False):
"""
Enables (or disables) and configures logging of Spark datasource paths, versions
(if applicable), and formats when they are read. This method is not threadsafe and assumes a
`SparkSession
<https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.SparkSession.html>`_
already exists with the
`mlflow-spark JAR
<https://www.mlflow.org/docs/latest/tracking.html#spark>`_
attached. It should be called on the Spark driver, not on the executors (i.e. do not call
this method within a function parallelized by Spark).
The mlflow-spark JAR used must match the Scala version of Spark. Please see the
`Maven Repository
<https://mvnrepository.com/artifact/org.mlflow/mlflow-spark>`_
for available versions. This API requires Spark 3.0 or above.
Datasource information is cached in memory and logged to all subsequent MLflow runs,
including the active MLflow run (if one exists when the data is read). Note that autologging of
Spark ML (MLlib) models is not currently supported via this API. Datasource autologging is
best-effort, meaning that if Spark is under heavy load or MLflow logging fails for any reason
(e.g., if the MLflow server is unavailable), logging may be dropped.
For any unexpected issues with autologging, check Spark driver and executor logs in addition
to stderr & stdout generated from your MLflow code - datasource information is pulled from
Spark, so logs relevant to debugging may show up amongst the Spark logs.
.. Note:: Spark datasource autologging only supports logging to MLflow runs in a single thread
.. code-block:: python
:caption: Example
import mlflow.spark
import os
import shutil
from pyspark.sql import SparkSession
# Create and persist some dummy data
# Note: the 2.12 in 'org.mlflow:mlflow-spark_2.12:2.16.2' below indicates the Scala
# version, please match this with that of Spark. The 2.16.2 indicates the mlflow version.
# Note: On environments like Databricks with pre-created SparkSessions,
# ensure the org.mlflow:mlflow-spark_2.12:2.16.2 is attached as a library to
# your cluster
spark = (
SparkSession.builder.config(
"spark.jars.packages",
"org.mlflow:mlflow-spark_2.12:2.16.2",
)
.master("local[*]")
.getOrCreate()
)
df = spark.createDataFrame(
[(4, "spark i j k"), (5, "l m n"), (6, "spark hadoop spark"), (7, "apache hadoop")],
["id", "text"],
)
import tempfile
tempdir = tempfile.mkdtemp()
df.write.csv(os.path.join(tempdir, "my-data-path"), header=True)
# Enable Spark datasource autologging.
mlflow.spark.autolog()
loaded_df = spark.read.csv(
os.path.join(tempdir, "my-data-path"), header=True, inferSchema=True
)
# Call toPandas() to trigger a read of the Spark datasource. Datasource info
# (path and format) is logged to the current active run, or the
# next-created MLflow run if no run is currently active
with mlflow.start_run() as active_run:
pandas_df = loaded_df.toPandas()
Args:
disable: If ``True``, disables the Spark datasource autologging integration.
If ``False``, enables the Spark datasource autologging integration.
silent: If ``True``, suppress all event logs and warnings from MLflow during Spark
datasource autologging. If ``False``, show all events and warnings during Spark
datasource autologging.
"""
from pyspark import __version__ as pyspark_version
from pyspark.sql import SparkSession
from mlflow.spark.autologging import (
_listen_for_spark_activity,
_stop_listen_for_spark_activity,
)
from mlflow.utils import databricks_utils
from mlflow.utils._spark_utils import _get_active_spark_session
if (
databricks_utils.is_in_databricks_serverless_runtime()
or databricks_utils.is_in_databricks_shared_cluster_runtime()
):
if disable:
return
raise MlflowException(
"MLflow Spark dataset autologging is not supported on Databricks shared clusters "
"or Databricks serverless clusters."
)
# Check if environment variable PYSPARK_PIN_THREAD is set to false.
# The "Pin thread" concept was introduced since Pyspark 3.0.0 and set to default to true
# since Pyspark 3.2.0 (https://issues.apache.org/jira/browse/SPARK-35303). When pin thread
# is enabled, Pyspark manages Python and JVM threads in a 1:1, meaning that when one thread
# is terminated, the corresponding thread in the other side will be terminated as well.
# However, this causes an issue in spark autologging as our event listener thread may be
# terminated before receiving the datasource event.
# Hence, we have to disable it and decouple the thread management between Python and JVM.
if (
Version(pyspark_version) >= Version("3.2.0")
and os.environ.get("PYSPARK_PIN_THREAD", "").lower() != "false"
):
_logger.warning(
"With Pyspark >= 3.2, PYSPARK_PIN_THREAD environment variable must be set to false "
"for Spark datasource autologging to work."
)
def __init__(original, self, *args, **kwargs):
original(self, *args, **kwargs)
_listen_for_spark_activity(self._sc)
safe_patch(FLAVOR_NAME, SparkSession, "__init__", __init__, manage_run=False)
def patched_session_stop(original, self, *args, **kwargs):
_stop_listen_for_spark_activity(self.sparkContext)
original(self, *args, **kwargs)
safe_patch(FLAVOR_NAME, SparkSession, "stop", patched_session_stop, manage_run=False)
active_session = _get_active_spark_session()
if active_session is not None:
# We know SparkContext exists here already, so get it
sc = active_session.sparkContext
if disable:
_stop_listen_for_spark_activity(sc)
else:
_listen_for_spark_activity(sc)
|
_PyFuncModelWrapper
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1229609,
"end": 1229876
}
|
class ____(sgqlc.types.Type, Node, AuditEntry, EnterpriseAuditEntryData, OrganizationAuditEntryData):
    """Audit log entry for a members_can_delete_repos.disable event."""

    __schema__ = github_schema
    __field_names__ = ()
|
MembersCanDeleteReposDisableAuditEntry
|
python
|
networkx__networkx
|
networkx/classes/tests/test_graph_historical.py
|
{
"start": 108,
"end": 258
}
|
class ____(HistoricalTests):
    @classmethod
    def setup_class(cls):
        HistoricalTests.setup_class()
        cls.G = nx.Graph
|
TestGraphHistorical
|
python
|
conda__conda
|
conda/models/dist.py
|
{
"start": 715,
"end": 855
}
|
class ____(NamedTuple):
    name: str
    version: str
    build_string: str
    build_number: str
    dist_name: str
    fmt: str
|
DistDetails
|
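Note that `build_number` is annotated as `str` in this conda excerpt, so consumers get string semantics for it. A self-contained sketch of the NamedTuple in use, with the target name filled in and all field values hypothetical:

```python
from typing import NamedTuple


class DistDetails(NamedTuple):  # name taken from the target column
    name: str
    version: str
    build_string: str
    build_number: str
    dist_name: str
    fmt: str


d = DistDetails("numpy", "1.26.4", "py312_0", "0", "numpy-1.26.4-py312_0", ".conda")
assert d.version == "1.26.4"                          # attribute access by field name
assert d[0] == "numpy"                                # plain tuple indexing still works
assert d._replace(fmt=".tar.bz2").fmt == ".tar.bz2"   # immutably derive variants
```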
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 48746,
"end": 49283
}
|
class ____(sgqlc.types.Enum):
    """The reason an outside collaborator was removed from an
    Organization.

    Enumeration Choices:

    * `SAML_EXTERNAL_IDENTITY_MISSING`: SAML external identity missing
    * `TWO_FACTOR_REQUIREMENT_NON_COMPLIANCE`: The organization
      required 2FA of its billing managers and this user did not have
      2FA enabled.
    """

    __schema__ = github_schema
    __choices__ = ("SAML_EXTERNAL_IDENTITY_MISSING", "TWO_FACTOR_REQUIREMENT_NON_COMPLIANCE")
|
OrgRemoveOutsideCollaboratorAuditEntryReason
|
python
|
pytorch__pytorch
|
torch/ao/nn/intrinsic/modules/fused.py
|
{
"start": 9891,
"end": 10317
}
|
class ____(_FusedModule):
    r"""This is a sequential container which calls the Conv2d, add, Relu.
    During quantization this will be replaced with the corresponding fused module."""

    def __init__(self, conv, add, relu):
        super().__init__(conv)
        self.add = add
        self.relu = relu

    def forward(self, x1, x2):  # type: ignore[override]
        return self.relu(self.add(self[0](x1), x2))
|
ConvAddReLU2d
|
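The container stores `conv` as its single sequential child (`self[0]`) and keeps `add` and `relu` as plain attributes. A hedged usage sketch, assuming a recent torch build exports `ConvAddReLU2d` from `torch.ao.nn.intrinsic` and that a `torch.add`-like callable is an acceptable `add` slot:

```python
import torch
from torch import nn
from torch.ao.nn.intrinsic import ConvAddReLU2d  # assumption: available in this torch build

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
fused = ConvAddReLU2d(conv, torch.add, nn.ReLU())  # add/relu choices are illustrative

x1 = torch.randn(1, 3, 16, 16)
x2 = torch.randn(1, 8, 16, 16)   # must broadcast with conv(x1)'s output
out = fused(x1, x2)              # computes relu(add(conv(x1), x2))
assert out.shape == (1, 8, 16, 16)
assert (out >= 0).all()          # ReLU output is non-negative
```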
python
|
huggingface__transformers
|
src/transformers/models/zoedepth/modeling_zoedepth.py
|
{
"start": 19878,
"end": 23127
}
|
class ____(nn.Module):
    def __init__(self, config, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):
        """Bin center regressor network.

        Can be "normed" or "unnormed". If "normed", bin centers are bounded on the (min_depth, max_depth) interval.

        Args:
            config (`int`):
                Model configuration.
            n_bins (`int`, *optional*, defaults to 16):
                Number of bin centers.
            mlp_dim (`int`, *optional*, defaults to 256):
                Hidden dimension.
            min_depth (`float`, *optional*, defaults to 1e-3):
                Min depth value.
            max_depth (`float`, *optional*, defaults to 10):
                Max depth value.
        """
        super().__init__()

        self.in_features = config.bottleneck_features
        self.bin_centers_type = config.bin_centers_type
        self.min_depth = min_depth
        self.max_depth = max_depth

        self.conv1 = nn.Conv2d(self.in_features, mlp_dim, 1, 1, 0)
        self.act1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(mlp_dim, n_bins, 1, 1, 0)
        self.act2 = nn.ReLU(inplace=True) if self.bin_centers_type == "normed" else nn.Softplus()

    def forward(self, x):
        """
        Returns tensor of bin_width vectors (centers). One vector b for every pixel
        """
        x = self.conv1(x)
        x = self.act1(x)
        x = self.conv2(x)
        bin_centers = self.act2(x)

        if self.bin_centers_type == "normed":
            bin_centers = bin_centers + 1e-3
            bin_widths_normed = bin_centers / bin_centers.sum(dim=1, keepdim=True)
            # shape (batch_size, num_channels, height, width)
            bin_widths = (self.max_depth - self.min_depth) * bin_widths_normed
            # pad has the form (left, right, top, bottom, front, back)
            bin_widths = nn.functional.pad(bin_widths, (0, 0, 0, 0, 1, 0), mode="constant", value=self.min_depth)
            # shape (batch_size, num_channels, height, width)
            bin_edges = torch.cumsum(bin_widths, dim=1)
            bin_centers = 0.5 * (bin_edges[:, :-1, ...] + bin_edges[:, 1:, ...])
            return bin_widths_normed, bin_centers
        else:
            return bin_centers, bin_centers


@torch.jit.script
def inv_attractor(dx, alpha: float = 300, gamma: int = 2):
    """Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
    This is the default one according to the accompanying paper.

    Args:
        dx (`torch.Tensor`):
            The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
        alpha (`float`, *optional*, defaults to 300):
            Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction.
        gamma (`int`, *optional*, defaults to 2):
            Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected.
            Lower gamma = farther reach.

    Returns:
        torch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc
    """
    return dx.div(1 + alpha * dx.pow(gamma))
|
ZoeDepthSeedBinRegressor
|
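The `inv_attractor` formula is easy to sanity-check numerically: at `dx = 0` there is no shift, and larger differences are increasingly damped by the denominator. A tiny verification of `dc = dx / (1 + alpha * dx**gamma)` with the default `alpha=300, gamma=2`, assuming torch is installed:

```python
import torch

dx = torch.tensor([0.0, 0.1, 1.0])
dc = dx.div(1 + 300 * dx.pow(2))   # alpha=300, gamma=2, as in the defaults
# dx=0.1 -> 0.1 / (1 + 3) = 0.025; dx=1 -> 1 / 301
assert torch.allclose(dc, torch.tensor([0.0, 0.1 / 4.0, 1.0 / 301.0]))
```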
python
|
apache__thrift
|
lib/py/src/server/TServer.py
|
{
"start": 1033,
"end": 3016
}
|
class ____(object):
    """Base interface for a server, which must have a serve() method.

    Three constructors for all servers:
    1) (processor, serverTransport)
    2) (processor, serverTransport, transportFactory, protocolFactory)
    3) (processor, serverTransport,
        inputTransportFactory, outputTransportFactory,
        inputProtocolFactory, outputProtocolFactory)
    """

    def __init__(self, *args):
        if (len(args) == 2):
            self.__initArgs__(args[0], args[1],
                              TTransport.TTransportFactoryBase(),
                              TTransport.TTransportFactoryBase(),
                              TBinaryProtocol.TBinaryProtocolFactory(),
                              TBinaryProtocol.TBinaryProtocolFactory())
        elif (len(args) == 4):
            self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
        elif (len(args) == 6):
            self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])

    def __initArgs__(self, processor, serverTransport,
                     inputTransportFactory, outputTransportFactory,
                     inputProtocolFactory, outputProtocolFactory):
        self.processor = processor
        self.serverTransport = serverTransport
        self.inputTransportFactory = inputTransportFactory
        self.outputTransportFactory = outputTransportFactory
        self.inputProtocolFactory = inputProtocolFactory
        self.outputProtocolFactory = outputProtocolFactory

        input_is_header = isinstance(self.inputProtocolFactory, THeaderProtocolFactory)
        output_is_header = isinstance(self.outputProtocolFactory, THeaderProtocolFactory)
        if any((input_is_header, output_is_header)) and input_is_header != output_is_header:
            raise ValueError("THeaderProtocol servers require that both the input and "
                             "output protocols are THeaderProtocol.")

    def serve(self):
        pass
|
TServer
|
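The `TServer` constructor emulates overloading by dispatching on `len(args)`; in the 4-argument form one transport factory and one protocol factory each serve both directions. Note the original silently does nothing for unexpected arities. A dependency-free sketch of the same pattern (all names hypothetical), with an explicit error added for other arities:

```python
class ArityDispatcher:
    """Sketch of TServer-style arity dispatch, without the Thrift dependencies."""

    def __init__(self, *args):
        if len(args) == 2:
            self._init(*args, in_fac=None, out_fac=None)
        elif len(args) == 4:
            # One factory serves both directions, mirroring TServer's
            # (args[2], args[2], args[3], args[3]) expansion.
            self._init(args[0], args[1], in_fac=args[2], out_fac=args[2])
        elif len(args) == 6:
            self._init(args[0], args[1], in_fac=args[2], out_fac=args[3])
        else:
            raise TypeError(f"expected 2, 4 or 6 positional args, got {len(args)}")

    def _init(self, processor, transport, *, in_fac, out_fac):
        self.processor, self.transport = processor, transport
        self.in_fac, self.out_fac = in_fac, out_fac


s = ArityDispatcher("processor", "transport")
assert s.in_fac is None
```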
python
|
wandb__wandb
|
tests/system_tests/test_importers/test_mlflow/conftest.py
|
{
"start": 1394,
"end": 9414
}
|
class ____:
# experiments and metrics
n_experiments: int
n_runs_per_experiment: int
n_steps_per_run: int
# artifacts
n_artifacts: int
n_root_files: int
n_subdirs: int
n_subdir_files: int
# batching
logging_batch_size: int = 50
@property
def total_runs(self):
return self.n_experiments * self.n_runs_per_experiment
@property
def total_files(self):
return self.n_artifacts * (
self.n_root_files + self.n_subdirs * self.n_subdir_files
)
# def make_nested_run():
# with mlflow.start_run():
# for _ in range(NUM_RUNS_PER_NESTED_EXPERIMENT):
# make_run(batch_size=50)
def batch_metrics(metrics, bs: int) -> Iterable[List[Metric]]:
step = 0
for i, batch in enumerate(batched(bs, metrics)):
batched_metrics = []
for step, metric in enumerate(batch, start=i * bs):
for k, v in metric.items():
batched_metrics.append(
Metric(k, v, step=step, timestamp=SECONDS_FROM_2023_01_01)
)
yield batched_metrics
def make_tags():
return st.dictionaries(
st.text(
min_size=1,
max_size=20,
alphabet="abcdefghijklmnopqrstuvwxyz1234567890_- ",
),
st.text(max_size=20),
max_size=10,
).example()
def make_params():
# Older versions have trouble handling certain kinds of strings and larger dicts
if mlflow_version < Version("2.0.0"):
param_str = st.text(
max_size=20, alphabet="abcdefghijklmnopqrstuvwxyz1234567890_- "
).example()
param_dict = st.dictionaries(
st.text(max_size=4, alphabet="abcdefghijklmnopqrstuvwxyz1234567890_- "),
st.integers(),
max_size=2,
).example()
else:
param_str = st.text(max_size=20).example()
param_dict = st.dictionaries(
st.text(max_size=20),
st.integers(),
max_size=10,
).example()
return {
"param_int": st.integers().example(),
"param_float": st.floats().example(),
"param_str": param_str,
"param_bool": st.booleans().example(),
"param_list": st.lists(st.integers()).example(),
"param_dict": param_dict,
"param_tuple": st.tuples(st.integers(), st.integers()).example(),
"param_set": st.sets(st.integers()).example(),
"param_none": None,
}
def make_metrics(n_steps):
for _ in range(n_steps):
yield {
"metric_int": st.integers(min_value=0, max_value=100).example(),
"metric_float": st.floats(min_value=0, max_value=100).example(),
"metric_bool": st.booleans().example(),
}
def make_artifacts_dir(
root_dir: str, n_root_files: int, n_subdirs: int, n_subdir_files: int
) -> str:
# Ensure root_dir exists
os.makedirs(root_dir, exist_ok=True)
for i in range(n_root_files):
file_path = os.path.join(root_dir, f"file{i}.txt")
with open(file_path, "w") as f:
f.write(f"text from {file_path}")
for i in range(n_subdirs):
subdir_path = os.path.join(root_dir, f"subdir{i}")
os.makedirs(subdir_path, exist_ok=True)
for j in range(n_subdir_files):
file_path = os.path.join(subdir_path, f"file{j}.txt")
with open(file_path, "w") as f:
f.write(f"text from {file_path}")
return root_dir
def _check_mlflow_server_health(
base_url: str, endpoint: str, num_retries: int = 1, sleep_time: int = 1
):
for _ in range(num_retries):
try:
response = requests.get(urllib.parse.urljoin(base_url, endpoint))
if response.status_code == 200:
return True
time.sleep(sleep_time)
except requests.exceptions.ConnectionError:
time.sleep(sleep_time)
return False
@pytest.fixture
def mssql_backend(): ...
@pytest.fixture
def mysql_backend(): ...
@pytest.fixture
def postgres_backend(): ...
@pytest.fixture
def file_backend(tmp_path):
yield tmp_path / "mlruns"
@pytest.fixture
def sqlite_backend():
yield "sqlite:///mlflow.db"
# https://github.com/pytest-dev/pytest/issues/349
@pytest.fixture(
params=[
# "mssql_backend",
# "mysql_backend",
# "postgres_backend",
"file_backend",
"sqlite_backend",
]
)
def mlflow_backend(request):
yield request.getfixturevalue(request.param)
@pytest.fixture
def file_artifacts(tmp_path):
yield tmp_path / "mlartifacts"
@pytest.fixture
def s3_artifacts():
yield ...
@pytest.fixture(
params=[
"file_artifacts",
# "s3_artifacts",
]
)
def mlflow_artifacts_destination(request):
yield request.getfixturevalue(request.param)
@pytest.fixture
def mlflow_server_settings(mlflow_artifacts_destination, mlflow_backend):
return MlflowServerSettings(
metrics_backend=mlflow_backend,
artifacts_backend=mlflow_artifacts_destination,
)
@pytest.fixture
def mlflow_logging_config():
return MlflowLoggingConfig(
# run settings
n_experiments=1,
n_runs_per_experiment=2,
n_steps_per_run=100,
# artifact settings
n_artifacts=2,
n_root_files=5,
n_subdirs=3,
n_subdir_files=2,
)
@pytest.fixture
def mlflow_server(mlflow_server_settings):
if mlflow_version < Version("2.0.0"):
start_cmd = [
"mlflow",
"server",
"-p",
mlflow_server_settings.new_port,
# no sqlite
# no --artifacts-destination flag
]
else:
start_cmd = [
"mlflow",
"server",
"-p",
mlflow_server_settings.new_port,
"--backend-store-uri",
mlflow_server_settings.metrics_backend,
"--artifacts-destination",
mlflow_server_settings.artifacts_backend,
]
_ = subprocess.Popen(start_cmd) # process
healthy = _check_mlflow_server_health(
mlflow_server_settings.base_url,
mlflow_server_settings.health_endpoint,
num_retries=30,
)
if healthy:
yield mlflow_server_settings
else:
raise Exception("MLflow server is not healthy")
@pytest.fixture
def prelogged_mlflow_server(mlflow_server, mlflow_logging_config):
config = mlflow_logging_config
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=NonInteractiveExampleWarning)
mlflow.set_tracking_uri(mlflow_server.base_url)
# Experiments
for _ in range(config.n_experiments):
exp_name = "Experiment " + str(uuid.uuid4())
mlflow.set_experiment(exp_name)
# Runs
for _ in range(config.n_runs_per_experiment):
run_name = "Run " + str(uuid.uuid4())
client = MlflowClient()
with mlflow.start_run() as run:
mlflow.set_tag("mlflow.runName", run_name)
mlflow.set_tags(make_tags())
mlflow.set_tag("longTag", "abcd" * 100)
mlflow.log_params(make_params())
metrics = make_metrics(config.n_steps_per_run)
for batch in batch_metrics(metrics, config.logging_batch_size):
client.log_batch(run.info.run_id, metrics=batch)
for _ in range(config.n_artifacts):
with tempfile.TemporaryDirectory() as temp_path:
artifacts_dir = make_artifacts_dir(
temp_path,
config.n_root_files,
config.n_subdirs,
config.n_subdir_files,
)
mlflow.log_artifact(artifacts_dir)
return mlflow_server
|
MlflowLoggingConfig
|
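The `MlflowLoggingConfig` body reads like a dataclass whose `@dataclass` decorator sits just above the captured span (an assumption; `class_span` excerpts often start at the `class` keyword). A trimmed re-declaration to exercise the two derived properties with the values from the `mlflow_logging_config` fixture:

```python
import dataclasses


@dataclasses.dataclass
class LoggingConfigSketch:  # hypothetical name; decorator is an assumption
    n_experiments: int
    n_runs_per_experiment: int
    n_artifacts: int
    n_root_files: int
    n_subdirs: int
    n_subdir_files: int

    @property
    def total_runs(self) -> int:
        return self.n_experiments * self.n_runs_per_experiment

    @property
    def total_files(self) -> int:
        return self.n_artifacts * (self.n_root_files + self.n_subdirs * self.n_subdir_files)


cfg = LoggingConfigSketch(1, 2, 2, 5, 3, 2)   # fixture values from the sample above
assert cfg.total_runs == 2
assert cfg.total_files == 2 * (5 + 3 * 2)     # 22 files across all artifacts
```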
python
|
ray-project__ray
|
rllib/utils/schedules/piecewise_schedule.py
|
{
"start": 439,
"end": 4172
}
|
class ____(Schedule):
    """Implements a Piecewise Scheduler."""

    def __init__(
        self,
        endpoints: List[Tuple[int, float]],
        framework: Optional[str] = None,
        interpolation: Callable[
            [TensorType, TensorType, TensorType], TensorType
        ] = _linear_interpolation,
        outside_value: Optional[float] = None,
    ):
        """Initializes a PiecewiseSchedule instance.

        Args:
            endpoints: A list of tuples
                `(t, value)` such that the output
                is an interpolation (given by the `interpolation` callable)
                between two values.
                E.g.
                t=400 and endpoints=[(0, 20.0),(500, 30.0)]
                output=20.0 + 0.8 * (30.0 - 20.0) = 28.0
                NOTE: All the values for time must be sorted in an increasing
                order.
            framework: The framework descriptor string, e.g. "tf",
                "torch", or None.
            interpolation: A function that takes the left-value,
                the right-value and an alpha interpolation parameter
                (0.0=only left value, 1.0=only right value), which is the
                fraction of distance from left endpoint to right endpoint.
            outside_value: If t in call to `value` is
                outside of all the intervals in `endpoints` this value is
                returned. If None then an AssertionError is raised when outside
                value is requested.
        """
        super().__init__(framework=framework)

        idxes = [e[0] for e in endpoints]
        assert idxes == sorted(idxes)
        self.interpolation = interpolation
        self.outside_value = outside_value
        self.endpoints = [(int(e[0]), float(e[1])) for e in endpoints]

    @override(Schedule)
    def _value(self, t: TensorType) -> TensorType:
        # Find t in our list of endpoints.
        for (l_t, l), (r_t, r) in zip(self.endpoints[:-1], self.endpoints[1:]):
            # When found, return an interpolation (default: linear).
            if l_t <= t < r_t:
                alpha = float(t - l_t) / (r_t - l_t)
                return self.interpolation(l, r, alpha)

        # t does not belong to any of the pieces, return `self.outside_value`.
        assert self.outside_value is not None
        return self.outside_value

    @override(Schedule)
    def _tf_value_op(self, t: TensorType) -> TensorType:
        assert self.outside_value is not None, (
            "tf-version of PiecewiseSchedule requires `outside_value` to be "
            "provided!"
        )

        endpoints = tf.cast(tf.stack([e[0] for e in self.endpoints] + [-1]), tf.int64)

        # Create all possible interpolation results.
        results_list = []
        for (l_t, l), (r_t, r) in zip(self.endpoints[:-1], self.endpoints[1:]):
            alpha = tf.cast(t - l_t, tf.float32) / tf.cast(r_t - l_t, tf.float32)
            results_list.append(self.interpolation(l, r, alpha))
        # If t does not belong to any of the pieces, return `outside_value`.
        results_list.append(self.outside_value)
        results_list = tf.stack(results_list)

        # Return correct results tensor depending on where we find t.
        def _cond(i, x):
            x = tf.cast(x, tf.int64)
            return tf.logical_not(
                tf.logical_or(
                    tf.equal(endpoints[i + 1], -1),
                    tf.logical_and(endpoints[i] <= x, x < endpoints[i + 1]),
                )
            )

        def _body(i, x):
            return (i + 1, t)

        idx_and_t = tf.while_loop(_cond, _body, [tf.constant(0, dtype=tf.int64), t])
        return results_list[idx_and_t[0]]
|
PiecewiseSchedule
|
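The docstring's worked example is easy to reproduce with plain Python (no TF/Torch), which also documents the half-open `[l_t, r_t)` interval convention used by `_value`:

```python
endpoints = [(0, 20.0), (500, 30.0)]


def _linear(l, r, alpha):
    return l + alpha * (r - l)


def value(t, outside_value=None):
    # Mirrors _value above: scan adjacent endpoint pairs for the piece holding t.
    for (l_t, l), (r_t, r) in zip(endpoints[:-1], endpoints[1:]):
        if l_t <= t < r_t:
            return _linear(l, r, float(t - l_t) / (r_t - l_t))
    assert outside_value is not None
    return outside_value


assert value(400) == 28.0                        # alpha = 400/500 = 0.8; 20 + 0.8 * 10
assert value(1000, outside_value=30.0) == 30.0   # outside all pieces
```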
python
|
catalyst-team__catalyst
|
catalyst/contrib/datasets/imagenette.py
|
{
"start": 942,
"end": 1470
}
|
class ____(ImageClassificationDataset):
    """
    `Imagenette <https://github.com/fastai/imagenette#imagenette-1>`_ Dataset
    with images resized so that the shortest size is 320 px.

    .. note::
        catalyst[cv] required for this dataset.
    """

    name = "imagenette2-320"
    resources = [
        (
            "https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz",
            "3df6f0d01a2c9592104656642f5e78a3",
        )
    ]


__all__ = ["Imagenette", "Imagenette160", "Imagenette320"]
|
Imagenette320
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/plugins/test_amp_plugins.py
|
{
"start": 884,
"end": 1944
}
|
class ____(MixedPrecision):
    pass


@RunIf(mps=False)
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "SLURM_NTASKS": "2",
        "SLURM_NTASKS_PER_NODE": "1",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@pytest.mark.parametrize(("strategy", "devices"), [("ddp", 2), ("ddp_spawn", 2)])
@pytest.mark.parametrize(
    ("custom_plugin", "plugin_cls"),
    [
        (False, MixedPrecision),
        (True, MyAMP),
    ],
)
def test_amp_ddp(cuda_count_2, strategy, devices, custom_plugin, plugin_cls):
    plugin = None
    precision = None
    if custom_plugin:
        plugin = plugin_cls("16-mixed", "cpu")
    else:
        precision = "16-mixed"
    trainer = Trainer(
        fast_dev_run=True,
        precision=precision,
        accelerator="gpu",
        devices=devices,
        strategy=strategy,
        plugins=plugin,
    )
    assert isinstance(trainer.precision_plugin, plugin_cls)
|
MyAMP
|
python
|
astropy__astropy
|
astropy/io/fits/diff.py
|
{
"start": 42833,
"end": 45994
}
|
class ____(ImageDataDiff):
"""
`RawDataDiff` is just a special case of `ImageDataDiff` where the images
are one-dimensional, and the data is treated as a 1-dimensional array of
bytes instead of pixel values. This is used to compare the data of two
non-standard extension HDUs that were not recognized as containing image or
table data.
`ImageDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of
`ImageDataDiff` objects. Though the "dimension" of each array is just an
integer representing the number of bytes in the data.
- ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff`
objects, but renamed to reflect the minor semantic difference that these
are raw bytes and not pixel values. Also the indices are integers
instead of tuples.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
"""
def __init__(self, a, b, numdiffs=10):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
"""
self.diff_dimensions = ()
self.diff_bytes = []
super().__init__(a, b, numdiffs=numdiffs)
def _diff(self):
super()._diff()
if self.diff_dimensions:
self.diff_dimensions = (
self.diff_dimensions[0][0],
self.diff_dimensions[1][0],
)
self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels]
del self.diff_pixels
def _report(self):
if self.diff_dimensions:
self._writeln(" Data sizes differ:")
self._writeln(f" a: {self.diff_dimensions[0]} bytes")
self._writeln(f" b: {self.diff_dimensions[1]} bytes")
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(" No further data comparison performed.")
return
if not self.diff_bytes:
return
for index, values in self.diff_bytes:
self._writeln(f" Data differs at byte {index}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
self._writeln(" ...")
self._writeln(
f" {self.diff_total} different bytes found "
f"({self.diff_ratio:.2%} different)."
)
|
RawDataDiff
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/input/base.py
|
{
"start": 2587,
"end": 4030
}
|
class ____(Input):
    """
    Input for use in a `DummyApplication`

    If used in an actual application, it will make the application render
    itself once and exit immediately, due to an `EOFError`.
    """

    def fileno(self) -> int:
        raise NotImplementedError

    def typeahead_hash(self) -> str:
        return f"dummy-{id(self)}"

    def read_keys(self) -> list[KeyPress]:
        return []

    @property
    def closed(self) -> bool:
        # This needs to be true, so that the dummy input will trigger an
        # `EOFError` immediately in the application.
        return True

    def raw_mode(self) -> ContextManager[None]:
        return _dummy_context_manager()

    def cooked_mode(self) -> ContextManager[None]:
        return _dummy_context_manager()

    def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]:
        # Call the callback immediately once after attaching.
        # This tells the callback to call `read_keys` and check the
        # `input.closed` flag, after which it won't receive any keys, but knows
        # that `EOFError` should be raised. This unblocks `read_from_input` in
        # `application.py`.
        input_ready_callback()
        return _dummy_context_manager()

    def detach(self) -> ContextManager[None]:
        return _dummy_context_manager()


@contextmanager
def _dummy_context_manager() -> Generator[None, None, None]:
    yield
|
DummyInput
|
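Every method of `DummyInput` is a deliberate no-op, which makes its contract easy to probe directly. A short sketch, assuming the class is re-exported at the `prompt_toolkit.input` package level:

```python
from prompt_toolkit.input import DummyInput  # assumption: package-level re-export

inp = DummyInput()
assert inp.read_keys() == []       # never produces keys
assert inp.closed                  # so the application raises EOFError and exits
with inp.raw_mode():               # every mode switch is a no-op context manager
    pass
print(inp.typeahead_hash())        # e.g. "dummy-140231112..." (id-based, per instance)
```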
python
|
getsentry__sentry
|
tests/sentry/runner/commands/test_init.py
|
{
"start": 105,
"end": 1029
}
|
class ____(CliTestCase):
    command = init

    def test_simple(self) -> None:
        with self.runner.isolated_filesystem():
            rv = self.invoke("config")
            assert rv.exit_code == 0, rv.output
            contents = os.listdir("config")
            assert set(contents) == {"sentry.conf.py", "config.yml"}

            # Make sure the python file is valid
            ctx = {"__file__": "sentry.conf.py"}
            with open("config/sentry.conf.py") as fp:
                exec(fp.read(), ctx)
            assert "DEBUG" in ctx

            # Make sure the yaml file is valid
            from sentry.utils.yaml import safe_load

            with open("config/config.yml", "rb") as fp:
                ctx = safe_load(fp)
            assert "system.secret-key" in ctx

    def test_no_directory(self) -> None:
        rv = self.invoke("sentry.conf.py")
        assert rv.exit_code != 0, rv.output
|
InitTest
|
python
|
google__jax
|
docs/autodidax.py
|
{
"start": 25723,
"end": 25806
}
|
class ____: pass
not_mapped = NotMapped()
BatchAxis = Union[NotMapped, int]
|
NotMapped
|
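The autodidax sample is a textbook sentinel pattern: a dedicated `NotMapped` type keeps `0` (a perfectly valid axis index) unambiguous, which `None` would not. A sketch of a hypothetical consumer dispatching on the sentinel:

```python
from typing import Union


class NotMapped:
    pass


not_mapped = NotMapped()
BatchAxis = Union[NotMapped, int]


def describe(axis: BatchAxis) -> str:
    # Hypothetical helper: dispatch on the sentinel type rather than on None.
    return "unbatched" if isinstance(axis, NotMapped) else f"batched along axis {axis}"


assert describe(not_mapped) == "unbatched"
assert describe(0) == "batched along axis 0"
```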
python
|
wandb__wandb
|
wandb/automations/_filters/run_metrics.py
|
{
"start": 4931,
"end": 6969
}
|
class ____(BaseMetricFilter):  # from: RunMetricChangeFilter
    """Filter that compares a **change** in a metric value to a user-defined threshold.

    The change is calculated over "tumbling" windows, i.e. the difference
    between the current window and the non-overlapping prior window.
    """

    name: str
    agg: Annotated[Optional[Agg], Field(alias="agg_op")] = None
    window: Annotated[PositiveInt, Field(alias="current_window_size")] = 1

    # `prior_window` is only for `RUN_METRIC_CHANGE` events
    prior_window: Annotated[
        PositiveInt,
        # By default, set `window -> prior_window` if the latter wasn't provided.
        Field(alias="prior_window_size", default_factory=lambda data: data["window"]),
    ]
    """Size of the "prior" metric aggregation window (ignored if `agg` is ``None``).

    If omitted, defaults to the size of the current window.
    """

    # ------------------------------------------------------------------------------
    # NOTE:
    # - The "comparison" operator isn't actually part of the backend schema,
    #   but it's defined here for consistency -- and ignored otherwise.
    # - In the backend, it's effectively "$gte" or "$lte", depending on the sign
    #   (change_dir), though again, this is not explicit in the schema.
    cmp: Annotated[None, Field(frozen=True, exclude=True, repr=False)] = None
    """Ignored."""
    # ------------------------------------------------------------------------------

    change_type: ChangeType
    change_dir: ChangeDir
    threshold: Annotated[PosNum, Field(alias="change_amount")]

    def __repr__(self) -> str:
        metric = f"{self.agg.value}({self.name})" if self.agg else self.name
        verb = (
            "changes"
            if (self.change_dir is ChangeDir.ANY)
            else f"{self.change_dir.value.lower()}s"
        )
        fmt_spec = ".2%" if (self.change_type is ChangeType.REL) else ""
        amt = f"{self.threshold:{fmt_spec}}"
        return repr(rf"{metric} {verb} {amt}")
|
MetricChangeFilter
|
python
|
rapidsai__cudf
|
cpp/scripts/gdb-pretty-printers.py
|
{
"start": 547,
"end": 1000
}
|
class ____(gdb.printing.PrettyPrinter):
    """Print a cudf::host_span"""

    def __init__(self, val):
        self.val = val
        self.pointer = val["_data"]
        self.size = int(val["_size"])

    def children(self):
        return HostIterator(self.pointer, self.size)

    def to_string(self):
        return f"{self.val.type} of length {self.size} at {hex(self.pointer)}"

    def display_hint(self):
        return "array"
|
CudfHostSpanPrinter
|
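A printer class like this only takes effect once it is registered with GDB. A hedged sketch of the usual hookup, using GDB's documented `gdb.printing` API; the collection name and regexp are assumptions, not copied from the cudf script, and this only runs inside a GDB session:

```python
import gdb
import gdb.printing  # importable only inside a GDB session

# Register the printer for types matching a regexp (hypothetical pattern).
pp = gdb.printing.RegexpCollectionPrettyPrinter("cudf")
pp.add_printer("cudf::host_span", "^cudf::host_span<.*>$", CudfHostSpanPrinter)
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
```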
python
|
google__jax
|
jax/_src/pallas/mosaic_gpu/lowering.py
|
{
"start": 6690,
"end": 12954
}
|
class ____(Protocol):
def __call__(
self, ctx: ResourceEstimatorContext, *args: Any, **params: Any
) -> Resources:
...
_resource_estimators: dict[jax_core.Primitive, ResourceEstimator] = {}
def _register_resource_estimator(primitive: jax_core.Primitive):
def deco(fn):
_resource_estimators[primitive] = fn
return fn
return deco
def _estimate_resources(
ctx: ResourceEstimatorContext, jaxpr: jax_core.Jaxpr
) -> Resources:
"""Estimates the resources required by the kernel."""
rs = Resources(smem_scratch_bytes=0)
for eqn in jaxpr.eqns:
# TODO(slebedev): Add support for other primitives, notably control flow.
if rule := _resource_estimators.get(eqn.primitive):
rs = rs.or_(
rule(ctx, *(invar.aval for invar in eqn.invars), **eqn.params),
ctx.axis_names,
)
continue
# Assume that unsupported primitives are neutral wrt resource usage,
# unless they have a jaxpr in their params.
if any(
isinstance(v, (jax_core.Jaxpr, jax_core.ClosedJaxpr))
for v in eqn.params.values()
):
raise NotImplementedError(
f"Resource estimation does not support {eqn.primitive}"
)
return rs
@_register_resource_estimator(lax.cond_p)
def _cond_resource_estimator(
ctx: ResourceEstimatorContext, *args, branches
) -> Resources:
del args # Unused.
return functools.reduce(
lambda a, b: a.or_(b, ctx.axis_names),
(_estimate_resources(ctx, branch.jaxpr) for branch in branches),
)
@_register_resource_estimator(lax.scan_p)
def _scan_resource_estimator(
ctx: ResourceEstimatorContext, *args, jaxpr: jax_core.ClosedJaxpr, **params
) -> Resources:
del args, params # Unused.
return _estimate_resources(ctx, jaxpr.jaxpr)
@_register_resource_estimator(lax.while_p)
def _while_resource_estimator(
ctx: ResourceEstimatorContext,
*args,
cond_jaxpr: jax_core.ClosedJaxpr,
body_jaxpr: jax_core.ClosedJaxpr,
**params,
) -> Resources:
del args, params # Unused.
return _estimate_resources(ctx, cond_jaxpr.jaxpr).or_(
_estimate_resources(ctx, body_jaxpr.jaxpr), ctx.axis_names
)
@_register_resource_estimator(pjit.jit_p)
def _pjit_resource_estimator(
ctx: ResourceEstimatorContext,
*args,
jaxpr: jax_core.ClosedJaxpr,
**params,
) -> Resources:
del args, params # Unused.
return _estimate_resources(ctx, jaxpr.jaxpr)
@_register_resource_estimator(pallas_core.core_map_p)
def _core_map_resource_estimator(
ctx: ResourceEstimatorContext, *args, jaxpr: jax_core.Jaxpr, **params
) -> Resources:
del args, params # Unused.
return _estimate_resources(ctx, jaxpr)
@_register_resource_estimator(discharge.run_state_p)
def _run_state_resource_estimator(
ctx: ResourceEstimatorContext, *args, jaxpr: jax_core.Jaxpr, **params
) -> Resources:
del args, params # Unused.
return _estimate_resources(ctx, jaxpr)
@_register_resource_estimator(primitives.run_scoped_p)
def _run_scoped_resource_estimator(
ctx: ResourceEstimatorContext,
*consts,
jaxpr: jax_core.Jaxpr,
collective_axes,
) -> Resources:
# NOTE: This rule assumes that the allocation happens collectively, although
# it can't be checked here due to limited context. We check this in the actual
# lowering rule.
del consts # Unused.
rs = Resources()
for v in jaxpr.invars:
aval = cast(ShapedAbstractValue, v.aval)
if isinstance(aval.dtype, gpu_core.BarrierType):
multiplier = 1 if aval.dtype.orders_tensor_core else ctx.arrival_multiplier
rs += Resources(
barrier_counts=collections.Counter([
mgpu.Barrier(
aval.dtype.num_arrivals * multiplier, *aval.shape
)
])
)
continue
if isinstance(aval.dtype, gpu_core.ClusterBarrierType):
collective_dims = jax.tree.map(
lambda axis: _resolve_cluster_axis(ctx.axis_names, axis),
aval.dtype.collective_axes,
)
rs += Resources(
barrier_counts=collections.Counter(
[mgpu.ClusterBarrier(collective_dims, aval.dtype.num_arrivals, *aval.shape)]
)
)
continue
assert isinstance(aval, state_types.AbstractRef)
if aval.memory_space == gpu_core.TMEM:
if len(aval.shape) != 2:
raise ValueError(f"TMEM allocations must be 2D. Got {aval.shape}")
# Estimate columns used.
if isinstance(aval, gpu_core.AbstractRefUnion):
assert aval.shape[0] == 128
cols_used = aval.shape[1]
else:
cols_used = aval.layout.cols_in_shape(
aval.shape, dtypes.itemsize_bits(aval.dtype)
)
if aval.collective:
rs += Resources(tmem_collective_scratch_cols=cols_used)
else:
rs += Resources(tmem_scratch_cols=cols_used)
elif aval.memory_space == gpu_core.SMEM:
rs += Resources(
smem_scratch_bytes=aval.size * dtypes.itemsize_bits(aval.dtype) // 8
)
elif aval.memory_space == gpu_core.REGS:
# Don't need to allocate anything.
pass
elif aval.memory_space == gpu_core.GMEM and jnp.issubdtype(aval.dtype, pallas_core.semaphore):
if _is_block_local_scope(collective_axes, ctx.axis_names):
rs += Resources(scoped_gmem_semaphores={collective_axes: aval.size})
else:
raise ValueError(
"Only thread-collective allocations are supported in run_scoped. To"
" allocate global semaphores, use pl.get_global."
)
else:
raise NotImplementedError(
f"Unsupported memory space: {aval.memory_space}")
return rs + _estimate_resources(ctx, jaxpr)
REDUCE_SCRATCH_ELEMS = 128 * 2 # vector of 2 elements per lane in each WG
@_register_resource_estimator(lax.reduce_sum_p)
@_register_resource_estimator(lax.reduce_max_p)
def _reduce_resource_estimator(
ctx: ResourceEstimatorContext, x_aval: jax_core.ShapedArray, *, axes,
**kwargs
) -> Resources:
del ctx, axes # Unused.
# We don't need SMEM for some reductions, but it depends on the layout, so we
# conservatively request the maximum scratch space we might need.
return Resources(smem_scratch_bytes=REDUCE_SCRATCH_ELEMS * x_aval.dtype.itemsize)
@dataclasses.dataclass(frozen=True)
|
ResourceEstimator
|
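The `_resource_estimators` dict plus `_register_resource_estimator` is a classic registry pattern: a decorator maps a key (here, a JAX primitive) to its handler, and a generic driver looks the handler up. A dependency-free sketch of the same idea with string keys (all names hypothetical):

```python
from typing import Callable

_rules: dict[str, Callable[..., int]] = {}


def register_rule(op: str):
    def deco(fn):
        _rules[op] = fn     # key the handler by op, like keying by primitive above
        return fn
    return deco


@register_rule("add")
def _add_rule(a: int, b: int) -> int:
    return a + b


def estimate(op: str, *args: int) -> int:
    if rule := _rules.get(op):  # mirrors `if rule := _resource_estimators.get(...)`
        return rule(*args)
    raise NotImplementedError(f"no estimation rule for {op!r}")


assert estimate("add", 2, 3) == 5
```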
python
|
encode__httpx
|
httpx/_exceptions.py
|
{
"start": 3754,
"end": 3848
}
|
class ____(NetworkError):
    """
    Failed to receive data from the network.
    """
|
ReadError
|
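Because `ReadError` sits under `NetworkError`, which in turn sits under `TransportError`, callers usually catch one of the broader classes rather than each leaf. A short sketch against the public httpx API (the target URL is illustrative):

```python
import httpx

try:
    httpx.get("http://10.255.255.1/", timeout=0.5)  # unroutable address, illustrative
except httpx.TransportError as exc:
    # TransportError is the common ancestor of ReadError, ConnectError,
    # the timeout errors, etc., so this handler covers the class above.
    print(f"network failure: {exc!r}")
```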
python
|
scikit-learn__scikit-learn
|
sklearn/ensemble/_bagging.py
|
{
"start": 8550,
"end": 23682
}
|
class ____(BaseEnsemble, metaclass=ABCMeta):
"""Base class for Bagging meta-estimator.
Warning: This class should not be used directly. Use derived classes
instead.
"""
_parameter_constraints: dict = {
"estimator": [HasMethods(["fit", "predict"]), None],
"n_estimators": [Interval(Integral, 1, None, closed="left")],
"max_samples": [
Interval(Integral, 1, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="right"),
],
"max_features": [
Interval(Integral, 1, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="right"),
],
"bootstrap": ["boolean"],
"bootstrap_features": ["boolean"],
"oob_score": ["boolean"],
"warm_start": ["boolean"],
"n_jobs": [None, Integral],
"random_state": ["random_state"],
"verbose": ["verbose"],
}
@abstractmethod
def __init__(
self,
estimator=None,
n_estimators=10,
*,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
n_jobs=None,
random_state=None,
verbose=0,
):
super().__init__(
estimator=estimator,
n_estimators=n_estimators,
)
self.max_samples = max_samples
self.max_features = max_features
self.bootstrap = bootstrap
self.bootstrap_features = bootstrap_features
self.oob_score = oob_score
self.warm_start = warm_start
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
@_fit_context(
# BaseBagging.estimator is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y, sample_weight=None, **fit_params):
"""Build a Bagging ensemble of estimators from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Used as
probabilities to sample the training set. Note that the expected
frequency semantics for the `sample_weight` parameter are only
fulfilled when sampling with replacement `bootstrap=True`.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
_raise_for_params(fit_params, self, "fit")
# Convert data (X is required to be 2d and indexable)
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc"],
dtype=None,
ensure_all_finite=False,
multi_output=True,
)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=None)
if not self.bootstrap:
warn(
f"When fitting {self.__class__.__name__} with sample_weight "
f"it is recommended to use bootstrap=True, got {self.bootstrap}."
)
return self._fit(
X,
y,
max_samples=self.max_samples,
sample_weight=sample_weight,
**fit_params,
)
def _parallel_args(self):
return {}
def _fit(
self,
X,
y,
max_samples=None,
max_depth=None,
check_input=True,
sample_weight=None,
**fit_params,
):
"""Build a Bagging ensemble of estimators from the training
set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
max_samples : int or float, default=None
Argument to use instead of self.max_samples.
max_depth : int, default=None
Override value used when constructing base estimator. Only
supported if the base estimator has a max_depth parameter.
check_input : bool, default=True
Override value used when fitting base estimator. Only supported
if the base estimator has a check_input parameter for fit function.
If the meta-estimator already checks the input, set this value to
False to prevent redundant input validation.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
**fit_params : dict, default=None
Parameters to pass to the :term:`fit` method of the underlying
estimator.
Returns
-------
self : object
Fitted estimator.
"""
random_state = check_random_state(self.random_state)
# Remap output
n_samples = X.shape[0]
self._n_samples = n_samples
y = self._validate_y(y)
# Check parameters
self._validate_estimator(self._get_estimator())
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(fit=fit_params)
if max_depth is not None:
self.estimator_.max_depth = max_depth
# Validate max_samples
if max_samples is None:
max_samples = self.max_samples
if not isinstance(max_samples, numbers.Integral):
if sample_weight is None:
max_samples = max(int(max_samples * X.shape[0]), 1)
else:
sw_sum = np.sum(sample_weight)
if sw_sum <= 1:
raise ValueError(
f"The total sum of sample weights is {sw_sum}, which prevents "
"resampling with a fractional value for max_samples="
f"{max_samples}. Either pass max_samples as an integer or "
"use a larger sample_weight."
)
max_samples = max(int(max_samples * sw_sum), 1)
if not self.bootstrap and max_samples > X.shape[0]:
raise ValueError(
f"Effective max_samples={max_samples} must be <= n_samples="
f"{X.shape[0]} to be able to sample without replacement."
)
# Store validated integer row sampling value
self._max_samples = max_samples
# Validate max_features
if isinstance(self.max_features, numbers.Integral):
max_features = self.max_features
elif isinstance(self.max_features, float):
max_features = int(self.max_features * self.n_features_in_)
if max_features > self.n_features_in_:
raise ValueError("max_features must be <= n_features")
max_features = max(1, int(max_features))
# Store validated integer feature sampling value
self._max_features = max_features
# Store sample_weight (needed in _get_estimators_indices). Note that
# we intentionally do not materialize `sample_weight=None` as an array
# of ones to avoid unnecessarily cluttering trained estimator pickles.
self._sample_weight = sample_weight
# Other checks
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available if bootstrap=True")
if self.warm_start and self.oob_score:
raise ValueError("Out of bag estimate only available if warm_start=False")
if hasattr(self, "oob_score_") and self.warm_start:
del self.oob_score_
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
self.estimators_features_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError(
"n_estimators=%d must be larger or equal to "
"len(estimators_)=%d when warm_start==True"
% (self.n_estimators, len(self.estimators_))
)
elif n_more_estimators == 0:
warn(
"Warm-start fitting without increasing n_estimators does not "
"fit new trees."
)
return self
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(
n_more_estimators, self.n_jobs
)
total_n_estimators = sum(n_estimators)
# Advance random state to state after training
# the first n_estimators
if self.warm_start and len(self.estimators_) > 0:
random_state.randint(MAX_INT, size=len(self.estimators_))
seeds = random_state.randint(MAX_INT, size=n_more_estimators)
self._seeds = seeds
all_results = Parallel(
n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
)(
delayed(_parallel_build_estimators)(
n_estimators[i],
self,
X,
y,
sample_weight,
seeds[starts[i] : starts[i + 1]],
total_n_estimators,
verbose=self.verbose,
check_input=check_input,
fit_params=routed_params.estimator.fit,
)
for i in range(n_jobs)
)
# Reduce
self.estimators_ += list(
itertools.chain.from_iterable(t[0] for t in all_results)
)
self.estimators_features_ += list(
itertools.chain.from_iterable(t[1] for t in all_results)
)
if self.oob_score:
self._set_oob_score(X, y)
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y(self, y):
if len(y.shape) == 1 or y.shape[1] == 1:
return column_or_1d(y, warn=True)
return y
def _get_estimators_indices(self):
# Get drawn indices along both sample and feature axes
for seed in self._seeds:
# Operations accessing random_state must be performed identically
# to those in `_parallel_build_estimators()`
feature_indices, sample_indices = _generate_bagging_indices(
seed,
self.bootstrap_features,
self.bootstrap,
self.n_features_in_,
self._n_samples,
self._max_features,
self._max_samples,
self._sample_weight,
)
yield feature_indices, sample_indices
@property
def estimators_samples_(self):
"""
The subset of drawn samples for each base estimator.
Returns a dynamically generated list of indices identifying
the samples used for fitting each member of the ensemble, i.e.,
the in-bag samples.
Note: the list is re-created at each call to the property in order
to reduce the object memory footprint by not storing the sampling
data. Thus fetching the property may be slower than expected.
"""
return [sample_indices for _, sample_indices in self._get_estimators_indices()]
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self)
method_mapping = MethodMapping()
method_mapping.add(caller="fit", callee="fit").add(
caller="decision_function", callee="decision_function"
)
# the router needs to be built depending on whether the sub-estimator has a
# `predict_proba` method (as BaggingClassifier decides dynamically at runtime):
if hasattr(self._get_estimator(), "predict_proba"):
(
method_mapping.add(caller="predict", callee="predict_proba").add(
caller="predict_proba", callee="predict_proba"
)
)
else:
(
method_mapping.add(caller="predict", callee="predict").add(
caller="predict_proba", callee="predict"
)
)
# the router needs to be built depending on whether the sub-estimator has a
# `predict_log_proba` method (as BaggingClassifier decides dynamically at
# runtime):
if hasattr(self._get_estimator(), "predict_log_proba"):
method_mapping.add(caller="predict_log_proba", callee="predict_log_proba")
else:
# if `predict_log_proba` is not available in BaggingClassifier's
# sub-estimator, the routing should go to its `predict_proba` if it is
# available or else to its `predict` method; according to how
# `sample_weight` is passed to the respective methods dynamically at
# runtime:
if hasattr(self._get_estimator(), "predict_proba"):
method_mapping.add(caller="predict_log_proba", callee="predict_proba")
else:
method_mapping.add(caller="predict_log_proba", callee="predict")
router.add(estimator=self._get_estimator(), method_mapping=method_mapping)
return router
@abstractmethod
def _get_estimator(self):
"""Resolve which estimator to return."""
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = get_tags(self._get_estimator()).input_tags.sparse
tags.input_tags.allow_nan = get_tags(self._get_estimator()).input_tags.allow_nan
return tags
|
BaseBagging
|
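`BaseBagging` is abstract; the public entry points are `BaggingClassifier` and `BaggingRegressor`. A smoke test exercising the parameters validated above, including fractional `max_samples`/`max_features` and the `oob_score`/`bootstrap` check:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier

X, y = make_classification(n_samples=200, random_state=0)
clf = BaggingClassifier(
    n_estimators=10,
    max_samples=0.5,    # fractional: resolved to int(0.5 * n_samples) rows per estimator
    max_features=0.5,   # fractional: resolved against n_features_in_
    oob_score=True,     # requires bootstrap=True (the default), per the checks above
    random_state=0,
).fit(X, y)
print(round(clf.oob_score_, 3))
```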
python
|
django-guardian__django-guardian
|
example_project/articles/tests.py
|
{
"start": 372,
"end": 3840
}
|
class ____(TestCase):
def setUp(self):
self.article = Article.objects.create(title="foo-title", slug="foo-slug", content="bar-content")
self.factory = RequestFactory()
self.user = get_user_model().objects.create_user("joe", "joe@doe.com", "doe")
self.client.login(username="joe", password="doe")
def test_list_permitted(self):
request = self.factory.get("/")
request.user = self.user
assign_perm("articles.view_article", self.user, self.article)
assign_perm("articles.delete_article", self.user, self.article)
view = ArticleListView.as_view()
response = view(request)
response.render()
self.assertContains(response, "foo-title")
def test_list_denied(self):
request = self.factory.get("/")
request.user = self.user
view = ArticleListView.as_view()
response = view(request)
response.render()
self.assertNotContains(response, "foo-title")
def test_create_permitted(self):
request = self.factory.get("/~create")
request.user = self.user
assign_perm("articles.add_article", self.user)
view = ArticleCreateView.as_view()
response = view(request)
self.assertEqual(response.status_code, 200)
def test_create_denied(self):
request = self.factory.get("/~create")
request.user = self.user
view = ArticleCreateView.as_view()
response = view(request)
self.assertEqual(response.status_code, 302)
def test_detail_permitted(self):
request = self.factory.get("/foo/")
request.user = self.user
assign_perm("articles.view_article", self.user, self.article)
view = ArticleDetailView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 200)
def test_detail_denied(self):
request = self.factory.get("/foo/")
request.user = self.user
view = ArticleDetailView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 302)
def test_update_permitted(self):
request = self.factory.get("/")
request.user = self.user
assign_perm("articles.view_article", self.user, self.article)
assign_perm("articles.change_article", self.user, self.article)
view = ArticleUpdateView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 200)
def test_update_denied(self):
request = self.factory.get("/")
request.user = self.user
view = ArticleUpdateView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 302)
def test_delete_permitted(self):
request = self.factory.get("/foo-slug/~delete")
request.user = self.user
assign_perm("articles.view_article", self.user, self.article)
assign_perm("articles.delete_article", self.user, self.article)
view = ArticleDeleteView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 200)
def test_delete_denied(self):
request = self.factory.get("/foo/~delete")
request.user = self.user
view = ArticleDeleteView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 302)
|
ViewUserTestCase
|
python
|
getsentry__sentry
|
src/sentry/runner/commands/backup.py
|
{
"start": 7733,
"end": 8059
}
|
class ____(Printer):
    """
    A printer that only asks for confirmations, and is otherwise silent.
    """

    def confirm(
        self,
        text: str,
        *,
        default: bool | None = None,
        err: bool = False,
    ) -> bool:
        return click.confirm(text, default=default, err=err)
|
InputOnlyPrinter
|
python
|
zarr-developers__zarr-python
|
src/zarr/core/indexing.py
|
{
"start": 41324,
"end": 46673
}
|
class ____(Indexer):
sel_shape: tuple[int, ...]
selection: CoordinateSelectionNormalized
sel_sort: npt.NDArray[np.intp] | None
chunk_nitems_cumsum: npt.NDArray[np.intp]
chunk_rixs: npt.NDArray[np.intp]
chunk_mixs: tuple[npt.NDArray[np.intp], ...]
shape: tuple[int, ...]
chunk_shape: tuple[int, ...]
drop_axes: tuple[int, ...]
def __init__(
self, selection: CoordinateSelection, shape: tuple[int, ...], chunk_grid: ChunkGrid
) -> None:
chunk_shape = get_chunk_shape(chunk_grid)
cdata_shape: tuple[int, ...]
if shape == ():
cdata_shape = (1,)
else:
cdata_shape = tuple(math.ceil(s / c) for s, c in zip(shape, chunk_shape, strict=True))
nchunks = reduce(operator.mul, cdata_shape, 1)
# some initial normalization
selection_normalized = cast("CoordinateSelectionNormalized", ensure_tuple(selection))
selection_normalized = tuple(
np.asarray([i]) if is_integer(i) else i for i in selection_normalized
)
selection_normalized = cast(
"CoordinateSelectionNormalized", replace_lists(selection_normalized)
)
# validation
if not is_coordinate_selection(selection_normalized, shape):
raise IndexError(
"invalid coordinate selection; expected one integer "
"(coordinate) array per dimension of the target array, "
f"got {selection!r}"
)
# handle wraparound, boundscheck
for dim_sel, dim_len in zip(selection_normalized, shape, strict=True):
# handle wraparound
wraparound_indices(dim_sel, dim_len)
# handle out of bounds
boundscheck_indices(dim_sel, dim_len)
# compute chunk index for each point in the selection
chunks_multi_index = tuple(
dim_sel // dim_chunk_len
for (dim_sel, dim_chunk_len) in zip(selection_normalized, chunk_shape, strict=True)
)
# broadcast selection - this will raise error if array dimensions don't match
selection_broadcast = tuple(np.broadcast_arrays(*selection_normalized))
chunks_multi_index_broadcast = np.broadcast_arrays(*chunks_multi_index)
# remember shape of selection, because we will flatten indices for processing
sel_shape = selection_broadcast[0].shape or (1,)
# flatten selection
selection_broadcast = tuple(dim_sel.reshape(-1) for dim_sel in selection_broadcast)
chunks_multi_index_broadcast = tuple(
dim_chunks.reshape(-1) for dim_chunks in chunks_multi_index_broadcast
)
# ravel chunk indices
chunks_raveled_indices = np.ravel_multi_index(
chunks_multi_index_broadcast, dims=cdata_shape
)
# group points by chunk
if np.any(np.diff(chunks_raveled_indices) < 0):
# optimisation, only sort if needed
sel_sort = np.argsort(chunks_raveled_indices)
selection_broadcast = tuple(dim_sel[sel_sort] for dim_sel in selection_broadcast)
else:
sel_sort = None
shape = selection_broadcast[0].shape or (1,)
# precompute number of selected items for each chunk
chunk_nitems = np.bincount(chunks_raveled_indices, minlength=nchunks)
chunk_nitems_cumsum = np.cumsum(chunk_nitems)
# locate the chunks we need to process
chunk_rixs = np.nonzero(chunk_nitems)[0]
# unravel chunk indices
chunk_mixs = np.unravel_index(chunk_rixs, cdata_shape)
object.__setattr__(self, "sel_shape", sel_shape)
object.__setattr__(self, "selection", selection_broadcast)
object.__setattr__(self, "sel_sort", sel_sort)
object.__setattr__(self, "chunk_nitems_cumsum", chunk_nitems_cumsum)
object.__setattr__(self, "chunk_rixs", chunk_rixs)
object.__setattr__(self, "chunk_mixs", chunk_mixs)
object.__setattr__(self, "chunk_shape", chunk_shape)
object.__setattr__(self, "shape", shape)
object.__setattr__(self, "drop_axes", ())
def __iter__(self) -> Iterator[ChunkProjection]:
# iterate over chunks
for i, chunk_rix in enumerate(self.chunk_rixs):
chunk_coords = tuple(m[i] for m in self.chunk_mixs)
if chunk_rix == 0:
start = 0
else:
start = self.chunk_nitems_cumsum[chunk_rix - 1]
stop = self.chunk_nitems_cumsum[chunk_rix]
out_selection: slice | npt.NDArray[np.intp]
if self.sel_sort is None:
out_selection = slice(start, stop)
else:
out_selection = self.sel_sort[start:stop]
chunk_offsets = tuple(
dim_chunk_ix * dim_chunk_len
for dim_chunk_ix, dim_chunk_len in zip(chunk_coords, self.chunk_shape, strict=True)
)
chunk_selection = tuple(
dim_sel[start:stop] - dim_chunk_offset
for (dim_sel, dim_chunk_offset) in zip(self.selection, chunk_offsets, strict=True)
)
is_complete_chunk = False # TODO
yield ChunkProjection(chunk_coords, chunk_selection, out_selection, is_complete_chunk)
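    # Hedged standalone illustration (NumPy only; the (4, 4) array and 2x2
    # chunks are made-up shapes) of the bincount bookkeeping in __init__:
    #   import numpy as np
    #   sel = (np.array([0, 1, 3]), np.array([0, 3, 1]))    # points to select
    #   multi = tuple(s // 2 for s in sel)                   # chunk coordinate per point
    #   raveled = np.ravel_multi_index(multi, dims=(2, 2))   # chunk id per point
    #   np.bincount(raveled, minlength=4)                    # -> points per chunk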
@dataclass(frozen=True)
|
CoordinateIndexer
|
python
|
scipy__scipy
|
scipy/interpolate/tests/test_bsplines.py
|
{
"start": 65121,
"end": 71946
}
|
class ____:
#
# Test make_lsq_spline
#
rng = np.random.RandomState(1234)
n, k = 13, 3
x = np.sort(rng.random(n))
y = rng.random(n)
t = _augknt(np.linspace(x[0], x[-1], 7), k)
@parametrize_lsq_methods
def test_lstsq(self, method):
# check LSQ construction vs a full matrix version
x, y, t, k = self.x, self.y, self.t, self.k
c0, AY = make_lsq_full_matrix(x, y, t, k)
b = make_lsq_spline(x, y, t, k, method=method)
xp_assert_close(b.c, c0)
assert b.c.shape == (t.size - k - 1,)
# also check against numpy.lstsq
aa, yy = AY
c1, _, _, _ = np.linalg.lstsq(aa, y, rcond=-1)
xp_assert_close(b.c, c1)
@parametrize_lsq_methods
def test_weights(self, method, xp):
# weights = 1 is same as None
x, y, t, k = *map(xp.asarray, (self.x, self.y, self.t)), self.k
w = xp.ones_like(x)
b = make_lsq_spline(x, y, t, k, method=method)
b_w = make_lsq_spline(x, y, t, k, w=w, method=method)
xp_assert_close(b.t, b_w.t, atol=1e-14)
xp_assert_close(b.c, b_w.c, atol=1e-14)
assert b.k == b_w.k
def test_weights_same(self, xp):
        # both methods treat weights identically
x, y, t, k = *map(xp.asarray, (self.x, self.y, self.t)), self.k
w = np.random.default_rng(1234).uniform(size=x.shape[0])
w = xp.asarray(w)
b_ne = make_lsq_spline(x, y, t, k, w=w, method="norm-eq")
b_qr = make_lsq_spline(x, y, t, k, w=w, method="qr")
b_no_w = make_lsq_spline(x, y, t, k, method="qr")
xp_assert_close(b_ne.c, b_qr.c, atol=1e-14)
assert not xp.all(xp.abs(b_no_w.c - b_qr.c) < 1e-14)
@parametrize_lsq_methods
def test_multiple_rhs(self, method, xp):
x, t, k, n = *map(xp.asarray, (self.x, self.t)), self.k, self.n
rng = np.random.RandomState(1234)
y = rng.random(size=(n, 5, 6, 7))
y = xp.asarray(y)
b = make_lsq_spline(x, y, t, k, method=method)
assert b.c.shape == (t.shape[0] - k - 1, 5, 6, 7)
@parametrize_lsq_methods
def test_multiple_rhs_2(self, method, xp):
x, t, k, n = *map(xp.asarray, (self.x, self.t)), self.k, self.n
nrhs = 3
rng = np.random.RandomState(1234)
y = rng.random(size=(n, nrhs))
y = xp.asarray(y)
b = make_lsq_spline(x, y, t, k, method=method)
bb = [make_lsq_spline(x, y[:, i], t, k, method=method)
for i in range(nrhs)]
coefs = xp.stack([bb[i].c for i in range(nrhs)]).T
xp_assert_close(coefs, b.c, atol=1e-15)
def test_multiple_rhs_3(self, xp):
x, t, k, n = *map(xp.asarray, (self.x, self.t)), self.k, self.n
nrhs = 3
y = np.random.random(size=(n, nrhs))
y = xp.asarray(y)
b_qr = make_lsq_spline(x, y, t, k, method="qr")
b_neq = make_lsq_spline(x, y, t, k, method="norm-eq")
xp_assert_close(b_qr.c, b_neq.c, atol=1e-15)
@parametrize_lsq_methods
def test_complex(self, method, xp):
        # complex-valued `y`
x, t, k = *map(xp.asarray, (self.x, self.t)), self.k
yc = xp.asarray(self.y * (1. + 2.j))
b = make_lsq_spline(x, yc, t, k, method=method)
b_re = make_lsq_spline(x, xp.real(yc), t, k, method=method)
b_im = make_lsq_spline(x, xp.imag(yc), t, k, method=method)
xp_assert_close(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15)
def test_complex_2(self, xp):
# test complex-valued y with y.ndim > 1
x, t, k = *map(xp.asarray, (self.x, self.t)), self.k
yc = xp.asarray(self.y * (1. + 2.j))
yc = xp.stack((yc, yc), axis=1)
b = make_lsq_spline(x, yc, t, k)
b_re = make_lsq_spline(x, xp.real(yc), t, k)
b_im = make_lsq_spline(x, xp.imag(yc), t, k)
xp_assert_close(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15)
# repeat with num_trailing_dims > 1 : yc.shape[1:] = (2, 2)
yc = xp.stack((yc, yc), axis=1)
b = make_lsq_spline(x, yc, t, k)
b_re = make_lsq_spline(x, xp.real(yc), t, k)
b_im = make_lsq_spline(x, xp.imag(yc), t, k)
xp_assert_close(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15)
@parametrize_lsq_methods
def test_int_xy(self, method):
x = np.arange(10).astype(int)
y = np.arange(10).astype(int)
t = _augknt(x, k=1)
# Cython chokes on "buffer type mismatch"
make_lsq_spline(x, y, t, k=1, method=method)
@parametrize_lsq_methods
def test_f32_xy(self, method):
x = np.arange(10, dtype=np.float32)
y = np.arange(10, dtype=np.float32)
t = _augknt(x, k=1)
spl_f32 = make_lsq_spline(x, y, t, k=1, method=method)
spl_f64 = make_lsq_spline(
x.astype(float), y.astype(float), t.astype(float), k=1, method=method
)
x2 = (x[1:] + x[:-1]) / 2.0
xp_assert_close(spl_f32(x2), spl_f64(x2), atol=1e-15)
@parametrize_lsq_methods
def test_sliced_input(self, method):
# Cython code chokes on non C contiguous arrays
xx = np.linspace(-1, 1, 100)
x = xx[::3]
y = xx[::3]
t = _augknt(x, 1)
make_lsq_spline(x, y, t, k=1, method=method)
@parametrize_lsq_methods
def test_checkfinite(self, method):
# check_finite defaults to True; nans and such trigger a ValueError
x = np.arange(12).astype(float)
y = x**2
t = _augknt(x, 3)
for z in [np.nan, np.inf, -np.inf]:
y[-1] = z
assert_raises(ValueError, make_lsq_spline, x, y, t, method=method)
@parametrize_lsq_methods
def test_read_only(self, method):
# Check that make_lsq_spline works with read only arrays
x, y, t = self.x, self.y, self.t
x.setflags(write=False)
y.setflags(write=False)
t.setflags(write=False)
make_lsq_spline(x=x, y=y, t=t, method=method)
@pytest.mark.parametrize('k', list(range(1, 7)))
def test_qr_vs_norm_eq(self, k):
# check that QR and normal eq solutions match
x, y = self.x, self.y
t = _augknt(np.linspace(x[0], x[-1], 7), k)
spl_norm_eq = make_lsq_spline(x, y, t, k=k, method='norm-eq')
spl_qr = make_lsq_spline(x, y, t, k=k, method='qr')
xx = (x[1:] + x[:-1]) / 2.0
xp_assert_close(spl_norm_eq(xx), spl_qr(xx), atol=1e-15)
def test_duplicates(self):
# method="qr" can handle duplicated data points
x = np.repeat(self.x, 2)
y = np.repeat(self.y, 2)
spl_1 = make_lsq_spline(self.x, self.y, self.t, k=3, method='qr')
spl_2 = make_lsq_spline(x, y, self.t, k=3, method='qr')
xx = (x[1:] + x[:-1]) / 2.0
xp_assert_close(spl_1(xx), spl_2(xx), atol=1e-15)
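    # Hedged standalone usage sketch of make_lsq_spline (the clamped cubic
    # knot vector here is illustrative and satisfies Schoenberg-Whitney):
    #   import numpy as np
    #   from scipy.interpolate import make_lsq_spline
    #   x = np.linspace(0, 1, 20); y = np.sin(2 * np.pi * x)
    #   t = np.r_[[0.0] * 4, 0.25, 0.5, 0.75, [1.0] * 4]
    #   spl = make_lsq_spline(x, y, t, k=3)
    #   spl(0.3)   # evaluate the least-squares spline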
|
TestLSQ
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_numeric.py
|
{
"start": 26880,
"end": 34529
}
|
class ____(TestCase):
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
u8 = np.uint8(0)
f32, f64 = np.float32(0), np.float64(0)
c64, c128 = np.complex64(0), np.complex128(0)
# coercion within the same kind
assert_equal(promote_func(i8, i16), np.dtype(np.int16))
assert_equal(promote_func(i32, i8), np.dtype(np.int32))
assert_equal(promote_func(i16, i64), np.dtype(np.int64))
assert_equal(promote_func(f32, f64), np.dtype(np.float64))
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
# coercion between kinds
assert_equal(promote_func(b, i32), np.dtype(np.int32))
assert_equal(promote_func(b, u8), np.dtype(np.uint8))
assert_equal(promote_func(i8, u8), np.dtype(np.int16))
assert_equal(promote_func(u8, i32), np.dtype(np.int32))
assert_equal(promote_func(f32, i16), np.dtype(np.float32))
assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
assert_equal(promote_func(c64, np.array([f64])), np.dtype(np.complex128))
assert_equal(
promote_func(np.complex64(3j), np.array([f64])), np.dtype(np.complex128)
)
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
def check_promotion_cases_2(self, promote_func):
# these are failing because of the "scalars do not upcast arrays" rule
        # The first two tests (i32 + f32 -> f64, and i64 + f32 -> f64) xfail
# until ufuncs implement the proper type promotion (ufunc loops?)
i8, i32, i64 = np.int8(0), np.int32(0), np.int64(0)
f32, f64 = np.float32(0), np.float64(0)
c128 = np.complex128(0)
assert_equal(promote_func(i32, f32), np.dtype(np.float64))
assert_equal(promote_func(i64, f32), np.dtype(np.float64))
assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(np.array([f32]), c128), np.dtype(np.complex64))
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype("f8"), f"array type {a.dtype}")
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype("f8"), f"array type {a.dtype}")
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype("f4"), f"array type {a.dtype}")
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype("f2"), f"array type {a.dtype}")
b = 1.234j * a
assert_equal(b.dtype, np.dtype("c16"), f"array type {a.dtype}")
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype("c16"), f"array type {a.dtype}")
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype("c8"), f"array type {a.dtype}")
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
# Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
# a float32, shouldn't promote to float64
#
# a = np.array([1.0, 1.5], dtype=np.float32)
# t = np.array([True, False])
# b = t*a
# assert_equal(b, [1.0, 0.0])
# assert_equal(b.dtype, np.dtype('f4'))
# b = (1-t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
#
# Probably ~t (bitwise negation) is more proper to use here,
# but this is arguably less intuitive to understand at a glance, and
# would fail if 't' is actually an integer array instead of boolean:
#
# b = (~t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
@xpassIfTorchDynamo_np # (reason="'Scalars do not upcast arrays' rule")
def test_coercion_2(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases_2(res_type)
def test_result_type(self):
self.check_promotion_cases(np.result_type)
@skip(reason="array(None) not supported")
    def test_result_type_2(self):
assert_(np.result_type(None) == np.dtype(None))
@skip(reason="no endianness in dtypes")
def test_promote_types_endian(self):
# promote_types should always return native-endian types
assert_equal(np.promote_types("<i8", "<i8"), np.dtype("i8"))
assert_equal(np.promote_types(">i8", ">i8"), np.dtype("i8"))
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, complex))
assert_(not np.can_cast(complex, float))
assert_(np.can_cast("i8", "f8"))
assert_(not np.can_cast("i8", "f4"))
assert_(np.can_cast("i8", "i8", "no"))
@skip(reason="no endianness in dtypes")
def test_can_cast_2(self):
assert_(not np.can_cast("<i8", ">i8", "no"))
assert_(np.can_cast("<i8", ">i8", "equiv"))
assert_(not np.can_cast("<i4", ">i8", "equiv"))
assert_(np.can_cast("<i4", ">i8", "safe"))
assert_(not np.can_cast("<i8", ">i4", "safe"))
assert_(np.can_cast("<i8", ">i4", "same_kind"))
assert_(not np.can_cast("<i8", ">u4", "same_kind"))
assert_(np.can_cast("<i8", ">u4", "unsafe"))
assert_raises(TypeError, np.can_cast, "i4", None)
assert_raises(TypeError, np.can_cast, None, "i4")
# Also test keyword arguments
assert_(np.can_cast(from_=np.int32, to=np.int64))
@xpassIfTorchDynamo_np # (reason="value-based casting?")
def test_can_cast_values(self):
# gh-5917
for dt in [np.int8, np.int16, np.int32, np.int64] + [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]:
ii = np.iinfo(dt)
assert_(np.can_cast(ii.min, dt))
assert_(np.can_cast(ii.max, dt))
assert_(not np.can_cast(ii.min - 1, dt))
assert_(not np.can_cast(ii.max + 1, dt))
for dt in [np.float16, np.float32, np.float64, np.longdouble]:
fi = np.finfo(dt)
assert_(np.can_cast(fi.min, dt))
assert_(np.can_cast(fi.max, dt))
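    # Hedged quick illustration of the promotion rules exercised above
    # (plain NumPy; expected dtypes in comments):
    #   np.result_type(np.int8, np.int16)         # dtype('int16'), same kind
    #   np.result_type(np.int8, np.uint8)         # dtype('int16'), signedness widens
    #   np.result_type(np.float32, np.complex64)  # dtype('complex64')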
# Custom exception class to test exception propagation in fromiter
|
TestTypes
|
python
|
wandb__wandb
|
wandb/vendor/pygments/formatters/img.py
|
{
"start": 1420,
"end": 7329
}
|
class ____(object):
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
elif sys.platform.startswith('darwin'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_MAC
self._create_mac()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
stdout=subprocess.PIPE, stderr=None)
stdout, _ = proc.communicate()
if proc.returncode == 0:
lines = stdout.splitlines()
for line in lines:
if line.startswith(b'Fontconfig warning:'):
continue
path = line.decode().strip().strip(':')
if path:
return path
return None
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _get_mac_font_path(self, font_map, name, style):
return font_map.get((name + ' ' + style).strip().lower())
def _create_mac(self):
font_map = {}
for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
'/Library/Fonts/', '/System/Library/Fonts/'):
font_map.update(
((os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
for f in os.listdir(font_dir) if f.lower().endswith('ttf')))
for name in STYLES['NORMAL']:
path = self._get_mac_font_path(font_map, self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_mac_font_path(font_map, self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except EnvironmentError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
except EnvironmentError:
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows\CurrentVersion\Fonts')
except EnvironmentError:
raise FontNotFound('Can\'t open Windows font registry key')
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
finally:
_winreg.CloseKey(key)
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
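    # Hedged usage sketch (font availability varies by OS; the family name is
    # only an example):
    #   mgr = ____('DejaVu Sans Mono', font_size=14)   # the manager class above
    #   char_w, char_h = mgr.get_char_size()           # pixel size of 'M'
    #   bold = mgr.get_font(bold=True, oblique=False)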
|
FontManager
|
python
|
rapidsai__cudf
|
python/cudf/cudf/core/dtypes.py
|
{
"start": 5273,
"end": 12720
}
|
class ____(_BaseDtype):
"""
Type for categorical data with the categories and orderedness.
Parameters
----------
categories : sequence, optional
Must be unique, and must not contain any nulls.
The categories are stored in an Index,
and if an index is provided the dtype of that index will be used.
ordered : bool or None, default False
        Whether or not this categorical is treated as an ordered categorical.
None can be used to maintain the ordered value of existing categoricals
when used in operations that combine categoricals, e.g. astype, and
will resolve to False if there is no existing ordered to maintain.
Attributes
----------
categories
ordered
Methods
-------
from_pandas
to_pandas
Examples
--------
>>> import cudf
>>> dtype = cudf.CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> cudf.Series(['a', 'b', 'a', 'c'], dtype=dtype)
0 a
1 b
2 a
3 <NA>
dtype: category
Categories (2, object): ['b' < 'a']
"""
def __init__(self, categories=None, ordered: bool | None = False) -> None:
if not (ordered is None or isinstance(ordered, bool)):
raise ValueError("ordered must be a boolean or None")
self._categories = self._init_categories(categories)
self._ordered = ordered
@property
def categories(self) -> Index:
"""
An ``Index`` containing the unique categories allowed.
Examples
--------
>>> import cudf
>>> dtype = cudf.CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> dtype.categories
Index(['b', 'a'], dtype='object')
"""
if self._categories is None:
col = cudf.core.column.column_empty(0, dtype=CUDF_STRING_DTYPE)
else:
col = self._categories
return cudf.Index._from_column(col)
@property
def type(self):
return self._categories.dtype.type
@property
def name(self):
return "category"
@property
def str(self):
return "|O08"
@property
def ordered(self) -> bool | None:
"""
Whether the categories have an ordered relationship.
"""
return self._ordered
@classmethod
def from_pandas(cls, dtype: pd.CategoricalDtype) -> "CategoricalDtype":
"""
        Convert a ``pandas.CategoricalDtype`` to ``cudf.CategoricalDtype``
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> pd_dtype = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> pd_dtype
CategoricalDtype(categories=['b', 'a'], ordered=True, categories_dtype=object)
>>> cudf_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
>>> cudf_dtype
CategoricalDtype(categories=['b', 'a'], ordered=True, categories_dtype=object)
"""
warnings.warn(
"from_pandas is deprecated and will be removed in a future version. "
"Pass the pandas.CategoricalDtype categories and ordered to the CategoricalDtype constructor instead.",
FutureWarning,
)
return CategoricalDtype(
categories=dtype.categories, ordered=dtype.ordered
)
def to_pandas(self) -> pd.CategoricalDtype:
"""
Convert a ``cudf.CategoricalDtype`` to ``pandas.CategoricalDtype``
Examples
--------
>>> import cudf
>>> dtype = cudf.CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> dtype
CategoricalDtype(categories=['b', 'a'], ordered=True, categories_dtype=object)
>>> dtype.to_pandas()
CategoricalDtype(categories=['b', 'a'], ordered=True, categories_dtype=object)
"""
if self._categories is None:
categories = None
elif self._categories.dtype.kind == "f":
categories = self._categories.dropna().to_pandas()
else:
categories = self._categories.to_pandas()
return pd.CategoricalDtype(categories=categories, ordered=self.ordered)
def _init_categories(self, categories: Any) -> ColumnBase | None:
if categories is None:
return categories
from cudf.api.types import is_scalar
if is_scalar(categories):
raise ValueError("categories must be a list-like object")
if len(categories) == 0 and not isinstance(
getattr(categories, "dtype", None),
(IntervalDtype, pd.IntervalDtype),
):
dtype = CUDF_STRING_DTYPE
else:
dtype = None
column = cudf.core.column.as_column(categories, dtype=dtype)
if isinstance(column.dtype, CategoricalDtype):
return column.categories # type: ignore[attr-defined]
else:
return column
def _internal_eq(self, other: Dtype, strict=True) -> bool:
if isinstance(other, str):
return other == self.name
elif other is self:
return True
elif not isinstance(other, self.__class__):
return False
elif other.ordered is None and other._categories is None:
# other is equivalent to the string "category"
return True
elif self._categories is None or other._categories is None:
return self._categories is other._categories
elif self.ordered or other.ordered:
return (self.ordered == other.ordered) and self._categories.equals(
other._categories
)
else:
left_cats = self._categories
right_cats = other._categories
if left_cats.dtype != right_cats.dtype:
return False
if len(left_cats) != len(right_cats):
return False
if self.ordered in {None, False} and other.ordered in {
None,
False,
}:
if strict:
return left_cats.equals(right_cats)
else:
return left_cats.sort_values().equals(
right_cats.sort_values()
)
return self.ordered == other.ordered and left_cats.equals(
right_cats
)
def __eq__(self, other: Dtype) -> bool:
return self._internal_eq(other, strict=False)
def construct_from_string(self):
raise NotImplementedError()
def serialize(self):
header = {}
header["ordered"] = self.ordered
frames = []
if self.categories is not None:
categories_header, categories_frames = (
self.categories.device_serialize()
)
header["categories"] = categories_header
frames.extend(categories_frames)
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
_check_type(cls, header, frames)
ordered = header["ordered"]
categories_header = header["categories"]
categories_frames = frames
categories = Serializable.device_deserialize(
categories_header, categories_frames
)
return cls(categories=categories, ordered=ordered)
def __repr__(self):
return self.to_pandas().__repr__()
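# Hedged illustration of the equality rules implemented above (cudf required;
# __eq__ relaxes _internal_eq to strict=False):
#   a = cudf.CategoricalDtype(categories=['a', 'b'])
#   b = cudf.CategoricalDtype(categories=['b', 'a'])
#   a == b             # True: unordered compare ignores category order
#   a._internal_eq(b)  # False: strict compare respects category order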
|
CategoricalDtype
|
python
|
pytest-dev__pytest
|
src/_pytest/reports.py
|
{
"start": 16483,
"end": 23230
}
|
class ____(TerminalRepr):
def __init__(self, msg: str) -> None:
self.longrepr = msg
def toterminal(self, out: TerminalWriter) -> None:
out.line(self.longrepr, red=True)
def pytest_report_to_serializable(
report: CollectReport | TestReport,
) -> dict[str, Any] | None:
if isinstance(report, TestReport | CollectReport):
data = report._to_json()
data["$report_type"] = report.__class__.__name__
return data
# TODO: Check if this is actually reachable.
return None # type: ignore[unreachable]
def pytest_report_from_serializable(
data: dict[str, Any],
) -> CollectReport | TestReport | None:
if "$report_type" in data:
if data["$report_type"] == "TestReport":
return TestReport._from_json(data)
elif data["$report_type"] == "CollectReport":
return CollectReport._from_json(data)
assert False, "Unknown report_type unserialize data: {}".format(
data["$report_type"]
)
return None
def _report_to_json(report: BaseReport) -> dict[str, Any]:
"""Return the contents of this report as a dict of builtin entries,
suitable for serialization.
This was originally the serialize_report() function from xdist (ca03269).
"""
def serialize_repr_entry(
entry: ReprEntry | ReprEntryNative,
) -> dict[str, Any]:
data = dataclasses.asdict(entry)
for key, value in data.items():
if hasattr(value, "__dict__"):
data[key] = dataclasses.asdict(value)
entry_data = {"type": type(entry).__name__, "data": data}
return entry_data
def serialize_repr_traceback(reprtraceback: ReprTraceback) -> dict[str, Any]:
result = dataclasses.asdict(reprtraceback)
result["reprentries"] = [
serialize_repr_entry(x) for x in reprtraceback.reprentries
]
return result
def serialize_repr_crash(
reprcrash: ReprFileLocation | None,
) -> dict[str, Any] | None:
if reprcrash is not None:
return dataclasses.asdict(reprcrash)
else:
return None
def serialize_exception_longrepr(rep: BaseReport) -> dict[str, Any]:
assert rep.longrepr is not None
# TODO: Investigate whether the duck typing is really necessary here.
longrepr = cast(ExceptionRepr, rep.longrepr)
result: dict[str, Any] = {
"reprcrash": serialize_repr_crash(longrepr.reprcrash),
"reprtraceback": serialize_repr_traceback(longrepr.reprtraceback),
"sections": longrepr.sections,
}
if isinstance(longrepr, ExceptionChainRepr):
result["chain"] = []
for repr_traceback, repr_crash, description in longrepr.chain:
result["chain"].append(
(
serialize_repr_traceback(repr_traceback),
serialize_repr_crash(repr_crash),
description,
)
)
else:
result["chain"] = None
return result
d = report.__dict__.copy()
if hasattr(report.longrepr, "toterminal"):
if hasattr(report.longrepr, "reprtraceback") and hasattr(
report.longrepr, "reprcrash"
):
d["longrepr"] = serialize_exception_longrepr(report)
else:
d["longrepr"] = str(report.longrepr)
else:
d["longrepr"] = report.longrepr
for name in d:
if isinstance(d[name], os.PathLike):
d[name] = os.fspath(d[name])
elif name == "result":
d[name] = None # for now
return d
def _report_kwargs_from_json(reportdict: dict[str, Any]) -> dict[str, Any]:
"""Return **kwargs that can be used to construct a TestReport or
CollectReport instance.
This was originally the serialize_report() function from xdist (ca03269).
"""
def deserialize_repr_entry(entry_data):
data = entry_data["data"]
entry_type = entry_data["type"]
if entry_type == "ReprEntry":
reprfuncargs = None
reprfileloc = None
reprlocals = None
if data["reprfuncargs"]:
reprfuncargs = ReprFuncArgs(**data["reprfuncargs"])
if data["reprfileloc"]:
reprfileloc = ReprFileLocation(**data["reprfileloc"])
if data["reprlocals"]:
reprlocals = ReprLocals(data["reprlocals"]["lines"])
reprentry: ReprEntry | ReprEntryNative = ReprEntry(
lines=data["lines"],
reprfuncargs=reprfuncargs,
reprlocals=reprlocals,
reprfileloc=reprfileloc,
style=data["style"],
)
elif entry_type == "ReprEntryNative":
reprentry = ReprEntryNative(data["lines"])
else:
_report_unserialization_failure(entry_type, TestReport, reportdict)
return reprentry
def deserialize_repr_traceback(repr_traceback_dict):
repr_traceback_dict["reprentries"] = [
deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"]
]
return ReprTraceback(**repr_traceback_dict)
def deserialize_repr_crash(repr_crash_dict: dict[str, Any] | None):
if repr_crash_dict is not None:
return ReprFileLocation(**repr_crash_dict)
else:
return None
if (
reportdict["longrepr"]
and "reprcrash" in reportdict["longrepr"]
and "reprtraceback" in reportdict["longrepr"]
):
reprtraceback = deserialize_repr_traceback(
reportdict["longrepr"]["reprtraceback"]
)
reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"])
if reportdict["longrepr"]["chain"]:
chain = []
for repr_traceback_data, repr_crash_data, description in reportdict[
"longrepr"
]["chain"]:
chain.append(
(
deserialize_repr_traceback(repr_traceback_data),
deserialize_repr_crash(repr_crash_data),
description,
)
)
exception_info: ExceptionChainRepr | ReprExceptionInfo = ExceptionChainRepr(
chain
)
else:
exception_info = ReprExceptionInfo(
reprtraceback=reprtraceback,
reprcrash=reprcrash,
)
for section in reportdict["longrepr"]["sections"]:
exception_info.addsection(*section)
reportdict["longrepr"] = exception_info
return reportdict
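# Hedged round-trip sketch using the two hooks defined above (`report` is a
# TestReport or CollectReport instance):
#   data = pytest_report_to_serializable(report)   # plain dict, "$report_type" set
#   same = pytest_report_from_serializable(data)   # reconstructed report object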
|
CollectErrorRepr
|
python
|
explosion__spaCy
|
spacy/lang/fo/__init__.py
|
{
"start": 375,
"end": 471
}
|
class ____(Language):
lang = "fo"
Defaults = FaroeseDefaults
__all__ = ["Faroese"]
|
Faroese
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/queues.py
|
{
"start": 58854,
"end": 61173
}
|
class ____(Request):
"""
Gets the next task from the top of the queue (FIFO). The task entry is removed from the queue.
:param queue: Queue id
:type queue: str
:param get_task_info: If set then additional task info is returned
:type get_task_info: bool
:param task: Task company ID
:type task: str
"""
_service = "queues"
_action = "get_next_task"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"get_task_info": {
"default": False,
"description": "If set then additional task info is returned",
"type": "boolean",
},
"queue": {"description": "Queue id", "type": "string"},
"task": {"description": "Task company ID", "type": "string"},
},
"required": ["queue"],
"type": "object",
}
def __init__(
self, queue: str, get_task_info: Optional[bool] = False, task: Optional[str] = None, **kwargs: Any
) -> None:
super(GetNextTaskRequest, self).__init__(**kwargs)
self.queue = queue
self.get_task_info = get_task_info
self.task = task
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("get_task_info")
def get_task_info(self) -> Optional[bool]:
return self._property_get_task_info
@get_task_info.setter
def get_task_info(self, value: Optional[bool]) -> None:
if value is None:
self._property_get_task_info = None
return
self.assert_isinstance(value, "get_task_info", (bool,))
self._property_get_task_info = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
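    # Hedged usage sketch (ids are placeholders; only "queue" is required by
    # the schema above):
    #   req = GetNextTaskRequest(queue="<queue-id>", get_task_info=True)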
|
GetNextTaskRequest
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py
|
{
"start": 32186,
"end": 32746
}
|
class ____(graphene.Mutation):
"""Toggle asset auto materializing on or off."""
Output = graphene.NonNull(graphene.Boolean)
class Meta:
name = "SetAutoMaterializedPausedMutation"
class Arguments:
paused = graphene.Argument(graphene.NonNull(graphene.Boolean))
@capture_error
@check_permission(Permissions.TOGGLE_AUTO_MATERIALIZE)
def mutate(self, graphene_info, paused: bool):
set_auto_materialize_paused(graphene_info.context.instance, paused)
return paused
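    # Hedged example of a GraphQL document this could serve, assuming the
    # mutation is mounted as `setAutoMaterializePaused` on the schema root:
    #   mutation { setAutoMaterializePaused(paused: true) }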
|
GrapheneSetAutoMaterializePausedMutation
|
python
|
walkccc__LeetCode
|
solutions/2192. All Ancestors of a Node in a Directed Acyclic Graph/2192-2.py
|
{
"start": 0,
"end": 674
}
|
class ____:
def getAncestors(self, n: int, edges: list[list[int]]) -> list[list[int]]:
ans = [set() for _ in range(n)]
graph = [[] for _ in range(n)]
inDegrees = [0] * n
# Build the graph.
for u, v in edges:
graph[u].append(v)
inDegrees[v] += 1
# Perform topological sorting.
q = collections.deque([i for i, d in enumerate(inDegrees) if d == 0])
while q:
for _ in range(len(q)):
u = q.popleft()
for v in graph[u]:
ans[v].add(u)
ans[v].update(ans[u])
inDegrees[v] -= 1
if inDegrees[v] == 0:
q.append(v)
return [sorted(nodes) for nodes in ans]
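  # Hedged quick check (assumes `collections` is imported; graph is illustrative):
  #   ____().getAncestors(4, [[0, 2], [1, 2], [2, 3]])
  #   # -> [[], [], [0, 1], [0, 1, 2]]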
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/nn_ops/morphological_ops_test.py
|
{
"start": 12198,
"end": 20997
}
|
class ____(test.TestCase):
def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
"""Verifies the output values of the erosion function.
Args:
image: Input tensor with shape: [batch, in_height, in_width, channels].
kernel: Filter tensor with shape: [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
out: Expected output.
use_gpu: Whether we are running on GPU.
"""
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
out_tensor = nn_ops.erosion2d(
constant_op.constant(image),
constant_op.constant(kernel),
strides=strides,
rates=rates,
padding=padding,
name="erosion2d")
self.assertAllClose(out, self.evaluate(out_tensor))
def _testErosionValidPadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 1, 1, 1]
out = [[[[.0]]]]
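    # Worked value, hedged: erosion2d mirrors the kernel across the window, so
    # out = min(.4 - .4, .3 - .3, .2 - .1, .1 - .0) = .0, matching `out` above.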
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testErosionSamePadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 2, 2, 1]
out = [[[[.0], [.1]], [[.3], [.4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingDepth(self, use_gpu):
# [1, 2, 2, 3]
image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
# [2, 2, 3]
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
# [1, 2, 2, 3]
out = [[[[.0, .0, .0], [.1, .1, .1]], [[.3, .3, .3], [.4, .4, .4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingBatch(self, use_gpu):
# [2, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [2, 2, 2, 1]
out = [[[[.0], [.1]], [[.3], [.4]]], [[[.1], [.2]], [[.4], [.5]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionValidPaddingNonSquareWindow(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [1, 2, 1]
kernel = [[[.4], [.3]]]
# [1, 2, 1, 1]
out = [[[[-.2]], [[.0]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingRate(self, use_gpu):
# [1, 3, 3, 1]
image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# Because rate = 2.0, the effective kernel is [3, 3, 1]:
# kernel_eff = [[[.4], [.0], [.3]],
# [[.0], [.0], [.0]],
# [[.1], [.0], [.2]]]
# [1, 3, 3, 1]
out = [[[[.1], [.1], [.2]], [[0.1], [-.1], [.0]], [[.4], [.2], [.3]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[2, 2],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionValidPaddingUnevenStride(self, use_gpu):
# [1, 3, 3, 1]
image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
[[.9], [1.0], [1.1], [1.2]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# [1, 2, 2, 1]
out = [[[[-.1], [.1]], [[.3], [.5]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 2],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def testErosion(self):
for use_gpu in True, False:
self._testErosionValidPadding(use_gpu)
self._testErosionSamePadding(use_gpu)
self._testErosionSamePaddingDepth(use_gpu)
self._testErosionSamePaddingBatch(use_gpu)
self._testErosionValidPaddingNonSquareWindow(use_gpu)
self._testErosionSamePaddingRate(use_gpu)
self._testErosionValidPaddingUnevenStride(use_gpu)
def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates,
padding, use_gpu):
"""Verifies the gradients of the erosion function.
Args:
image_shape: Input shape, [batch, in_height, in_width, channels].
kernel_shape: Filter shape, [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
use_gpu: Whether we are running on GPU.
"""
assert image_shape[3] == kernel_shape[2]
np.random.seed(1) # Make it reproducible.
image = np.random.random_sample(image_shape).astype(np.float32)
kernel = np.random.random_sample(kernel_shape).astype(np.float32)
strides = [1] + strides + [1]
rates = [1] + rates + [1]
image_tensor = constant_op.constant(image, shape=image_shape, name="input")
kernel_tensor = constant_op.constant(
kernel, shape=kernel_shape, name="filter")
def compute_erosion2d(image_tensor, kernel_tensor):
return nn_ops.erosion2d(
image_tensor,
kernel_tensor,
strides=strides,
rates=rates,
padding=padding,
name="erosion2d")
with test_util.device(use_gpu=use_gpu):
with self.cached_session():
# Small delta is necessary for argmax to remain the same.
err1 = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(
lambda x: compute_erosion2d(x, kernel_tensor), [image_tensor]))
err2 = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(
lambda x: compute_erosion2d(image_tensor, x), [kernel_tensor]))
err = max(err1, err2)
print("Erosion gradient error = %f" % err)
self.assertLess(err, 1e-4)
def _testErosionGradValidPadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testErosionGradSamePadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePadding_1x1x2(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 2],
kernel_shape=[1, 1, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradValidPadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testErosionGradSamePadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePaddingBatch_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[4, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePadding_2x2x4(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 4],
kernel_shape=[2, 2, 4],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def testErosionGrad(self):
for use_gpu in True, False:
self._testErosionGradValidPadding_1x1x1(use_gpu)
self._testErosionGradSamePadding_1x1x1(use_gpu)
self._testErosionGradSamePadding_1x1x2(use_gpu)
self._testErosionGradValidPadding_2x2x1(use_gpu)
self._testErosionGradSamePadding_2x2x1(use_gpu)
self._testErosionGradSamePaddingBatch_2x2x1(use_gpu)
self._testErosionGradSamePadding_2x2x4(use_gpu)
if __name__ == "__main__":
test.main()
|
ErosionTest
|
python
|
realpython__materials
|
gemini-cli/todolist/src/todolist/exporter.py
|
{
"start": 282,
"end": 417
}
|
class ____(TypedDict, total=False):
indent: int | str | None
separators: tuple[str, str] | None
sort_keys: bool
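# Hedged usage sketch (assumes `json` is imported; keys mirror json.dump's
# keyword arguments):
#   opts: ____ = {"indent": 2, "sort_keys": True}   # the TypedDict above
#   json.dumps({"b": 1, "a": 2}, **opts)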
|
FormatOptions
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/server_lib.py
|
{
"start": 18031,
"end": 21276
}
|
class ____:
"""Represent a collection of device filters for the remote workers in cluster.
NOTE: this is an experimental API and subject to changes.
Set device filters for selective jobs and tasks. For each remote worker, the
device filters are a list of strings. When any filters are present, the remote
worker will ignore all devices which do not match any of its filters. Each
filter can be partially specified, e.g. "/job:ps", "/job:worker/replica:3",
etc. Note that a device is always visible to the worker it is located on.
For example, to set the device filters for a parameter server cluster:
```python
cdf = tf.config.experimental.ClusterDeviceFilters()
for i in range(num_workers):
cdf.set_device_filters('worker', i, ['/job:ps'])
for i in range(num_ps):
cdf.set_device_filters('ps', i, ['/job:worker'])
tf.config.experimental_connect_to_cluster(cluster_def,
cluster_device_filters=cdf)
```
  The device filters can be partially specified. For remote tasks that do not
have device filters specified, all devices will be visible to them.
"""
def __init__(self):
# `_device_filters` is a dict mapping job names to job device filters.
# Job device filters further maps task IDs to task device filters.
# Task device filters are a list of strings, each one is a device filter.
self._device_filters = {}
# Serialized protobuf for cluster device filters.
self._cluster_device_filters = None
def set_device_filters(self, job_name, task_index, device_filters):
"""Set the device filters for given job name and task id."""
assert all(isinstance(df, str) for df in device_filters)
self._device_filters.setdefault(job_name, {})
self._device_filters[job_name][task_index] = [df for df in device_filters]
# Due to updates in data, invalidate the serialized proto cache.
self._cluster_device_filters = None
def _as_cluster_device_filters(self):
"""Returns a serialized protobuf of cluster device filters."""
if self._cluster_device_filters:
return self._cluster_device_filters
self._make_cluster_device_filters()
return self._cluster_device_filters
def _make_cluster_device_filters(self):
"""Creates `ClusterDeviceFilters` proto based on the `_device_filters`.
Raises:
TypeError: If `_device_filters` is not a dictionary mapping strings to
a map of task indices and device filters.
"""
self._cluster_device_filters = device_filters_pb2.ClusterDeviceFilters()
# Sort by job_name to produce deterministic protobufs.
for job_name, tasks in sorted(self._device_filters.items()):
try:
job_name = compat.as_bytes(job_name)
except TypeError:
raise TypeError("Job name %r must be bytes or unicode" % job_name)
jdf = self._cluster_device_filters.jobs.add()
jdf.name = job_name
for i, task_device_filters in sorted(tasks.items()):
for tdf in task_device_filters:
try:
tdf = compat.as_bytes(tdf)
except TypeError:
raise TypeError("Device filter %r must be bytes or unicode" % tdf)
jdf.tasks[i].device_filters.append(tdf)
|
ClusterDeviceFilters
|
python
|
getsentry__sentry
|
src/sentry_plugins/heroku/plugin.py
|
{
"start": 5175,
"end": 7807
}
|
class ____(CorePluginMixin, ReleaseTrackingPlugin):
author = "Sentry Team"
author_url = "https://github.com/getsentry"
title = "Heroku"
slug = "heroku"
description = "Integrate Heroku release tracking."
required_field = "repository"
feature_descriptions = [
FeatureDescription(
"""
Integrate Heroku release tracking.
""",
IntegrationFeatures.DEPLOYMENT,
)
]
def can_enable_for_projects(self) -> bool:
return True
def can_configure_for_project(self, project) -> bool:
return True
def has_project_conf(self) -> bool:
return True
def get_conf_key(self) -> str:
return "heroku"
def get_config(self, project, user=None, initial=None, add_additional_fields: bool = False):
repo_list = list(Repository.objects.filter(organization_id=project.organization_id))
if not ProjectOption.objects.get_value(project=project, key="heroku:repository"):
choices = [("", "select a repo")]
else:
choices = []
choices.extend([(repo.name, repo.name) for repo in repo_list])
webhook_secret = self.get_option("webhook_secret", project)
secret_field = get_secret_field_config(
webhook_secret,
"Enter the webhook signing secret shown after running the Heroku CLI command.",
)
secret_field.update(
{
"name": "webhook_secret",
"label": "Webhook Secret",
"required": False,
}
)
return [
{
"name": "repository",
"label": "Respository",
"type": "select",
"required": True,
"choices": choices,
"help": "Select which repository you would like to be associated with this project",
},
{
"name": "environment",
"label": "Deploy Environment",
"type": "text",
"required": False,
"default": "production",
"help": "Specify an environment name for your Heroku deploys",
},
secret_field,
]
def get_release_doc_html(self, hook_url):
return f"""
<p>Add a Sentry release webhook to automatically track new releases.</p>
<pre class="clippy">heroku webhooks:add -i api:release -l notify -u {hook_url} -a YOUR_APP_NAME</pre>
"""
def get_release_hook(self) -> type[HerokuReleaseHook]:
return HerokuReleaseHook
|
HerokuPlugin
|
python
|
pytorch__pytorch
|
torch/ao/quantization/fx/_model_report/model_report_visualizer.py
|
{
"start": 411,
"end": 32667
}
|
class ____:
r"""
The ModelReportVisualizer class aims to provide users a way to visualize some of the statistics
that were generated by the ModelReport API. However, at a higher level, the class aims to provide
some level of visualization of statistics to PyTorch in order to make it easier to parse data and
diagnose any potential issues with data or a specific model. With respect to the visualizations,
the ModelReportVisualizer class currently supports several methods of visualizing data.
Supported Visualization Methods Include:
- Table format
- Plot format (line graph)
- Histogram format
For all of the existing visualization methods, there is the option to filter data based on:
- A module fqn prefix
- Feature [required for the plot and histogram]
* :attr:`generated_reports` The reports generated by the ModelReport class in the structure below
        Ensure that features that are the same across different reports contain the same name
Ensure that objects representing the same features are the same type / dimension (where applicable)
Note:
Currently, the ModelReportVisualizer class supports visualization of data generated by the
ModelReport class. However, this structure is extensible and should allow the visualization of
other information as long as the information is structured in the following general format:
Report Structure
-- module_fqn [module with attached detectors]
|
-- feature keys [not every detector extracts same information]
[same collected info has same keys, unless can be specific to detector]
The goal behind the class is that the generated visualizations can be used in conjunction with the generated
report for people to get a better understanding of issues and what the fix might be. It is also just to provide
a good visualization platform, since it might be hard to parse through the ModelReport returned dictionary as
that grows in size.
General Use Flow Expected
1.) Initialize ModelReport object with reports of interest by passing in initialized detector objects
2.) Prepare your model with prepare_fx
3.) Call model_report.prepare_detailed_calibration on your model to add relevant observers
4.) Calibrate your model with data
5.) Call model_report.generate_report on your model to generate report and optionally remove added observers
6.) Use output of model_report.generate_report to initialize ModelReportVisualizer instance
    7.) Use instance to view different views of data as desired, applying filters as needed (see the sketch after this docstring)
8.) Either see the super detailed information or just the actual printed or shown table / plot / histogram
"""
# keys for table dict
TABLE_TENSOR_KEY = "tensor_level_info"
TABLE_CHANNEL_KEY = "channel_level_info"
# Constants for header vals
NUM_NON_FEATURE_TENSOR_HEADERS = 2
NUM_NON_FEATURE_CHANNEL_HEADERS = 3
# Constants for row index in header
CHANNEL_NUM_INDEX = 2
def __init__(self, generated_reports: OrderedDict[str, Any]):
r"""
Initializes the ModelReportVisualizer instance with the necessary reports.
Args:
generated_reports (Dict[str, Any]): The reports generated by the ModelReport class
can also be a dictionary generated in another manner, as long as format is same
"""
self.generated_reports = generated_reports
def get_all_unique_module_fqns(self) -> set[str]:
r"""
The purpose of this method is to provide a user the set of all module_fqns so that if
they wish to use some of the filtering capabilities of the ModelReportVisualizer class,
they don't need to manually parse the generated_reports dictionary to get this information.
Returns all the unique module fqns present in the reports the ModelReportVisualizer
instance was initialized with.
"""
# returns the keys of the ordered dict
return set(self.generated_reports.keys())
def get_all_unique_feature_names(
self, plottable_features_only: bool = True
) -> set[str]:
r"""
The purpose of this method is to provide a user the set of all feature names so that if
they wish to use the filtering capabilities of the generate_table_view(), or use either of
the generate_plot_view() or generate_histogram_view(), they don't need to manually parse
the generated_reports dictionary to get this information.
Args:
plottable_features_only (bool): True if the user is only looking for plottable features,
False otherwise
plottable features are those that are tensor values
Default: True (only return those feature names that are plottable)
        Returns all the unique feature names present in the reports the ModelReportVisualizer
        instance was initialized with.
"""
unique_feature_names = set()
for module_fqn in self.generated_reports:
# get dict of the features
feature_dict: dict[str, Any] = self.generated_reports[module_fqn]
# loop through features
for feature_name in feature_dict:
# if we need plottable, ensure type of val is tensor
if (
not plottable_features_only
or type(feature_dict[feature_name]) is torch.Tensor
):
unique_feature_names.add(feature_name)
# return our compiled set of unique feature names
return unique_feature_names
def _get_filtered_data(
self, feature_filter: str, module_fqn_filter: str
) -> OrderedDict[str, Any]:
r"""
Filters the data and returns it in the same ordered dictionary format so the relevant views can be displayed.
Args:
feature_filter (str): The feature filter, if we want to filter the set of data to only include
a certain set of features that include feature_filter
If feature = "", then we do not filter based on any features
module_fqn_filter (str): The filter on prefix for the module fqn. All modules that have fqn with
this prefix will be included
If module_fqn_filter = "" we do not filter based on module fqn, and include all modules
First, the data is filtered based on module_fqn, and then filtered based on feature
Returns an OrderedDict (sorted in order of model) mapping:
module_fqns -> feature_names -> values
"""
# create return dict
filtered_dict: OrderedDict[str, Any] = OrdDict()
for module_fqn in self.generated_reports:
# first filter based on module
if module_fqn_filter == "" or module_fqn_filter in module_fqn:
# create entry for module and loop through features
filtered_dict[module_fqn] = {}
module_reports = self.generated_reports[module_fqn]
for feature_name in module_reports:
# check if filtering on features and do so if desired
if feature_filter == "" or feature_filter in feature_name:
filtered_dict[module_fqn][feature_name] = module_reports[
feature_name
]
# we have populated the filtered dict, and must return it
return filtered_dict
def _generate_tensor_table(
self,
filtered_data: OrderedDict[str, dict[str, Any]],
tensor_features: list[str],
) -> tuple[list, list]:
r"""
Takes in the filtered data and features list and generates the tensor headers and table
Currently meant to generate the headers and table for both the tensor information.
Args:
filtered_data (OrderedDict[str, Dict[str, Any]]): An OrderedDict (sorted in order of model) mapping:
module_fqns -> feature_names -> values
tensor_features (List[str]): A list of the tensor level features
Returns a tuple with:
A list of the headers of the tensor table
A list of lists containing the table information row by row
The 0th index row will contain the headers of the columns
The rest of the rows will contain data
"""
# now we compose the tensor information table
tensor_table: list[list[Any]] = []
tensor_headers: list[str] = []
# append the table row to the table only if we have features
if len(tensor_features) > 0:
# now we add all the data
for index, module_fqn in enumerate(filtered_data):
# we make a new row for the tensor table
tensor_table_row = [index, module_fqn]
for feature in tensor_features:
# we iterate in same order of added features
if feature in filtered_data[module_fqn]:
# add value if applicable to module
feature_val = filtered_data[module_fqn][feature]
else:
# add that it is not applicable
feature_val = "Not Applicable"
# if it's a tensor we want to extract val
if isinstance(feature_val, torch.Tensor):
feature_val = feature_val.item()
# we add to our list of values
# pyrefly: ignore [bad-argument-type]
tensor_table_row.append(feature_val)
tensor_table.append(tensor_table_row)
        # add a row of headers if we actually have something, otherwise leave it empty
if len(tensor_table) != 0:
tensor_headers = ["idx", "layer_fqn"] + tensor_features
return (tensor_headers, tensor_table)
def _generate_channels_table(
self,
filtered_data: OrderedDict[str, Any],
channel_features: list[str],
num_channels: int,
) -> tuple[list, list]:
r"""
Takes in the filtered data and features list and generates the channels headers and table
Currently meant to generate the headers and table for both the channels information.
Args:
filtered_data (OrderedDict[str, Any]): An OrderedDict (sorted in order of model) mapping:
module_fqns -> feature_names -> values
channel_features (List[str]): A list of the channel level features
num_channels (int): Number of channels in the channel data
Returns a tuple with:
A list of the headers of the channel table
A list of lists containing the table information row by row
The 0th index row will contain the headers of the columns
The rest of the rows will contain data
"""
# now we compose the table for the channel information table
channel_table: list[list[Any]] = []
channel_headers: list[str] = []
# counter to keep track of the number of entries in the table
channel_table_entry_counter: int = 0
if len(channel_features) > 0:
# now we add all channel data
for module_fqn in filtered_data:
# we iterate over all channels
for channel in range(num_channels):
# we make a new row for the channel
new_channel_row = [channel_table_entry_counter, module_fqn, channel]
for feature in channel_features:
if feature in filtered_data[module_fqn]:
# add value if applicable to module
feature_val = filtered_data[module_fqn][feature][channel]
else:
# add that it is not applicable
feature_val = "Not Applicable"
# if it's a tensor we want to extract val
if type(feature_val) is torch.Tensor:
feature_val = feature_val.item()
# add value to channel specific row
# pyrefly: ignore [bad-argument-type]
new_channel_row.append(feature_val)
# add to table and increment row index counter
channel_table.append(new_channel_row)
channel_table_entry_counter += 1
# add the row of headers if we actually have something, otherwise leave empty
if len(channel_table) != 0:
channel_headers = ["idx", "layer_fqn", "channel"] + channel_features
return (channel_headers, channel_table)
def generate_filtered_tables(
self, feature_filter: str = "", module_fqn_filter: str = ""
) -> dict[str, tuple[list, list]]:
r"""
Takes in optional filter values and generates two tables with desired information.
The generated tables are presented in a list-of-lists format
The reason for the two tables is that they handle different things:
1.) the first table handles all tensor level information
2.) the second table handles and displays all channel based information
The reasoning for this is that having all the info in one table can make it ambiguous which collected
statistics are global, and which are actually per-channel, so it's better to split it up into two
tables. This also makes the information much easier to digest given the plethora of statistics collected
Tensor table columns:
idx layer_fqn feature_1 feature_2 feature_3 .... feature_n
---- --------- --------- --------- --------- ---------
Per-Channel table columns:
idx layer_fqn channel feature_1 feature_2 feature_3 .... feature_n
---- --------- ------- --------- --------- --------- ---------
Args:
feature_filter (str, optional): Filters the features presented to only those that
contain this filter substring
Default = "", results in all the features being printed
module_fqn_filter (str, optional): Only includes modules whose fqn contains this string
Default = "", results in all the modules in the reports to be visible in the table
Returns a dictionary with two keys:
(Dict[str, Tuple[List, List]]) A dict containing two keys:
"tensor_level_info", "channel_level_info"
Each key maps to a tuple with:
A list of the headers of each table
A list of lists containing the table information row by row
The 0th index row will contain the headers of the columns
The rest of the rows will contain data
Example Use:
>>> # xdoctest: +SKIP("undefined variables")
>>> mod_report_visualizer.generate_filtered_tables(
... feature_filter="per_channel_min", module_fqn_filter="block1"
... ) # generates table with per_channel_min info for all modules in block 1 of the model
"""
# first get the filtered data
filtered_data: OrderedDict[str, Any] = self._get_filtered_data(
feature_filter, module_fqn_filter
)
# now we split into tensor and per-channel data
tensor_features: set[str] = set()
channel_features: set[str] = set()
# keep track of the number of channels we have
num_channels: int = 0
for module_fqn in filtered_data:
for feature_name in filtered_data[module_fqn]:
# get the data for that specific feature
feature_data = filtered_data[module_fqn][feature_name]
# check if not zero dim tensor
is_tensor: bool = isinstance(feature_data, torch.Tensor)
is_not_zero_dim: bool = is_tensor and len(feature_data.shape) != 0
if is_not_zero_dim or isinstance(feature_data, list):
# a non-zero-dim tensor or a list means per-channel data
channel_features.add(feature_name)
num_channels = len(feature_data)
else:
# means is per-tensor
tensor_features.add(feature_name)
# we make them lists for iteration purposes
tensor_features_list: list[str] = sorted(tensor_features)
channel_features_list: list[str] = sorted(channel_features)
# get the tensor info
tensor_headers, tensor_table = self._generate_tensor_table(
filtered_data, tensor_features_list
)
# get the channel info
channel_headers, channel_table = self._generate_channels_table(
filtered_data, channel_features_list, num_channels
)
# let's now create the dictionary to return
table_dict = {
self.TABLE_TENSOR_KEY: (tensor_headers, tensor_table),
self.TABLE_CHANNEL_KEY: (channel_headers, channel_table),
}
# return the two tables
return table_dict
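# A minimal consumption sketch (assumes `visualizer` is an instance of this
# class and that tabulate is installed; the key names come from the class
# constants used above):
#
#     tables = visualizer.generate_filtered_tables(feature_filter="per_channel_min")
#     headers, rows = tables[ModelReportVisualizer.TABLE_TENSOR_KEY]
#     print(tabulate(rows, headers=headers))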
def generate_table_visualization(
self, feature_filter: str = "", module_fqn_filter: str = ""
):
r"""
Takes in optional filter values and prints out formatted tables of the information.
The reason for the two tables printed out instead of one large one is that they handle different things:
1.) the first table handles all tensor level information
2.) the second table handles and displays all channel based information
The reasoning for this is that having all the info in one table can make it ambiguous which collected
statistics are global, and which are actually per-channel, so it's better to split it up into two
tables. This also makes the information much easier to digest given the plethora of statistics collected
Tensor table columns:
idx layer_fqn feature_1 feature_2 feature_3 .... feature_n
---- --------- --------- --------- --------- ---------
Per-Channel table columns:
idx layer_fqn channel feature_1 feature_2 feature_3 .... feature_n
---- --------- ------- --------- --------- --------- ---------
Args:
feature_filter (str, optional): Filters the features presented to only those that
contain this filter substring
Default = "", results in all the features being printed
module_fqn_filter (str, optional): Only includes modules whose fqn contains this string
Default = "", results in all the modules in the reports to be visible in the table
Example Use:
>>> # xdoctest: +SKIP("undefined variables")
>>> mod_report_visualizer.generate_table_visualization(
... feature_filter="per_channel_min", module_fqn_filter="block1"
... )
>>> # prints out neatly formatted table with per_channel_min info
>>> # for all modules in block 1 of the model
"""
# see if we got tabulate
if not got_tabulate:
print("Make sure to install tabulate and try again.")
return None
# get the table dict and the specific tables of interest
table_dict = self.generate_filtered_tables(feature_filter, module_fqn_filter)
tensor_headers, tensor_table = table_dict[self.TABLE_TENSOR_KEY]
channel_headers, channel_table = table_dict[self.TABLE_CHANNEL_KEY]
# get the table string and print it out
# now we have populated the tables for each one
# let's create the strings to be returned
table_str = ""
# the tables will have some header columns that are non-feature
# ex. table index, module name, channel index, etc.
# we want to look at the header columns for features, which come after those headers
if len(tensor_headers) > self.NUM_NON_FEATURE_TENSOR_HEADERS:
# if we have at least one tensor level feature to be added we add the tensor table
table_str += "Tensor Level Information \n"
table_str += tabulate(tensor_table, headers=tensor_headers)
if len(channel_headers) > self.NUM_NON_FEATURE_CHANNEL_HEADERS:
# if we have at least one channel level feature to be added we add the channel table
table_str += "\n\n Channel Level Information \n"
table_str += tabulate(channel_table, headers=channel_headers)
# if no features at all, let user know
if table_str == "":
table_str = "No data points to generate table with."
print(table_str)
def _get_plottable_data(
self, feature_filter: str, module_fqn_filter: str
) -> tuple[list, list[list], bool]:
r"""
Takes in the feature filters and module filters and outputs the x and y data for plotting
Args:
feature_filter (str): Filters the features presented to only those that
contain this filter substring
module_fqn_filter (str): Only includes modules whose fqn contains this string
Returns a tuple of three elements:
The first is a list containing the relevant x-axis data
The second is a list containing the corresponding y-axis data
The third is a boolean indicating whether the data is per-channel
"""
# get the table dict and the specific tables of interest
table_dict = self.generate_filtered_tables(feature_filter, module_fqn_filter)
tensor_headers, tensor_table = table_dict[self.TABLE_TENSOR_KEY]
channel_headers, channel_table = table_dict[self.TABLE_CHANNEL_KEY]
# make sure it is only 1 feature that is being plotted
# get the number of features in each of these
tensor_info_features_count = (
len(tensor_headers) - ModelReportVisualizer.NUM_NON_FEATURE_TENSOR_HEADERS
)
channel_info_features_count = (
len(channel_headers) - ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS
)
# see if valid tensor or channel plot
is_valid_per_tensor_plot: bool = tensor_info_features_count == 1
is_valid_per_channel_plot: bool = channel_info_features_count == 1
# the feature-column offset corresponds to either the tensor table or the channel table; default to tensor
feature_column_offset = ModelReportVisualizer.NUM_NON_FEATURE_TENSOR_HEADERS
table = tensor_table
# if a per_channel plot, we have different offset and table
if is_valid_per_channel_plot:
feature_column_offset = (
ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS
)
table = channel_table
x_data: list = []
y_data: list[list] = []
# the feature will either be a tensor feature or channel feature
if is_valid_per_tensor_plot:
for table_row_num, row in enumerate(table):
# get x_value to append
x_val_to_append = table_row_num
# the index of the feature will be 0 + the number of non-feature columns
tensor_feature_index = feature_column_offset
row_value = row[tensor_feature_index]
if type(row_value) is not str:
x_data.append(x_val_to_append)
y_data.append(row_value)
elif is_valid_per_channel_plot:
# gather the x_data and multiple y_data
# calculate the number of channels
num_channels: int = max(row[self.CHANNEL_NUM_INDEX] for row in table) + 1
# separate data list per channel
y_data.extend([] for _ in range(num_channels))
for table_row_num, row in enumerate(table):
# get x_value to append
x_val_to_append = table_row_num
current_channel = row[
self.CHANNEL_NUM_INDEX
]  # channel index for the current row
new_module_index: int = table_row_num // num_channels
x_val_to_append = new_module_index
# the index of the feature will be 0 + the number of non-feature columns
tensor_feature_index = feature_column_offset
row_value = row[tensor_feature_index]
if type(row_value) is not str:
# only append if this is a new x index
if len(x_data) == 0 or x_data[-1] != x_val_to_append:
x_data.append(x_val_to_append)
# append value for that channel
y_data[current_channel].append(row_value)
else:
# more than one feature was chosen
error_str = "Make sure to pick only a single feature with your filter to plot a graph."
error_str += " We recommend calling get_all_unique_feature_names() to find unique feature names."
error_str += " Pick one of those features to plot."
raise ValueError(error_str)
# return x, y values, and if data is per-channel
return (x_data, y_data, is_valid_per_channel_plot)
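# Shape sketch of the return value (illustrative, following the logic above):
# per-tensor -> x_data and y_data are parallel flat lists, one entry per row;
# per-channel -> x_data has one index per module and y_data is per-channel, e.g.
#     x_data = [0, 1], y_data = [[c0_m0, c0_m1], [c1_m0, c1_m1]]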
def generate_plot_visualization(
self, feature_filter: str, module_fqn_filter: str = ""
):
r"""
Takes in a feature and optional module_filter and plots the desired data.
For per channel features, it averages the value across the channels and plots a point
per module. The reason for this is that for models with hundreds of channels, it can
be hard to differentiate one channel line from another, and so the point of generating
a single average point per module is to give a sense of general trends that encourage
further deep dives.
Note:
Only features in the report that have tensor value data are plottable by this class
When the tensor information is plotted, it will plot:
idx as the x val, feature value as the y_val
When the channel information is plotted, it will plot:
the first idx of each module as the x val, feature value as the y_val [for each channel]
The reason for this is that we want to be able to compare values across the
channels for same layer, and it will be hard if values are staggered by idx
This means each module is represented by only 1 x value
Args:
feature_filter (str): Filters the features presented to only those that
contain this filter substring
module_fqn_filter (str, optional): Only includes modules whose fqn contains this string
Default = "", results in all the modules in the reports to be visible in the table
Example Use:
>>> # xdoctest: +SKIP("undefined variables")
>>> mod_report_visualizer.generate_plot_visualization(
... feature_filter="per_channel_min", module_fqn_filter="block1"
... )
>>> # outputs line plot of per_channel_min information for all
>>> # modules in block1 of the model; each channel gets its own line,
>>> # and it's plotted across the in-order modules on the x-axis
"""
# checks if we have matplotlib and lets the user know to install it if we don't
if not got_matplotlib:
print("make sure to install matplotlib and try again.")
return None
# get the x and y data and if per channel
x_data, y_data, data_per_channel = self._get_plottable_data(
feature_filter, module_fqn_filter
)
# plot based on whether data is per channel or not
ax = plt.subplot()
ax.set_ylabel(feature_filter)
ax.set_title(feature_filter + " Plot")
plt.xticks(x_data) # only show ticks for actual points
if data_per_channel:
ax.set_xlabel("First idx of module")
# set the legend as well
# plot a single line that is average of the channel values
num_modules = len(
y_data[0]
) # all y_data have same length, so get num modules
num_channels = len(
y_data
) # we want num channels to be able to calculate average later
avg_vals = [
sum(channel_vals[index] for channel_vals in y_data) / num_channels for index in range(num_modules)
]
# plot the per-module average line
ax.plot(
x_data, avg_vals, label=f"Average Value Across {num_channels} Channels"
)
ax.legend(loc="upper right")
else:
ax.set_xlabel("idx")
ax.plot(x_data, y_data)
# actually show the plot
plt.show()
def generate_histogram_visualization(
self, feature_filter: str, module_fqn_filter: str = "", num_bins: int = 10
):
r"""
Takes in a feature and optional module_filter and plots the histogram of desired data.
Note:
Only features in the report that have tensor value data can be viewed as a histogram
If you want to plot a histogram from all the channel values of a specific feature for
a specific model, make sure to specify both the model and the feature properly
in the filters and you should be able to see a distribution of the channel data
Args:
feature_filter (str, optional): Filters the features presented to only those that
contain this filter substring
Default = "", results in all the features being printed
module_fqn_filter (str, optional): Only includes modules whose fqn contains this string
Default = "", results in all the modules in the reports to be visible in the table
num_bins (int, optional): The number of bins to create the histogram with
Default = 10, the values will be split into 10 equal sized bins
Example Use:
>>> # xdoctest: +SKIP
>>> mod_report_visualizer.generate_histogram_visualization(
...     feature_filter="per_channel_min", module_fqn_filter="block1"
... )
>>> # outputs histogram of per_channel_min information for all modules in block1 of the model
>>> # information is gathered across all channels for all modules in block 1 for the
>>> # per_channel_min feature and is displayed in a histogram of equally sized bins
"""
# checks if we have matplotlib and lets the user know to install it if we don't
if not got_matplotlib:
print("make sure to install matplotlib and try again.")
return None
# get the x and y data and if per channel
_x_data, y_data, data_per_channel = self._get_plottable_data(
feature_filter, module_fqn_filter
)
# for histogram, we just care about plotting the y data
# plot based on whether data is per channel or not
ax = plt.subplot()
ax.set_xlabel(feature_filter)
ax.set_ylabel("Frequency")
ax.set_title(feature_filter + " Histogram")
if data_per_channel:
# combine the data from all channels into one flat list
all_data = []
for channel_info in y_data:
all_data.extend(channel_info)
_val, bins, _ = plt.hist(
all_data,
bins=num_bins,
stacked=True,
rwidth=0.8,
)
plt.xticks(bins)
else:
_val, bins, _ = plt.hist(
y_data,
bins=num_bins,
stacked=False,
rwidth=0.8,
)
plt.xticks(bins)
plt.show()
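# A minimal end-to-end usage sketch (hedged: the constructor argument name is an
# assumption based on this file's context, not verified here):
#
#     visualizer = ModelReportVisualizer(generated_reports)
#     visualizer.generate_table_visualization(module_fqn_filter="block1")
#     visualizer.generate_plot_visualization(feature_filter="per_channel_min")
#     visualizer.generate_histogram_visualization(feature_filter="per_channel_min")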
|
ModelReportVisualizer
|
python
|
plotly__plotly.py
|
plotly/graph_objs/image/_legendgrouptitle.py
|
{
"start": 233,
"end": 2925
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "image"
_path_str = "image.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.image.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.image.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.image.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.image.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.image.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
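# A minimal usage sketch (assuming plotly is installed; the `z` pixel data and
# title text are made up for illustration):
#
#     import plotly.graph_objects as go
#     fig = go.Figure(
#         go.Image(
#             z=[[[255, 0, 0], [0, 255, 0]]],
#             legendgrouptitle=dict(text="Camera frames", font=dict(size=14)),
#         )
#     )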
|
Legendgrouptitle
|
python
|
mlflow__mlflow
|
tests/tensorflow/test_tensorflow2_autolog.py
|
{
"start": 17795,
"end": 52385
}
|
class ____:
def __init__(self, data, target, batch_size):
self.data = data
self.target = target
self.batch_size = batch_size
self.ptr = 0
def __next__(self):
if self.ptr >= len(self.data):
raise StopIteration
idx = self.ptr % len(self.data)
self.ptr += 1
return (
self.data[idx : idx + self.batch_size, 0],
self.data[idx : idx + self.batch_size, 1],
), self.target[idx : idx + self.batch_size]
def __iter__(self):
return self
@pytest.mark.parametrize(
"generate_data",
[
__tf_dataset_multi_input,
__SequenceMultiInput,
functools.partial(__generator_multi_input, np.random.rand(2, 10), np.random.rand(10)),
functools.partial(__GeneratorClassMultiInput, np.random.rand(10, 2), np.random.rand(10, 1)),
],
)
@pytest.mark.parametrize("batch_size", [5, 10])
def test_tf_keras_autolog_implicit_batch_size_works_multi_input(generate_data, batch_size):
mlflow.tensorflow.autolog()
input1 = tf.keras.Input(shape=(1,))
input2 = tf.keras.Input(shape=(1,))
concat = tf.keras.layers.Concatenate()([input1, input2])
output = tf.keras.layers.Dense(1, activation="sigmoid")(concat)
model = tf.keras.models.Model(inputs=[input1, input2], outputs=output)
model.compile(loss="mse")
# 'x' passed as arg
model.fit(generate_data(batch_size), verbose=0)
assert mlflow.last_active_run().data.params["batch_size"] == str(batch_size)
# 'x' passed as kwarg
model.fit(x=generate_data(batch_size), verbose=0)
assert mlflow.last_active_run().data.params["batch_size"] == str(batch_size)
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.1.4"),
reason="Does not support passing of generator classes as `x` in `fit`",
)
@pytest.mark.parametrize(
"generator",
[
__generator,
pytest.param(
__GeneratorClass,
marks=pytest.mark.skipif(
Version(tf.__version__).release >= (2, 15)
and "TF_USE_LEGACY_KERAS" not in os.environ,
reason="does not support",
),
),
],
)
@pytest.mark.parametrize("batch_size", [2, 3, 6])
def test_tf_keras_autolog_implicit_batch_size_for_generator_dataset_without_side_effects(
generator,
batch_size,
):
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
data = np.array([[1, 2, 3], [3, 2, 1], [2, 2, 2], [10, 20, 30], [30, 20, 10], [20, 20, 20]])
target = np.array([[1], [3], [2], [11], [13], [12]])
model = Sequential()
model.add(
Dense(
5, input_dim=3, activation="relu", kernel_initializer="zeros", bias_initializer="zeros"
)
)
model.add(Dense(1, kernel_initializer="zeros", bias_initializer="zeros"))
model.compile(loss="mae", optimizer="adam", metrics=["mse"])
mlflow.autolog()
actual_mse = model.fit(generator(data, target, batch_size), verbose=0).history["mse"][-1]
mlflow.autolog(disable=True)
expected_mse = model.fit(generator(data, target, batch_size), verbose=0).history["mse"][-1]
np.testing.assert_allclose(actual_mse, expected_mse, atol=1)
assert mlflow.last_active_run().data.params["batch_size"] == str(batch_size)
def test_tf_keras_autolog_succeeds_for_tf_datasets_lacking_batch_size_info():
X_train = np.random.rand(100, 100)
y_train = np.random.randint(0, 10, 100)
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train))
train_ds = train_ds.batch(50)
train_ds = train_ds.cache().prefetch(buffer_size=5)
assert not hasattr(train_ds, "_batch_size")
model = tf.keras.Sequential()
model.add(tf.keras.Input((100,)))
model.add(tf.keras.layers.Dense(256, activation="relu"))
model.add(tf.keras.layers.Dropout(rate=0.4))
model.add(tf.keras.layers.Dense(10, activation="sigmoid"))
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
optimizer="Adam",
metrics=["accuracy"],
)
mlflow.tensorflow.autolog()
model.fit(train_ds, epochs=100)
assert mlflow.last_active_run().data.params["batch_size"] == "None"
def test_tf_keras_autolog_records_metrics_for_last_epoch(random_train_data, random_one_hot_labels):
num_training_epochs = 17
mlflow.tensorflow.autolog(log_every_epoch=True)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(
random_train_data,
random_one_hot_labels,
epochs=num_training_epochs,
initial_epoch=0,
)
client = MlflowClient()
run_metrics = client.get_run(run.info.run_id).data.metrics
assert "accuracy" in run_metrics
all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
assert len(all_epoch_acc) == num_training_epochs
def test_tf_keras_autolog_logs_metrics_for_single_epoch_training(
random_train_data, random_one_hot_labels
):
"""
tf.Keras exhibits inconsistent epoch indexing behavior in comparison with other
TF2 APIs (e.g., tf.Estimator). tf.Keras uses zero-indexing for epochs,
while other APIs use one-indexing. Accordingly, this test verifies that metrics are
produced in the boundary case where a model is trained for a single epoch, ensuring
that we don't miss the zero index in the tf.Keras case.
"""
mlflow.tensorflow.autolog()
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, epochs=1)
client = MlflowClient()
run_metrics = client.get_run(run.info.run_id).data.metrics
assert "accuracy" in run_metrics
assert "loss" in run_metrics
def test_tf_keras_autolog_names_positional_parameters_correctly(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
with mlflow.start_run():
# Pass `batch_size` as a positional argument for testing purposes
model.fit(data, labels, 8, epochs=10, steps_per_epoch=1)
run_id = mlflow.active_run().info.run_id
client = MlflowClient()
run_info = client.get_run(run_id)
assert run_info.data.params.get("batch_size") == "8"
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_model_can_load_from_artifact(tf_keras_random_data_run, random_train_data):
run, _ = tf_keras_random_data_run
client = MlflowClient()
artifacts = client.list_artifacts(run.info.run_id)
artifacts = (x.path for x in artifacts)
assert "tensorboard_logs" in artifacts
model = mlflow.tensorflow.load_model("runs:/" + run.info.run_id + "/model")
model.predict(random_train_data)
def get_tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
log_models,
):
mlflow.tensorflow.autolog(log_models=log_models)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if callback == "early":
# min_delta is set as such to guarantee early stopping
callback = tf.keras.callbacks.EarlyStopping(
monitor="loss",
patience=patience,
min_delta=99999999,
restore_best_weights=restore_weights,
verbose=1,
)
else:
class CustomCallback(tf.keras.callbacks.Callback):
def on_train_end(self, logs=None):
pass
callback = CustomCallback()
history = model.fit(
data, labels, epochs=initial_epoch + 10, callbacks=[callback], initial_epoch=initial_epoch
)
client = MlflowClient()
return client.get_run(client.search_runs(["0"])[0].info.run_id), history, callback
@pytest.fixture
def tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
log_models,
):
return get_tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
log_models=log_models,
)
@pytest.mark.parametrize("log_models", [True, False])
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_logs(
tf_keras_random_data_run_with_callback, initial_epoch, log_models
):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" in metrics
restored_epoch = int(metrics["restored_epoch"])
# In this test, the best epoch is always the first epoch because the early stopping callback
# never observes a loss improvement due to an extremely large `min_delta` value
assert restored_epoch == initial_epoch
assert "loss" in history.history
client = MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check that MLflow has logged the metrics of the "best" model, in addition to per-epoch metrics
loss = history.history["loss"]
assert len(metric_history) == len(loss) + 1
steps, values = map(list, zip(*[(m.step, m.value) for m in metric_history]))
# Check that MLflow has logged the correct steps
assert steps == [*history.epoch, callback.stopped_epoch + 1]
# Check that MLflow has logged the correct metric values
np.testing.assert_allclose(values, [*loss, callback.best])
artifacts = [f.path for f in client.list_artifacts(run.info.run_id)]
assert "tensorboard_logs" in artifacts
# Check metrics are logged to the LoggedModel
if log_models:
logged_model = mlflow.last_logged_model()
assert logged_model is not None
assert {metric.key: metric.value for metric in logged_model.metrics} == metrics
@pytest.mark.parametrize("log_models", [False])
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [11])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_stop_does_not_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" not in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.parametrize("log_models", [False])
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_restore_doesnt_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == callback.patience + 1
assert len(metric_history) == num_of_epochs
@pytest.mark.parametrize("log_models", [False])
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["not-early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_non_early_stop_callback_no_log(tf_keras_random_data_run_with_callback):
run, history = tf_keras_random_data_run_with_callback[:-1]
metrics = run.data.metrics
params = run.data.params
assert "patience" not in params
assert "monitor" not in params
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" not in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.parametrize("positional", [True, False])
def test_tf_keras_autolog_does_not_mutate_original_callbacks_list(
tmp_path, random_train_data, random_one_hot_labels, positional
):
"""
TensorFlow autologging passes new callbacks to the `fit()` / `fit_generator()` function. If
preexisting user-defined callbacks already exist, these new callbacks are added to the
user-specified ones. This test verifies that the new callbacks are added to the list
without permanently mutating the original list of callbacks.
"""
mlflow.tensorflow.autolog()
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=tmp_path)
callbacks = [tensorboard_callback]
model = create_tf_keras_model()
data = random_train_data
labels = random_one_hot_labels
if positional:
model.fit(data, labels, None, 10, 1, callbacks)
else:
model.fit(data, labels, epochs=10, callbacks=callbacks)
assert len(callbacks) == 1
assert callbacks == [tensorboard_callback]
def test_tf_keras_autolog_does_not_delete_logging_directory_for_tensorboard_callback(
tmp_path, random_train_data, random_one_hot_labels
):
tensorboard_callback_logging_dir_path = str(tmp_path.joinpath("tb_logs"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(
tensorboard_callback_logging_dir_path, histogram_freq=0
)
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10, callbacks=[tensorboard_callback])
assert os.path.exists(tensorboard_callback_logging_dir_path)
def test_tf_keras_autolog_logs_to_and_deletes_temporary_directory_when_tensorboard_callback_absent(
tmp_path, random_train_data, random_one_hot_labels
):
from mlflow.tensorflow import _TensorBoardLogDir
mlflow.tensorflow.autolog()
mock_log_dir_inst = _TensorBoardLogDir(
location=str(tmp_path.joinpath("tb_logging")), is_temp=True
)
with patch("mlflow.tensorflow._TensorBoardLogDir", autospec=True) as mock_log_dir_class:
mock_log_dir_class.return_value = mock_log_dir_inst
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
assert not os.path.exists(mock_log_dir_inst.location)
def get_text_vec_model(train_samples):
# Taken from: https://github.com/mlflow/mlflow/issues/3910
try:
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
except ModuleNotFoundError:
from tensorflow.keras.layers import TextVectorization
VOCAB_SIZE = 10
SEQUENCE_LENGTH = 16
EMBEDDING_DIM = 16
vectorizer_layer = TextVectorization(
max_tokens=VOCAB_SIZE,
output_mode="int",
output_sequence_length=SEQUENCE_LENGTH,
)
vectorizer_layer.adapt(train_samples)
model = tf.keras.Sequential(
[
vectorizer_layer,
tf.keras.layers.Embedding(
VOCAB_SIZE,
EMBEDDING_DIM,
name="embedding",
mask_zero=True,
),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(16, activation="relu"),
tf.keras.layers.Dense(1, activation="tanh"),
]
)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
return model
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.3.0"),
reason=(
"Deserializing a model with `TextVectorization` and `Embedding` "
"fails in tensorflow < 2.3.0. See this issue: "
"https://github.com/tensorflow/tensorflow/issues/38250."
),
)
def test_autolog_text_vec_model(tmp_path):
"""
Verifies autolog successfully saves a model that can't be saved in the H5 format
"""
mlflow.tensorflow.autolog()
train_samples = tf.convert_to_tensor(["this is an example", "another example"])
train_labels = np.array([0.4, 0.2])
model = get_text_vec_model(train_samples)
with mlflow.start_run() as run:
model.fit(train_samples, train_labels, epochs=1)
loaded_model = mlflow.tensorflow.load_model("runs:/" + run.info.run_id + "/model")
np.testing.assert_array_equal(loaded_model.predict(train_samples), model.predict(train_samples))
def test_tf_keras_model_autolog_registering_model(random_train_data, random_one_hot_labels):
registered_model_name = "test_autolog_registered_model"
mlflow.tensorflow.autolog(registered_model_name=registered_model_name)
with mlflow.start_run():
model = create_tf_keras_model()
model.fit(random_train_data, random_one_hot_labels, epochs=10)
registered_model = MlflowClient().get_registered_model(registered_model_name)
assert registered_model.name == registered_model_name
def test_fluent_autolog_with_tf_keras_logs_expected_content(
random_train_data, random_one_hot_labels
):
"""
Guards against previously-exhibited issues where using the fluent `mlflow.autolog()` API with
`tf.keras` Models did not work due to conflicting patches set by both the
`mlflow.tensorflow.autolog()` and the `mlflow.keras.autolog()` APIs.
"""
mlflow.autolog()
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, epochs=10)
client = MlflowClient()
run_data = client.get_run(run.info.run_id).data
assert "accuracy" in run_data.metrics
assert "epochs" in run_data.params
def test_callback_is_picklable():
cb = MlflowCallback()
pickle.dumps(cb)
tb = _TensorBoard()
pickle.dumps(tb)
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.1.0"), reason="This test requires tensorflow >= 2.1.0"
)
def test_tf_keras_autolog_distributed_training(random_train_data, random_one_hot_labels):
# Ref: https://www.tensorflow.org/tutorials/distribute/keras
mlflow.tensorflow.autolog()
with tf.distribute.MirroredStrategy().scope():
model = create_tf_keras_model()
fit_params = {"epochs": 10, "batch_size": 10}
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, **fit_params)
client = MlflowClient()
assert client.get_run(run.info.run_id).data.params.keys() >= fit_params.keys()
def test_import_tensorflow_with_fluent_autolog_enables_tensorflow_autologging():
mlflow.autolog()
import tensorflow # noqa: F401
assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
def _assert_autolog_infers_model_signature_correctly(input_sig_spec, output_sig_spec):
logged_model = mlflow.last_logged_model()
ml_model_path = os.path.join(logged_model.artifact_location, "MLmodel")
with open(ml_model_path) as f:
data = yaml.safe_load(f)
assert data is not None
assert "signature" in data
signature = data["signature"]
assert signature is not None
assert "inputs" in signature
assert "outputs" in signature
assert json.loads(signature["inputs"]) == input_sig_spec
assert json.loads(signature["outputs"]) == output_sig_spec
def _assert_keras_autolog_input_example_load_and_predict_with_nparray(random_train_data):
logged_model = mlflow.last_logged_model()
model_conf = Model.load(logged_model.model_uri)
input_example = _read_example(model_conf, logged_model.model_uri)
np.testing.assert_array_almost_equal(input_example, random_train_data[:5])
pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)
pyfunc_model.predict(input_example)
def test_keras_autolog_input_example_load_and_predict_with_nparray(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog(log_input_examples=True, log_model_signatures=True)
initial_model = create_tf_keras_model()
with mlflow.start_run():
initial_model.fit(random_train_data, random_one_hot_labels)
_assert_keras_autolog_input_example_load_and_predict_with_nparray(random_train_data)
def test_keras_autolog_infers_model_signature_correctly_with_nparray(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog(log_model_signatures=True)
initial_model = create_tf_keras_model()
with mlflow.start_run():
initial_model.fit(random_train_data, random_one_hot_labels)
_assert_autolog_infers_model_signature_correctly(
[{"type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1, 4]}}],
[{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 3]}}],
)
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.1.0"),
reason="tf.data.Dataset inputs are unsupported for input example logging in TensorFlow < 2.1.0",
)
def test_keras_autolog_input_example_load_and_predict_with_tf_dataset(fashion_mnist_tf_dataset):
mlflow.tensorflow.autolog(log_input_examples=True, log_model_signatures=True)
fashion_mnist_model = _create_fashion_mnist_model()
with mlflow.start_run():
fashion_mnist_model.fit(fashion_mnist_tf_dataset)
logged_model = mlflow.last_logged_model()
model_conf = Model.load(logged_model.model_uri)
input_example = _read_example(model_conf, logged_model.model_uri)
pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)
pyfunc_model.predict(input_example)
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.1.0"),
reason="tf.data.Dataset inputs are unsupported for signature logging in TensorFlow < 2.1.0",
)
def test_keras_autolog_infers_model_signature_correctly_with_tf_dataset(fashion_mnist_tf_dataset):
mlflow.tensorflow.autolog(log_model_signatures=True)
fashion_mnist_model = _create_fashion_mnist_model()
with mlflow.start_run():
fashion_mnist_model.fit(fashion_mnist_tf_dataset)
_assert_autolog_infers_model_signature_correctly(
[{"type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1, 28, 28]}}],
[{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 10]}}],
)
def test_keras_autolog_input_example_load_and_predict_with_dict(
random_train_dict_mapping, random_one_hot_labels
):
mlflow.tensorflow.autolog(log_input_examples=True, log_model_signatures=True)
model = _create_model_for_dict_mapping()
with mlflow.start_run():
model.fit(random_train_dict_mapping, random_one_hot_labels)
logged_model = mlflow.last_logged_model()
model_conf = Model.load(logged_model.model_uri)
input_example = _read_example(model_conf, logged_model.model_uri)
for k, v in random_train_dict_mapping.items():
np.testing.assert_array_almost_equal(input_example[k], np.take(v, range(0, 5)))
pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)
pyfunc_model.predict(input_example)
def test_keras_autolog_infers_model_signature_correctly_with_dict(
random_train_dict_mapping, random_one_hot_labels
):
mlflow.tensorflow.autolog(log_model_signatures=True)
model = _create_model_for_dict_mapping()
with mlflow.start_run():
model.fit(random_train_dict_mapping, random_one_hot_labels)
_assert_autolog_infers_model_signature_correctly(
[
{"name": "a", "type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1]}},
{"name": "b", "type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1]}},
{"name": "c", "type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1]}},
{"name": "d", "type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1]}},
],
[{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 3]}}],
)
def test_keras_autolog_input_example_load_and_predict_with_keras_sequence(keras_data_gen_sequence):
mlflow.tensorflow.autolog(log_input_examples=True, log_model_signatures=True)
model = create_tf_keras_model()
with mlflow.start_run():
model.fit(keras_data_gen_sequence)
_assert_keras_autolog_input_example_load_and_predict_with_nparray(
keras_data_gen_sequence[:][0][:5]
)
def test_keras_autolog_infers_model_signature_correctly_with_keras_sequence(
keras_data_gen_sequence,
):
mlflow.tensorflow.autolog(log_model_signatures=True)
initial_model = create_tf_keras_model()
with mlflow.start_run():
initial_model.fit(keras_data_gen_sequence)
_assert_autolog_infers_model_signature_correctly(
[{"type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1, 4]}}],
[{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 3]}}],
)
def test_keras_autolog_load_saved_hdf5_model(keras_data_gen_sequence):
mlflow.tensorflow.autolog(keras_model_kwargs={"save_format": "h5"})
model = create_tf_keras_model()
with mlflow.start_run():
model.fit(keras_data_gen_sequence)
logged_model = mlflow.last_logged_model()
assert Path(logged_model.artifact_location, "data", "model.h5").exists()
def test_keras_autolog_logs_model_signature_by_default(keras_data_gen_sequence):
mlflow.autolog()
initial_model = create_tf_keras_model()
initial_model.fit(keras_data_gen_sequence)
logged_model = mlflow.last_logged_model()
mlmodel_path = f"{logged_model.artifact_location}/MLmodel"
with open(mlmodel_path) as f:
mlmodel_contents = yaml.safe_load(f)
assert "signature" in mlmodel_contents.keys()
signature = mlmodel_contents["signature"]
assert signature is not None
assert "inputs" in signature
assert "outputs" in signature
assert json.loads(signature["inputs"]) == [
{"type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1, 4]}}
]
assert json.loads(signature["outputs"]) == [
{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 3]}}
]
def test_extract_tf_keras_input_example_unsupported_type_returns_None():
from mlflow.tensorflow.autologging import extract_tf_keras_input_example
extracted_data = extract_tf_keras_input_example([1, 2, 4, 5])
assert extracted_data is None, (
"Keras input data extraction function should have "
"returned None as input type is not supported."
)
def test_extract_input_example_from_tf_input_fn_unsupported_type_returns_None():
from mlflow.tensorflow.autologging import extract_tf_keras_input_example
extracted_data = extract_tf_keras_input_example(lambda: [1, 2, 4, 5])
assert extracted_data is None, (
"Tensorflow's input_fn training data extraction should have"
" returned None as input type is not supported."
)
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason=("TensorFlow only has a hard dependency on Keras in version >= 2.6.0"),
)
def test_import_keras_model_trigger_import_tensorflow():
# This test guards that importing a keras Model triggers importing tensorflow
# Because in Keras>=2.6, the keras autologging patching is installed by
# `mlflow.tensorflow.autolog`, suppose user enable autolog by `mlflow.autolog()`,
# and then import keras, if keras does not trigger importing tensorflow,
# then the keras autologging patching cannot be installed.
py_executable = sys.executable
_exec_cmd(
[
py_executable,
"-c",
"from keras import Model; import sys; assert 'tensorflow' in sys.modules",
]
)
def test_autolog_throw_error_on_explicit_mlflow_callback(keras_data_gen_sequence):
mlflow.tensorflow.autolog()
model = create_tf_keras_model()
with mlflow.start_run() as run:
with pytest.raises(MlflowException, match="MLflow autologging must be turned off*"):
model.fit(keras_data_gen_sequence, callbacks=[MlflowCallback(run)])
def test_autolog_correct_logging_frequency(random_train_data, random_one_hot_labels):
logging_freq = 5
num_epochs = 2
batch_size = 10
mlflow.tensorflow.autolog(log_every_epoch=False, log_every_n_steps=logging_freq)
initial_model = create_tf_keras_model()
with mlflow.start_run() as run:
initial_model.fit(
random_train_data,
random_one_hot_labels,
batch_size=batch_size,
epochs=num_epochs,
)
client = MlflowClient()
loss_history = client.get_metric_history(run.info.run_id, "loss")
assert len(loss_history) == num_epochs * (len(random_train_data) // batch_size) // logging_freq
def test_automatic_checkpoint_per_epoch_callback(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog(
checkpoint=True,
checkpoint_monitor=None,
checkpoint_mode=None,
checkpoint_save_best_only=False,
checkpoint_save_weights_only=False,
checkpoint_save_freq="epoch",
)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, epochs=1)
run_id = run.info.run_id
logged_metrics = mlflow.artifacts.load_dict(
f"runs:/{run_id}/checkpoints/epoch_0/checkpoint_metrics.json"
)
assert set(logged_metrics) == {"epoch", "loss", "accuracy", "global_step"}
assert logged_metrics["epoch"] == 0
assert logged_metrics["global_step"] == 5
pred_result = model.predict(random_train_data)
pred_result2 = load_checkpoint(run_id=run_id).predict(random_train_data)
np.testing.assert_array_almost_equal(pred_result, pred_result2)
pred_result3 = load_checkpoint(run_id=run_id, epoch=0).predict(random_train_data)
np.testing.assert_array_almost_equal(pred_result, pred_result3)
def test_automatic_checkpoint_per_epoch_save_weight_only_callback(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog(
checkpoint=True,
checkpoint_monitor=None,
checkpoint_mode=None,
checkpoint_save_best_only=False,
checkpoint_save_weights_only=True,
checkpoint_save_freq="epoch",
)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, epochs=1)
run_id = run.info.run_id
logged_metrics = mlflow.artifacts.load_dict(
f"runs:/{run_id}/checkpoints/epoch_0/checkpoint_metrics.json"
)
assert set(logged_metrics) == {"epoch", "loss", "accuracy", "global_step"}
assert logged_metrics["epoch"] == 0
assert logged_metrics["global_step"] == 5
model2 = create_tf_keras_model()
pred_result = model.predict(random_train_data)
pred_result2 = load_checkpoint(model=model2, run_id=run_id).predict(random_train_data)
np.testing.assert_array_almost_equal(pred_result, pred_result2)
def test_automatic_checkpoint_per_3_steps_callback(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog(
checkpoint=True,
checkpoint_monitor=None,
checkpoint_mode=None,
checkpoint_save_best_only=False,
checkpoint_save_weights_only=False,
checkpoint_save_freq=3,
)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, epochs=1)
run_id = run.info.run_id
logged_metrics = mlflow.artifacts.load_dict(
f"runs:/{run_id}/checkpoints/global_step_3/checkpoint_metrics.json"
)
assert set(logged_metrics) == {"epoch", "loss", "accuracy", "global_step"}
assert logged_metrics["epoch"] == 0
assert logged_metrics["global_step"] == 3
assert isinstance(load_checkpoint(run_id=run_id), tf.keras.Sequential)
assert isinstance(load_checkpoint(run_id=run_id, global_step=3), tf.keras.Sequential)
def test_automatic_checkpoint_per_3_steps_save_best_only_callback(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog(
checkpoint=True,
checkpoint_monitor="loss",
checkpoint_mode="min",
checkpoint_save_best_only=True,
checkpoint_save_weights_only=False,
checkpoint_save_freq=3,
)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(
random_train_data,
random_one_hot_labels,
epochs=1,
)
run_id = run.info.run_id
logged_metrics = mlflow.artifacts.load_dict(
f"runs:/{run_id}/checkpoints/latest_checkpoint_metrics.json"
)
assert set(logged_metrics) == {"epoch", "loss", "accuracy", "global_step"}
assert logged_metrics["epoch"] == 0
assert logged_metrics["global_step"] == 3
assert isinstance(load_checkpoint(run_id=run_id), tf.keras.Sequential)
|
__GeneratorClassMultiInput
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-chroma/destination_chroma/indexer.py
|
{
"start": 684,
"end": 5108
}
|
class ____(Indexer):
def __init__(self, config: ChromaIndexingConfigModel):
super().__init__(config)
self.collection_name = config.collection_name
def check(self):
collection_name_validation_error = is_valid_collection_name(self.collection_name)
if collection_name_validation_error:
return collection_name_validation_error
auth_method = self.config.auth_method
if auth_method.mode == "persistent_client" and not auth_method.path.startswith("/local/"):
return "Path must be prefixed with /local"
client = self._get_client()
try:
heartbeat = client.heartbeat()
if not heartbeat:
return "Chroma client server is not alive"
collection = client.get_or_create_collection(name=self.collection_name)
count = collection.count()
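# presumably guards against a falsy, non-zero return (e.g. None on a failed
# lookup); 0 itself is a valid count for an empty collection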
if count != 0 and not count:
return f"unable to get or create collection with name {self.collection_name}"
return
except Exception as e:
return format_exception(e)
finally:
del client
def delete(self, delete_ids, namespace, stream):
if len(delete_ids) > 0:
self._delete_by_filter(field_name=METADATA_RECORD_ID_FIELD, field_values=delete_ids)
def index(self, document_chunks, namespace, stream):
entities = []
for i in range(len(document_chunks)):
chunk = document_chunks[i]
entities.append(
{
"id": str(uuid.uuid4()),
"embedding": chunk.embedding,
"metadata": self._normalize(chunk.metadata),
"document": chunk.page_content if chunk.page_content is not None else "",
}
)
self._write_data(entities)
def pre_sync(self, catalog: ConfiguredAirbyteCatalog) -> None:
self.client = self._get_client()
streams_to_overwrite = [
create_stream_identifier(stream.stream)
for stream in catalog.streams
if stream.destination_sync_mode == DestinationSyncMode.overwrite
]
if len(streams_to_overwrite):
self._delete_by_filter(field_name=METADATA_STREAM_FIELD, field_values=streams_to_overwrite)
def _get_client(self):
auth_method = self.config.auth_method
if auth_method.mode == "persistent_client":
path = auth_method.path
client = chromadb.PersistentClient(path=path)
return client
elif auth_method.mode == "http_client":
host = auth_method.host
port = auth_method.port
ssl = auth_method.ssl
username = auth_method.username
password = auth_method.password
if username and password:
settings = Settings(
chroma_client_auth_provider="chromadb.auth.basic.BasicAuthClientProvider",
chroma_client_auth_credentials=f"{username}:{password}",
)
client = chromadb.HttpClient(settings=settings, host=host, port=port, ssl=ssl)
else:
client = chromadb.HttpClient(host=host, port=port, ssl=ssl)
return client
return
def _delete_by_filter(self, field_name, field_values):
collection = self.client.get_collection(name=self.collection_name)
where_filter = {field_name: {"$in": field_values}}
collection.delete(where=where_filter)
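# e.g. (field name illustrative) _delete_by_filter("_ab_record_id", ["a", "b"])
# deletes documents matching where={"_ab_record_id": {"$in": ["a", "b"]}}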
def _normalize(self, metadata: dict) -> dict:
result = {}
for key, value in metadata.items():
if isinstance(value, (str, int, float, bool)):
result[key] = value
else:
# JSON encode all other types
result[key] = json.dumps(value)
return result
def _write_data(self, entities):
ids = [entity["id"] for entity in entities]
embeddings = [entity["embedding"] for entity in entities]
if not any(embeddings):
embeddings = None
metadatas = [entity["metadata"] for entity in entities]
documents = [entity["document"] for entity in entities]
collection = self.client.get_collection(name=self.collection_name)
collection.add(ids=ids, embeddings=embeddings, metadatas=metadatas, documents=documents)
|
ChromaIndexer
|
python
|
gevent__gevent
|
src/greentest/3.14/test__interpreters.py
|
{
"start": 3743,
"end": 3976
}
|
class ____(TestBase):
def test_import_in_interpreter(self):
_run_output(
_interpreters.create(),
'import _interpreters',
)
##################################
# interpreter tests
|
ModuleTests
|
python
|
weaviate__weaviate-python-client
|
weaviate/client.py
|
{
"start": 9216,
"end": 9835
}
|
class ____:
def __init__(
self,
) -> None:
raise ValueError(
"""
Python client v3 `weaviate.Client(...)` has been removed.
Upgrade your code to use Python client v4 `weaviate.WeaviateClient` connections and methods.
- For Python Client v4 usage, see: https://weaviate.io/developers/weaviate/client-libraries/python
- For code migration, see: https://weaviate.io/developers/weaviate/client-libraries/python/v3_v4_migration
If you have to use v3 code, install the v3 client and pin the v3 dependency in your requirements file: `weaviate-client>=3.26.7,<4.0.0`"""
)
|
Client
|
python
|
apache__airflow
|
providers/opensearch/tests/system/opensearch/example_opensearch.py
|
{
"start": 1613,
"end": 4310
}
|
class ____(Document):
log_group_id = Integer()
logger = Text()
message = Text()
class Index:
name = INDEX_NAME
def save(self, **kwargs):
super().save(**kwargs)
def load_connections():
# Connections needed for this example dag to finish
from airflow.models import Connection
c = Connection(
conn_id="opensearch_test", conn_type="opensearch", host="127.0.0.1", login="test", password="test"
)
envvar = f"AIRFLOW_CONN_{c.conn_id.upper()}"
os.environ[envvar] = c.get_uri()
with DAG(
dag_id=DAG_ID,
start_date=datetime(2021, 1, 1),
schedule="@once",
catchup=False,
tags=["example"],
default_args=default_args,
description="Examples of OpenSearch Operators",
) as dag:
# [START howto_operator_opensearch_create_index]
create_index = OpenSearchCreateIndexOperator(
task_id="create_index",
index_name=INDEX_NAME,
index_body={"settings": {"index": {"number_of_shards": 1}}},
)
# [END howto_operator_opensearch_create_index]
# [START howto_operator_opensearch_add_document]
add_document_by_args = OpenSearchAddDocumentOperator(
task_id="add_document_with_args",
index_name=INDEX_NAME,
doc_id=1,
document={"log_group_id": 1, "logger": "python", "message": "hello world"},
)
add_document_by_class = OpenSearchAddDocumentOperator(
task_id="add_document_by_class",
doc_class=LogDocument(log_group_id=2, logger="airflow", message="hello airflow"),
)
# [END howto_operator_opensearch_add_document]
# [START howto_operator_opensearch_query]
search_low_level = OpenSearchQueryOperator(
task_id="low_level_query",
index_name="system_test",
query={"query": {"bool": {"must": {"match": {"message": "hello world"}}}}},
)
search = Search()
search._index = [INDEX_NAME]
search_object = search.filter("term", logger="airflow").query("match", message="hello airflow")
search_high_level = OpenSearchQueryOperator(task_id="high_level_query", search_object=search_object)
# [END howto_operator_opensearch_query]
chain(create_index, add_document_by_class, add_document_by_args, search_high_level, search_low_level)
from tests_common.test_utils.watcher import watcher
# This test needs watcher in order to properly mark success/failure
# when "tearDown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()
from tests_common.test_utils.system_tests import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
|
LogDocument
|
python
|
chroma-core__chroma
|
chromadb/test/conftest.py
|
{
"start": 27770,
"end": 28126
}
|
class ____(AsyncAdminClient):
pass
@pytest.fixture(scope="function")
def api(system: System) -> Generator[ServerAPI, None, None]:
system.reset_state()
api = system.instance(ServerAPI)
if isinstance(api, AsyncFastAPI):
transformed = async_class_to_sync(api)
yield transformed
else:
yield api
|
AsyncAdminClientSync
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-maximum-length-of-a-good-subsequence-i.py
|
{
"start": 665,
"end": 1180
}
|
class ____(object):
def maximumLength(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
dp = [collections.defaultdict(int) for _ in xrange(k+1)]
result = [0]*(k+1)
for x in nums:
for i in reversed(xrange(k+1)):
dp[i][x] = max(dp[i][x], result[i-1] if i-1 >= 0 else 0)+1
result[i] = max(result[i], dp[i][x])
return result[k]
# Time: O(n^2 * k)
# Space: O(n * k)
# dp
|
Solution2
|
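A quick usage sketch of the DP above (Python 2, matching its `xrange` calls, and assuming the snippet's `import collections` is in scope): for `nums = [1, 2, 1, 1, 3]` and `k = 2`, the longest good subsequence has length 4.
import collections
print(Solution2().maximumLength([1, 2, 1, 1, 3], 2))  # -> 4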
python
|
pypa__pip
|
src/pip/_vendor/urllib3/exceptions.py
|
{
"start": 1657,
"end": 2194
}
|
class ____(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested Url
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason)
RequestError.__init__(self, pool, url, message)
|
MaxRetryError
|
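A minimal sketch of how this exception surfaces in practice, assuming nothing is listening on the target port so the request keeps failing until retries are exhausted:
import urllib3
from urllib3.exceptions import MaxRetryError
from urllib3.util.retry import Retry
http = urllib3.PoolManager()
try:
    # Port 9 ("discard") is assumed unreachable here; one retry, then give up.
    http.request("GET", "http://127.0.0.1:9/", retries=Retry(total=1))
except MaxRetryError as err:
    print(err.url)     # the requested URL
    print(err.reason)  # the underlying error, e.g. a connection error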
python
|
openai__gym
|
gym/envs/mujoco/humanoidstandup_v4.py
|
{
"start": 109,
"end": 21674
}
|
class ____(MujocoEnv, utils.EzPickle):
"""
### Description
This environment is based on the environment introduced by Tassa, Erez and Todorov
in ["Synthesis and stabilization of complex behaviors through online trajectory optimization"](https://ieeexplore.ieee.org/document/6386025).
The 3D bipedal robot is designed to simulate a human. It has a torso (abdomen) with a
pair of legs and arms. The legs each consist of two links, and so do the arms (representing the
knees and elbows respectively). The environment starts with the humanoid lying on the ground,
and the goal of the environment is to make the humanoid stand up and then keep it standing
by applying torques on the various hinges.
### Action Space
The agent takes a 17-element vector for actions.
The action space is a continuous `(action, ...)` all in `[-0.4, 0.4]`, where `action`
represents the numerical torques applied at the hinge joints.
| Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit |
| --- | ---------------------------------------------------------------------------------- | ----------- | ----------- | -------------------------------- | ----- | ------------ |
| 0 | Torque applied on the hinge in the y-coordinate of the abdomen | -0.4 | 0.4 | hip_1 (front_left_leg) | hinge | torque (N m) |
| 1 | Torque applied on the hinge in the z-coordinate of the abdomen | -0.4 | 0.4 | angle_1 (front_left_leg) | hinge | torque (N m) |
| 2 | Torque applied on the hinge in the x-coordinate of the abdomen | -0.4 | 0.4 | hip_2 (front_right_leg) | hinge | torque (N m) |
| 3 | Torque applied on the rotor between torso/abdomen and the right hip (x-coordinate) | -0.4 | 0.4 | right_hip_x (right_thigh) | hinge | torque (N m) |
| 4 | Torque applied on the rotor between torso/abdomen and the right hip (z-coordinate) | -0.4 | 0.4 | right_hip_z (right_thigh) | hinge | torque (N m) |
| 5 | Torque applied on the rotor between torso/abdomen and the right hip (y-coordinate) | -0.4 | 0.4 | right_hip_y (right_thigh) | hinge | torque (N m) |
| 6 | Torque applied on the rotor between the right hip/thigh and the right shin | -0.4 | 0.4 | right_knee | hinge | torque (N m) |
| 7 | Torque applied on the rotor between torso/abdomen and the left hip (x-coordinate) | -0.4 | 0.4 | left_hip_x (left_thigh) | hinge | torque (N m) |
| 8 | Torque applied on the rotor between torso/abdomen and the left hip (z-coordinate) | -0.4 | 0.4 | left_hip_z (left_thigh) | hinge | torque (N m) |
| 9 | Torque applied on the rotor between torso/abdomen and the left hip (y-coordinate) | -0.4 | 0.4 | left_hip_y (left_thigh) | hinge | torque (N m) |
| 10 | Torque applied on the rotor between the left hip/thigh and the left shin | -0.4 | 0.4 | left_knee | hinge | torque (N m) |
| 11 | Torque applied on the rotor between the torso and right upper arm (coordinate -1) | -0.4 | 0.4 | right_shoulder1 | hinge | torque (N m) |
| 12 | Torque applied on the rotor between the torso and right upper arm (coordinate -2) | -0.4 | 0.4 | right_shoulder2 | hinge | torque (N m) |
| 13 | Torque applied on the rotor between the right upper arm and right lower arm | -0.4 | 0.4 | right_elbow | hinge | torque (N m) |
| 14 | Torque applied on the rotor between the torso and left upper arm (coordinate -1) | -0.4 | 0.4 | left_shoulder1 | hinge | torque (N m) |
| 15 | Torque applied on the rotor between the torso and left upper arm (coordinate -2) | -0.4 | 0.4 | left_shoulder2 | hinge | torque (N m) |
| 16 | Torque applied on the rotor between the left upper arm and left lower arm | -0.4 | 0.4 | left_elbow | hinge | torque (N m) |
### Observation Space
The state space consists of positional values of different body parts of the Humanoid,
followed by the velocities of those individual parts (their derivatives) with all the positions ordered before all the velocities.
**Note:** The x- and y-coordinates of the torso are being omitted to produce position-agnostic behavior in policies.
The observation is a `ndarray` with shape `(376,)` where the elements correspond to the following:
| Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit |
| --- | --------------------------------------------------------------------------------------------------------------- | ---- | --- | -------------------------------- | ----- | -------------------------- |
| 0 | z-coordinate of the torso (centre) | -Inf | Inf | root | free | position (m) |
| 1 | x-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) |
| 2 | y-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) |
| 3 | z-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) |
| 4 | w-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) |
| 5 | z-angle of the abdomen (in lower_waist) | -Inf | Inf | abdomen_z | hinge | angle (rad) |
| 6 | y-angle of the abdomen (in lower_waist) | -Inf | Inf | abdomen_y | hinge | angle (rad) |
| 7 | x-angle of the abdomen (in pelvis) | -Inf | Inf | abdomen_x | hinge | angle (rad) |
| 8 | x-coordinate of angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_x | hinge | angle (rad) |
| 9 | z-coordinate of angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_z | hinge | angle (rad) |
| 10 | y-coordinate of angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_y | hinge | angle (rad) |
| 11 | angle between right hip and the right shin (in right_knee) | -Inf | Inf | right_knee | hinge | angle (rad) |
| 12 | x-coordinate of angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_x | hinge | angle (rad) |
| 13 | z-coordinate of angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_z | hinge | angle (rad) |
| 14 | y-coordinate of angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_y | hinge | angle (rad) |
| 15 | angle between left hip and the left shin (in left_knee) | -Inf | Inf | left_knee | hinge | angle (rad) |
| 16 | coordinate-1 (multi-axis) angle between torso and right arm (in right_upper_arm) | -Inf | Inf | right_shoulder1 | hinge | angle (rad) |
| 17 | coordinate-2 (multi-axis) angle between torso and right arm (in right_upper_arm) | -Inf | Inf | right_shoulder2 | hinge | angle (rad) |
| 18 | angle between right upper arm and right_lower_arm | -Inf | Inf | right_elbow | hinge | angle (rad) |
| 19 | coordinate-1 (multi-axis) angle between torso and left arm (in left_upper_arm) | -Inf | Inf | left_shoulder1 | hinge | angle (rad) |
| 20 | coordinate-2 (multi-axis) angle between torso and left arm (in left_upper_arm) | -Inf | Inf | left_shoulder2 | hinge | angle (rad) |
| 21 | angle between left upper arm and left_lower_arm | -Inf | Inf | left_elbow | hinge | angle (rad) |
| 22 | x-coordinate velocity of the torso (centre) | -Inf | Inf | root | free | velocity (m/s) |
| 23 | y-coordinate velocity of the torso (centre) | -Inf | Inf | root | free | velocity (m/s) |
| 24 | z-coordinate velocity of the torso (centre) | -Inf | Inf | root | free | velocity (m/s) |
| 25  | x-coordinate angular velocity of the torso (centre)                                                              | -Inf | Inf | root                             | free  | angular velocity (rad/s)   |
| 26  | y-coordinate angular velocity of the torso (centre)                                                              | -Inf | Inf | root                             | free  | angular velocity (rad/s)   |
| 27  | z-coordinate angular velocity of the torso (centre)                                                              | -Inf | Inf | root                             | free  | angular velocity (rad/s)   |
| 28  | z-coordinate of angular velocity of the abdomen (in lower_waist)                                                 | -Inf | Inf | abdomen_z                        | hinge | angular velocity (rad/s)   |
| 29  | y-coordinate of angular velocity of the abdomen (in lower_waist)                                                 | -Inf | Inf | abdomen_y                        | hinge | angular velocity (rad/s)   |
| 30  | x-coordinate of angular velocity of the abdomen (in pelvis)                                                      | -Inf | Inf | abdomen_x                        | hinge | angular velocity (rad/s)   |
| 31  | x-coordinate of the angular velocity of the angle between pelvis and right hip (in right_thigh)                  | -Inf | Inf | right_hip_x                      | hinge | angular velocity (rad/s)   |
| 32  | z-coordinate of the angular velocity of the angle between pelvis and right hip (in right_thigh)                  | -Inf | Inf | right_hip_z                      | hinge | angular velocity (rad/s)   |
| 33  | y-coordinate of the angular velocity of the angle between pelvis and right hip (in right_thigh)                  | -Inf | Inf | right_hip_y                      | hinge | angular velocity (rad/s)   |
| 34  | angular velocity of the angle between right hip and the right shin (in right_knee)                               | -Inf | Inf | right_knee                       | hinge | angular velocity (rad/s)   |
| 35  | x-coordinate of the angular velocity of the angle between pelvis and left hip (in left_thigh)                    | -Inf | Inf | left_hip_x                       | hinge | angular velocity (rad/s)   |
| 36  | z-coordinate of the angular velocity of the angle between pelvis and left hip (in left_thigh)                    | -Inf | Inf | left_hip_z                       | hinge | angular velocity (rad/s)   |
| 37  | y-coordinate of the angular velocity of the angle between pelvis and left hip (in left_thigh)                    | -Inf | Inf | left_hip_y                       | hinge | angular velocity (rad/s)   |
| 38  | angular velocity of the angle between left hip and the left shin (in left_knee)                                  | -Inf | Inf | left_knee                        | hinge | angular velocity (rad/s)   |
| 39  | coordinate-1 (multi-axis) of the angular velocity of the angle between torso and right arm (in right_upper_arm)  | -Inf | Inf | right_shoulder1                  | hinge | angular velocity (rad/s)   |
| 40  | coordinate-2 (multi-axis) of the angular velocity of the angle between torso and right arm (in right_upper_arm)  | -Inf | Inf | right_shoulder2                  | hinge | angular velocity (rad/s)   |
| 41  | angular velocity of the angle between right upper arm and right_lower_arm                                        | -Inf | Inf | right_elbow                      | hinge | angular velocity (rad/s)   |
| 42  | coordinate-1 (multi-axis) of the angular velocity of the angle between torso and left arm (in left_upper_arm)    | -Inf | Inf | left_shoulder1                   | hinge | angular velocity (rad/s)   |
| 43  | coordinate-2 (multi-axis) of the angular velocity of the angle between torso and left arm (in left_upper_arm)    | -Inf | Inf | left_shoulder2                   | hinge | angular velocity (rad/s)   |
| 44  | angular velocity of the angle between left upper arm and left_lower_arm                                          | -Inf | Inf | left_elbow                       | hinge | angular velocity (rad/s)   |
Additionally, after all the positional and velocity based values in the table,
the state_space consists of (in order):
- *cinert:* Mass and inertia of a single rigid body relative to the center of mass
(this is an intermediate result of transition). It has shape 14*10 (*nbody * 10*)
and hence adds another 140 elements to the state space.
- *cvel:* Center of mass based velocity. It has shape 14 * 6 (*nbody * 6*) and hence
adds another 84 elements in the state space
- *qfrc_actuator:* Constraint force generated as the actuator force. This has shape
`(23,)` *(nv * 1)* and hence adds another 23 elements to the state space.
- *cfrc_ext:* This is the center of mass based external force on the body. It has shape
14 * 6 (*nbody * 6*) and hence adds another 84 elements to the state space.
where *nbody* stands for the number of bodies in the robot and *nv* stands for the number
of degrees of freedom (*= dim(qvel)*)
The (x,y,z) coordinates are translational DOFs while the orientations are rotational
DOFs expressed as quaternions. One can read more about free joints on the
[Mujoco Documentation](https://mujoco.readthedocs.io/en/latest/XMLreference.html).
**Note:** HumanoidStandup-v4 environment no longer has the following contact forces issue.
If using HumanoidStandup versions earlier than v4, there have been reported issues that using a Mujoco-Py version > 2.0 results
in the contact forces always being 0. As such we recommend to use a Mujoco-Py version < 2.0
when using the Humanoid environment if you would like to report results with contact forces
(if contact forces are not used in your experiments, you can use version > 2.0).
### Rewards
The reward consists of three parts:
- *uph_cost*: A reward for moving upward (in an attempt to stand up). This is not a relative
reward which measures how much upward it has moved from the last timestep, but it is an
absolute reward which measures how much upward the Humanoid has moved overall. It is
measured as *(z coordinate after action - 0)/(atomic timestep)*, where *z coordinate after
action* is index 0 in the state/index 2 in the table, and *atomic timestep* is the time for
one frame of movement even though the simulation has a framerate of 5 (done in order to inflate
rewards a little for faster learning).
- *quad_ctrl_cost*: A negative reward for penalising the humanoid if it has too large of
a control force. If there are *nu* actuators/controls, then the control has shape `nu x 1`.
It is measured as *0.1 **x** sum(control<sup>2</sup>)*.
- *quad_impact_cost*: A negative reward for penalising the humanoid if the external
contact force is too large. It is calculated as *min(0.5 * 0.000001 * sum(external
contact force<sup>2</sup>), 10)*.
The total reward returned is ***reward*** *=* *uph_cost + 1 - quad_ctrl_cost - quad_impact_cost*
### Starting State
All observations start in state
(0.0, 0.0, 0.105, 1.0, 0.0 ... 0.0) with a uniform noise in the range of
[-0.01, 0.01] added to the positional and velocity values (values in the table)
for stochasticity. Note that the initial z coordinate is intentionally selected
to be low, thereby indicating a humanoid lying down. The initial orientation is
designed to make it face forward as well.
### Episode End
The episode ends when any of the following happens:
1. Truncation: The episode duration reaches 1000 timesteps
2. Termination: Any of the state space values is no longer finite
### Arguments
No additional arguments are currently supported.
```
env = gym.make('HumanoidStandup-v4')
```
There is no v3 for HumanoidStandup, unlike the robot environments where a v3 and
beyond take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc.
### Version History
* v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3
* v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen)
* v2: All continuous control environments now use mujoco_py >= 1.50
* v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments.
* v0: Initial versions release (1.0.0)
"""
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 67,
}
def __init__(self, **kwargs):
observation_space = Box(
low=-np.inf, high=np.inf, shape=(376,), dtype=np.float64
)
MujocoEnv.__init__(
self,
"humanoidstandup.xml",
5,
observation_space=observation_space,
**kwargs
)
utils.EzPickle.__init__(self, **kwargs)
def _get_obs(self):
data = self.data
return np.concatenate(
[
data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat,
]
)
def step(self, a):
self.do_simulation(a, self.frame_skip)
pos_after = self.data.qpos[2]
data = self.data
uph_cost = (pos_after - 0) / self.model.opt.timestep
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = 0.5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = uph_cost - quad_ctrl_cost - quad_impact_cost + 1
if self.render_mode == "human":
self.render()
return (
self._get_obs(),
reward,
False,
False,
dict(
reward_linup=uph_cost,
reward_quadctrl=-quad_ctrl_cost,
reward_impact=-quad_impact_cost,
),
)
def reset_model(self):
c = 0.01
self.set_state(
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),
self.init_qvel
+ self.np_random.uniform(
low=-c,
high=c,
size=self.model.nv,
),
)
return self._get_obs()
def viewer_setup(self):
assert self.viewer is not None
self.viewer.cam.trackbodyid = 1
self.viewer.cam.distance = self.model.stat.extent * 1.0
self.viewer.cam.lookat[2] = 0.8925
self.viewer.cam.elevation = -20
|
HumanoidStandupEnv
|
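To make the reward description above concrete, here is a hedged NumPy sketch of the same arithmetic that `step()` performs; `pos_after_z`, `ctrl`, `cfrc_ext`, and `dt` stand in for the MuJoCo quantities read from `self.data` and `self.model.opt.timestep`.
import numpy as np
def standup_reward(pos_after_z, ctrl, cfrc_ext, dt):
    uph_cost = (pos_after_z - 0) / dt                               # absolute upward-progress reward
    quad_ctrl_cost = 0.1 * np.square(ctrl).sum()                    # control penalty
    quad_impact_cost = min(0.5e-6 * np.square(cfrc_ext).sum(), 10)  # capped impact penalty
    return uph_cost - quad_ctrl_cost - quad_impact_cost + 1         # +1 alive bonus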
python
|
ansible__ansible
|
test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py
|
{
"start": 675,
"end": 2320
}
|
class ____(SafeConstructor):
"""Yaml Safe Constructor that knows about Ansible tags."""
def construct_yaml_unsafe(self, node):
"""Construct an unsafe tag."""
return self._resolve_and_construct_object(node)
def construct_yaml_vault(self, node):
"""Construct a vault tag."""
ciphertext = self._resolve_and_construct_object(node)
if not isinstance(ciphertext, str):
raise ConstructorError(problem=f"the {node.tag!r} tag requires a string value", problem_mark=node.start_mark)
return ciphertext
def _resolve_and_construct_object(self, node):
# use a copied node to avoid mutating existing node and tripping the recursion check in construct_object
copied_node = copy.copy(node)
# repeat implicit resolution process to determine the proper tag for the value in the unsafe node
copied_node.tag = t.cast(BaseResolver, self).resolve(type(node), node.value, (True, False)) # pylint: disable=no-member
# re-entrant call using the correct tag
# non-deferred construction of hierarchical nodes so the result is a fully realized object, and so our stateful unsafe propagation behavior works
return self.construct_object(copied_node, deep=True)
TestConstructor.add_constructor(
'!unsafe',
TestConstructor.construct_yaml_unsafe) # type: ignore[type-var]
TestConstructor.add_constructor(
'!vault',
TestConstructor.construct_yaml_vault) # type: ignore[type-var]
TestConstructor.add_constructor(
'!vault-encrypted',
TestConstructor.construct_yaml_vault) # type: ignore[type-var]
|
TestConstructor
|
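As a small, hedged illustration of the `add_constructor` pattern used above, with a made-up `!upper` tag rather than Ansible's real ones:
import yaml
def construct_upper(loader, node):
    # Build the scalar value, then post-process it.
    return loader.construct_scalar(node).upper()
yaml.SafeLoader.add_constructor("!upper", construct_upper)
print(yaml.safe_load("msg: !upper hello"))  # {'msg': 'HELLO'}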
python
|
wandb__wandb
|
wandb/apis/paginator.py
|
{
"start": 621,
"end": 720
}
|
class ____(Protocol):
def execute(self, *args: Any, **kwargs: Any) -> dict[str, Any]: ...
|
_Client
|
python
|
ray-project__ray
|
python/ray/train/v2/_internal/execution/storage.py
|
{
"start": 1238,
"end": 10599
}
|
class ____(LocalFileSystem):
"""LocalFileSystem wrapper to exclude files according to patterns.
Args:
root_path: Root path to strip when matching with the exclude pattern.
Ex: root_path="/tmp/a/b/c", exclude=["*a*"], will exclude
/tmp/a/b/c/_a_.txt but not ALL of /tmp/a/*.
exclude: List of patterns that are applied to files returned by
``self.find()``. If a file path matches this pattern, it will
be excluded.
"""
def __init__(self, root_path: Path, exclude: List[str], **kwargs):
super().__init__(**kwargs)
self._exclude = exclude
self._root_path = root_path
@property
def fsid(self):
return "_excluding_local"
def _should_exclude(self, path: str) -> bool:
"""Return True if `path` (relative to `root_path`) matches any of the
`self._exclude` patterns."""
path = Path(path)
relative_path = path.relative_to(self._root_path).as_posix()
match_candidates = [relative_path]
if path.is_dir():
# Everything is in posix path format ('/')
match_candidates.append(relative_path + "/")
for excl in self._exclude:
if any(fnmatch.fnmatch(candidate, excl) for candidate in match_candidates):
return True
return False
def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
"""Call parent find() and exclude from result."""
paths = super().find(
path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, **kwargs
)
if detail:
return {
path: out
for path, out in paths.items()
if not self._should_exclude(path)
}
else:
return [path for path in paths if not self._should_exclude(path)]
def _pyarrow_fs_copy_files(
source, destination, source_filesystem=None, destination_filesystem=None, **kwargs
):
if isinstance(destination_filesystem, pyarrow.fs.S3FileSystem):
# Workaround multi-threading issue with pyarrow. Note that use_threads=True
# is safe for download, just not for uploads, see:
# https://github.com/apache/arrow/issues/32372
kwargs.setdefault("use_threads", False)
# Use a large chunk size to speed up large checkpoint transfers.
kwargs.setdefault("chunk_size", 64 * 1024 * 1024)
return pyarrow.fs.copy_files(
source,
destination,
source_filesystem=source_filesystem,
destination_filesystem=destination_filesystem,
**kwargs,
)
# TODO(justinvyu): Add unit tests for all these utils.
def delete_fs_path(fs: pyarrow.fs.FileSystem, fs_path: str):
"""Deletes (fs, fs_path) or raises FileNotFoundError if it doesn't exist."""
is_dir = _is_directory(fs, fs_path)
try:
if is_dir:
fs.delete_dir(fs_path)
else:
fs.delete_file(fs_path)
except Exception:
logger.exception(f"Caught exception when deleting path at ({fs}, {fs_path}):")
def _download_from_fs_path(
fs: pyarrow.fs.FileSystem,
fs_path: str,
local_path: str,
filelock: bool = True,
):
"""Downloads a directory or file from (fs, fs_path) to a local path.
If fs_path points to a directory:
- The full directory contents are downloaded directly into `local_path`,
rather than to a subdirectory of `local_path`.
If fs_path points to a file:
- The file is downloaded to `local_path`, which is expected to be a file path.
If the download fails, the `local_path` contents are
cleaned up before raising, if the directory did not previously exist.
NOTE: This method creates `local_path`'s parent directories if they do not
already exist. If the download fails, this does NOT clean up all the parent
directories that were created.
Args:
fs: The filesystem to download from.
fs_path: The filesystem path (either a directory or a file) to download.
local_path: The local path to download to.
filelock: Whether to require a file lock before downloading, useful for
multiple downloads to the same directory that may be happening in parallel.
Raises:
FileNotFoundError: if (fs, fs_path) doesn't exist.
"""
_local_path = Path(local_path).resolve()
exists_before = _local_path.exists()
if _is_directory(fs=fs, fs_path=fs_path):
_local_path.mkdir(parents=True, exist_ok=True)
else:
_local_path.parent.mkdir(parents=True, exist_ok=True)
try:
if filelock:
with TempFileLock(f"{os.path.normpath(local_path)}.lock"):
_pyarrow_fs_copy_files(fs_path, local_path, source_filesystem=fs)
else:
_pyarrow_fs_copy_files(fs_path, local_path, source_filesystem=fs)
except Exception as e:
# Clean up the directory if downloading was unsuccessful
if not exists_before:
shutil.rmtree(local_path, ignore_errors=True)
raise e
def _upload_to_fs_path(
local_path: str,
fs: pyarrow.fs.FileSystem,
fs_path: str,
exclude: Optional[List[str]] = None,
) -> None:
"""Uploads a local directory or file to (fs, fs_path).
NOTE: This will create all necessary parent directories at the destination.
Args:
local_path: The local path to upload.
fs: The filesystem to upload to.
fs_path: The filesystem path where the dir/file will be uploaded to.
exclude: A list of filename matches to exclude from upload. This includes
all files under subdirectories as well.
This pattern will match with the relative paths of all files under
`local_path`.
Ex: ["*.png"] to exclude all .png images.
"""
if not exclude:
# TODO(justinvyu): uploading a single file doesn't work
# (since we always create a directory at fs_path)
_create_directory(fs=fs, fs_path=fs_path)
_pyarrow_fs_copy_files(local_path, fs_path, destination_filesystem=fs)
return
_upload_to_uri_with_exclude_fsspec(
local_path=local_path, fs=fs, fs_path=fs_path, exclude=exclude
)
def _upload_to_uri_with_exclude_fsspec(
local_path: str, fs: "pyarrow.fs", fs_path: str, exclude: Optional[List[str]]
) -> None:
local_fs = _ExcludingLocalFilesystem(root_path=local_path, exclude=exclude)
handler = pyarrow.fs.FSSpecHandler(local_fs)
source_fs = pyarrow.fs.PyFileSystem(handler)
_create_directory(fs=fs, fs_path=fs_path)
_pyarrow_fs_copy_files(
local_path, fs_path, source_filesystem=source_fs, destination_filesystem=fs
)
def _list_at_fs_path(
fs: pyarrow.fs.FileSystem,
fs_path: str,
file_filter: Callable[[pyarrow.fs.FileInfo], bool] = lambda x: True,
) -> List[str]:
"""Returns the list of filenames at (fs, fs_path), similar to os.listdir.
If the path doesn't exist, returns an empty list.
"""
selector = pyarrow.fs.FileSelector(fs_path, allow_not_found=True, recursive=False)
return [
os.path.relpath(file_info.path.lstrip("/"), start=fs_path.lstrip("/"))
for file_info in fs.get_file_info(selector)
if file_filter(file_info)
]
def _exists_at_fs_path(fs: pyarrow.fs.FileSystem, fs_path: str) -> bool:
"""Returns True if (fs, fs_path) exists."""
valid = fs.get_file_info(fs_path)
return valid.type != pyarrow.fs.FileType.NotFound
def _is_directory(fs: pyarrow.fs.FileSystem, fs_path: str) -> bool:
"""Checks if (fs, fs_path) is a directory or a file.
Raises:
FileNotFoundError: if (fs, fs_path) doesn't exist.
"""
file_info = fs.get_file_info(fs_path)
if file_info.type == pyarrow.fs.FileType.NotFound:
raise FileNotFoundError(f"Path not found: ({fs}, {fs_path})")
return not file_info.is_file
def _create_directory(fs: pyarrow.fs.FileSystem, fs_path: str) -> None:
"""Create directory at (fs, fs_path).
Some external filesystems require directories to already exist, or at least
the `netloc` to be created (e.g. PyArrow's ``mock://`` filesystem).
Generally this should be done before and outside of Ray applications. This
utility is thus primarily used in testing, e.g. of ``mock://`` URIs.
"""
try:
fs.create_dir(fs_path)
except Exception:
logger.exception(
f"Caught exception when creating directory at ({fs}, {fs_path}):"
)
def get_fs_and_path(
storage_path: Union[str, os.PathLike],
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None,
) -> Tuple[pyarrow.fs.FileSystem, str]:
"""Returns the fs and path from a storage path and an optional custom fs.
Args:
storage_path: A storage path or URI. (ex: s3://bucket/path or /tmp/ray_results)
storage_filesystem: A custom filesystem to use. If not provided,
this will be auto-resolved by pyarrow. If provided, the storage_path
is assumed to be prefix-stripped already, and must be a valid path
on the filesystem.
"""
storage_path = str(storage_path)
if storage_filesystem:
return storage_filesystem, storage_path
return pyarrow.fs.FileSystem.from_uri(storage_path)
@DeveloperAPI
|
_ExcludingLocalFilesystem
|
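The exclusion rule implemented by `_should_exclude` above boils down to fnmatch patterns applied to root-relative paths, with an extra trailing-slash candidate for directories. A self-contained sketch of that rule (the function name here is illustrative):
import fnmatch
def should_exclude(rel_path, is_dir, exclude):
    candidates = [rel_path] + ([rel_path + "/"] if is_dir else [])
    return any(fnmatch.fnmatch(c, pat) for pat in exclude for c in candidates)
assert should_exclude("logs/run.png", False, ["*.png"])        # matched, excluded
assert not should_exclude("ckpt/model.bin", False, ["*.png"])  # kept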
python
|
tensorflow__tensorflow
|
tensorflow/tools/compatibility/all_renames_v2_test.py
|
{
"start": 888,
"end": 1219
}
|
class ____(test_util.TensorFlowTestCase):
def test_no_identity_renames(self):
identity_renames = [
old_name
for old_name, new_name in all_renames_v2.symbol_renames.items()
if old_name == new_name
]
self.assertEmpty(identity_renames)
if __name__ == "__main__":
test_lib.main()
|
AllRenamesV2Test
|
python
|
eriklindernoren__ML-From-Scratch
|
mlfromscratch/deep_learning/layers.py
|
{
"start": 17213,
"end": 18978
}
|
class ____(Layer):
"""Adds rows and columns of constant values to the input.
Expects the input to be of shape (batch_size, channels, height, width)
Parameters:
-----------
padding: tuple
The amount of padding along the height and width dimension of the input.
If (pad_h, pad_w), the same symmetric padding is applied along the height and width dimensions.
If ((pad_h0, pad_h1), (pad_w0, pad_w1)), the specified padding is added to the beginning and end of
the height and width dimension.
padding_value: int or tuple
The value that is added as padding.
"""
def __init__(self, padding, padding_value=0):
self.padding = padding
self.trainable = True
if not isinstance(padding[0], tuple):
self.padding = ((padding[0], padding[0]), padding[1])
if not isinstance(padding[1], tuple):
self.padding = (self.padding[0], (padding[1], padding[1]))
self.padding_value = padding_value
def forward_pass(self, X, training=True):
output = np.pad(X,
pad_width=((0,0), (0,0), self.padding[0], self.padding[1]),
mode="constant",
constant_values=self.padding_value)
return output
def backward_pass(self, accum_grad):
pad_top, pad_left = self.padding[0][0], self.padding[1][0]
height, width = self.input_shape[1], self.input_shape[2]
accum_grad = accum_grad[:, :, pad_top:pad_top+height, pad_left:pad_left+width]
return accum_grad
def output_shape(self):
new_height = self.input_shape[1] + np.sum(self.padding[0])
new_width = self.input_shape[2] + np.sum(self.padding[1])
return (self.input_shape[0], new_height, new_width)
|
ConstantPadding2D
|
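A tiny numeric check of the forward-pass logic above, calling np.pad directly with the same pad-width layout (batch and channel axes untouched):
import numpy as np
X = np.ones((1, 1, 2, 2))  # (batch_size, channels, height, width)
padded = np.pad(X, pad_width=((0, 0), (0, 0), (1, 1), (2, 2)),
                mode="constant", constant_values=0)
assert padded.shape == (1, 1, 4, 6)  # height + 1 + 1, width + 2 + 2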
python
|
py-pdf__pypdf
|
pypdf/_protocols.py
|
{
"start": 1432,
"end": 1714
}
|
class ____(PdfCommonDocProtocol, Protocol):
@property
@abstractmethod
def xref(self) -> dict[int, dict[int, Any]]:
... # pragma: no cover
@property
@abstractmethod
def trailer(self) -> dict[str, Any]:
... # pragma: no cover
|
PdfReaderProtocol
|
python
|
apache__airflow
|
providers/edge3/src/airflow/providers/edge3/worker_api/datamodels.py
|
{
"start": 3712,
"end": 4027
}
|
class ____(BaseModel):
"""Queues that a worker supports to run jobs on."""
queues: Annotated[
list[str] | None,
Field(
None,
description="List of queues the worker is pulling jobs from. If not provided, worker pulls from all queues.",
),
]
|
WorkerQueuesBase
|
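A hedged sketch of how the `Annotated`/`Field` pattern above behaves under pydantic v2; the model name here is illustrative, not part of the provider:
from typing import Annotated, List, Optional
from pydantic import BaseModel, Field
class Queues(BaseModel):
    queues: Annotated[Optional[List[str]], Field(None, description="queues to pull from")]
print(Queues().queues)                # None -> worker pulls from all queues
print(Queues(queues=["gpu"]).queues)  # ['gpu']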
python
|
google__jax
|
tests/lax_numpy_indexing_test.py
|
{
"start": 1603,
"end": 18044
}
|
class ____(typing.NamedTuple):
shape: tuple[int, ...]
indexer: Any
out_shape: tuple[int, ...] | None = None
def check_grads(f, args, order, atol=None, rtol=None, eps=None):
# TODO(mattjj,dougalm): add higher-order check
default_tol = 1e-6 if config.enable_x64.value else 1e-2
atol = atol or default_tol
rtol = rtol or default_tol
eps = eps or default_tol
jtu.check_jvp(f, partial(jax.jvp, f), args, atol, rtol, eps)
jtu.check_vjp(f, partial(jax.vjp, f), args, atol, rtol, eps)
STATIC_INDEXING_TESTS = [
("OneIntIndex", [
IndexSpec(shape=(3,), indexer=1, out_shape=()),
IndexSpec(shape=(3, 3), indexer=0, out_shape=(3,)),
IndexSpec(shape=(3, 4, 5), indexer=2, out_shape=(4, 5)),
IndexSpec(shape=(3,), indexer=-1, out_shape=()),
IndexSpec(shape=(3,), indexer=-2, out_shape=()),
]),
("TwoIntIndices", [
IndexSpec(shape=(3, 3), indexer=(2, 1), out_shape=()),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2), out_shape=(5,)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2), out_shape=(5,)),
]),
("ThreeIntIndices", [
IndexSpec(shape=(3, 4, 5), indexer=(1, 2, 3), out_shape=()),
]),
("OneSliceIndex", [
IndexSpec(shape=(10,), indexer=slice(1, 3), out_shape=(2,)),
IndexSpec(shape=(10,), indexer=slice(1, -1), out_shape=(8,)),
IndexSpec(shape=(10,), indexer=slice(None, -1), out_shape=(9,)),
IndexSpec(shape=(10,), indexer=slice(None, None, None), out_shape=(10,)),
IndexSpec(shape=(10, 8), indexer=slice(1, 3), out_shape=(2, 8)),
IndexSpec(shape=(10, 8), indexer=slice(1, None), out_shape=(9, 8)),
IndexSpec(shape=(10, 8), indexer=slice(None, 3), out_shape=(3, 8)),
IndexSpec(shape=(10, 8), indexer=slice(-3, None), out_shape=(3, 8)),
]),
("OneSliceIndexNegativeStride", [
IndexSpec(shape=(10,), indexer=slice(3, 1, -1), out_shape=(2,)),
IndexSpec(shape=(10,), indexer=slice(1, 8, -1), out_shape=(0,)),
IndexSpec(shape=(10,), indexer=slice(None, 1, -2), out_shape=(4,)),
IndexSpec(shape=(10,), indexer=slice(None, None, -1), out_shape=(10,)),
IndexSpec(shape=(10, 8), indexer=slice(3, 1, -1), out_shape=(2, 8)),
IndexSpec(shape=(10, 8), indexer=slice(0, 8, -1), out_shape=(0, 8)),
IndexSpec(shape=(10, 8), indexer=slice(None, None, -1), out_shape=(10, 8)),
]),
("SliceIndexClamping", [
IndexSpec(shape=(10,), indexer=slice(2, 11, 1), out_shape=(8,)),
IndexSpec(shape=(10,), indexer=slice(11, 12, 1), out_shape=(0,)),
IndexSpec(shape=(10,), indexer=slice(-11, -2, 1), out_shape=(8,)),
IndexSpec(shape=(10,), indexer=slice(-2, -12, -1), out_shape=(9,)),
IndexSpec(shape=(10,), indexer=slice(12, -12, -1), out_shape=(10,)),
]),
("OneSliceIndexNonUnitStride", [
IndexSpec(shape=(10,), indexer=slice(0, 8, 2), out_shape=(4,)),
IndexSpec(shape=(10,), indexer=slice(0, 8, 3), out_shape=(3,)),
IndexSpec(shape=(10,), indexer=slice(1, 3, 2), out_shape=(1,)),
IndexSpec(shape=(10,), indexer=slice(1, None, 2), out_shape=(5,)),
IndexSpec(shape=(10,), indexer=slice(None, 1, -2), out_shape=(4,)),
IndexSpec(shape=(10, 8), indexer=slice(1, 8, 3), out_shape=(3, 8)),
IndexSpec(shape=(10, 8), indexer=slice(None, None, 2), out_shape=(5, 8)),
IndexSpec(shape=(10, 8), indexer=slice(None, 1, -2), out_shape=(4, 8)),
IndexSpec(shape=(10, 8), indexer=slice(None, None, -2), out_shape=(5, 8)),
]),
("TwoSliceIndices", [
IndexSpec(shape=(10, 8), indexer=(slice(1, 3), slice(0, 2)),
out_shape=(2, 2)),
IndexSpec(shape=(10, 8), indexer=(slice(1, None), slice(None, 2)),
out_shape=(9, 2)),
IndexSpec(shape=(10, 8), indexer=(slice(None, None, -1), slice(None, 2)),
out_shape=(10, 2)),
IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, 2)),
out_shape=(2, 2, 3)),
IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, None)),
out_shape=(2, 8, 3)),
IndexSpec(shape=(10, 8, 3), indexer=(slice(1, None), slice(0, 2)),
out_shape=(9, 2, 3)),
]),
("OneColonIndex", [
IndexSpec(shape=(3,), indexer=slice(None), out_shape=(3,)),
IndexSpec(shape=(3, 4), indexer=slice(None), out_shape=(3, 4)),
]),
("MultipleColonIndices", [
IndexSpec(shape=(3, 4), indexer=(slice(None), slice(None)),
out_shape=(3, 4)),
IndexSpec(shape=(3, 4, 5), indexer=(slice(None), slice(None)),
out_shape=(3, 4, 5)),
]),
("MixedSliceIndices", [
IndexSpec(shape=(10, 4), indexer=(slice(None), slice(0, 2)),
out_shape=(10, 2)),
IndexSpec(shape=(10, 4), indexer=(1, slice(None)),
out_shape=(4,)),
]),
("EllipsisIndex", [
IndexSpec(shape=(3,), indexer=Ellipsis, out_shape=(3,)),
IndexSpec(shape=(3, 4), indexer=Ellipsis, out_shape=(3, 4)),
IndexSpec(shape=(3, 4, 5), indexer=(0, Ellipsis), out_shape=(4, 5)),
IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, 2, 3), out_shape=(3,)),
]),
("NoneIndex", [
IndexSpec(shape=(), indexer=None, out_shape=(1,)),
IndexSpec(shape=(), indexer=(None, None), out_shape=(1, 1)),
IndexSpec(shape=(), indexer=(Ellipsis, None), out_shape=(1,)),
IndexSpec(shape=(3,), indexer=None, out_shape=(1, 3)),
IndexSpec(shape=(3, 4), indexer=None, out_shape=(1, 3, 4)),
IndexSpec(shape=(3, 4), indexer=(Ellipsis, None), out_shape=(3, 4, 1)),
IndexSpec(shape=(3, 4), indexer=(0, None, Ellipsis), out_shape=(1, 4)),
IndexSpec(shape=(3, 4, 5), indexer=(1, None, Ellipsis), out_shape=(1, 4, 5)),
]),
("EmptyIndex", [
IndexSpec(shape=(), indexer=(), out_shape=()),
IndexSpec(shape=(3,), indexer=(), out_shape=(3,)),
IndexSpec(shape=(3, 4), indexer=(), out_shape=(3, 4)),
]),
("TupleOfIntAndSliceAndIntArray", [
IndexSpec(shape=(3, 2, 3), indexer=(0, slice(None), np.arange(3)),
out_shape=(3, 2)),
IndexSpec(shape=(3, 2, 3), indexer=(np.int32(1), slice(None), np.arange(3)),
out_shape=(3, 2)),
IndexSpec(shape=(3, 2, 3), indexer=(np.array(2), slice(None), np.arange(3)),
out_shape=(3, 2)),
]),
]
STATIC_INDEXING_OUT_OF_BOUNDS_TESTS = [
("OneIntIndex", [
IndexSpec(shape=(3,), indexer=-4, out_shape=()),
IndexSpec(shape=(3, 3), indexer=3, out_shape=(3,)),
IndexSpec(shape=(3, 4, 5), indexer=4, out_shape=(4, 5)),
]),
("TwoIntIndices", [
IndexSpec(shape=(3, 3), indexer=(2, -4), out_shape=()),
IndexSpec(shape=(3, 4, 5), indexer=(3, 2), out_shape=()),
IndexSpec(shape=(3, 4, 5), indexer=(-4, 4), out_shape=(5,)),
]),
]
ADVANCED_INDEXING_TESTS = [
("One1DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),
IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 1]), out_shape=(3, 3)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 0, 1]),
out_shape=(4, 4, 5)),
IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),
IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),
IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32),
out_shape=(0,)),
]),
("One2DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([[0, 0]]), out_shape=(1, 2)),
IndexSpec(shape=(3, 3), indexer=np.array([[1, 2, 1], [0, 1, -1]]),
out_shape=(2, 3, 3)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([[0, 2, 0, 1], [-1, -2, 1, 0]]),
out_shape=(2, 4, 4, 5)),
]),
("Two1DIntArrayIndicesNoBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),
out_shape=(2,)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2, 0, 1]), np.array([-1, 0, -1, 2])),
out_shape=(4, 5)),
]),
("Two1DIntArrayIndicesWithBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),
out_shape=(1, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([[0, 2, 0, 1]]), np.array([-1, 0, -1, 2])),
out_shape=(1, 4, 5)),
]),
("ArrayOfInts", [
IndexSpec(shape=(3,), indexer=np.array([0, 1, 0]), out_shape=(3,)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([ 0, -1]), out_shape=(2, 4, 5)),
]),
("TupleOfListsOfPythonInts", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0, 3]]),
out_shape=(2, 4, 5)),
]),
("TupleOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[2, 3, 0, 3]])),
out_shape=(1, 4)),
]),
("TupleOfListsOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),
out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[2, 3, 0, 3]])),
out_shape=(2, 4, 5)),
]),
]
ADVANCED_INDEXING_TESTS_NO_REPEATS = [
("One1DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),
IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 0]), out_shape=(3, 3)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 1]),
out_shape=(3, 4, 5)),
IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),
IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),
IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32), out_shape=(0,)),
]),
("One2DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([[0, 1]]), out_shape=(1, 2)),
IndexSpec(shape=(6, 6), indexer=np.array([[1, 2, 0], [3, 4, -1]]),
out_shape=(2, 3, 6)),
]),
("Two1DIntArrayIndicesNoBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),
out_shape=(2,)),
IndexSpec(shape=(4, 5, 6),
indexer=(np.array([0, 2, 1, 3]), np.array([-1, 0, -2, 1])),
out_shape=(4, 6)),
]),
("Two1DIntArrayIndicesWithBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),
out_shape=(1, 2)),
IndexSpec(shape=(4, 5, 6),
indexer=(np.array([[0, 2, -1, 1]]), np.array([-1, 0, -2, 2])),
out_shape=(1, 4, 6)),
]),
("ArrayOfInts", [
IndexSpec(shape=(3,), indexer=np.array([0, 2, 1]), out_shape=(3,)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([ 0, -1]), out_shape=(2, 4, 5)),
]),
("TupleOfListsOfPythonInts", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0]]),
out_shape=(2, 3, 5)),
]),
("TupleOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[2, 3, 0]])),
out_shape=(1, 3)),
]),
("TupleOfListsOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),
out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[2, 3, 0]])),
out_shape=(2, 3, 5)),
]),
]
ADVANCED_INDEXING_TESTS_NO_REPEATS_SORTED = [
("One1DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),
IndexSpec(shape=(3, 3), indexer=np.array([0, 1, 2]), out_shape=(3, 3)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 1, 2]),
out_shape=(3, 4, 5)),
IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),
IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),
IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32), out_shape=(0,)),
]),
("One2DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([[0, 1]]), out_shape=(1, 2)),
IndexSpec(shape=(6, 6), indexer=np.array([[-1, 0, 1],
[ 2, 3, 4]]), out_shape=(2, 3, 6)),
]),
("Two1DIntArrayIndicesNoBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),
out_shape=(2,)),
IndexSpec(shape=(4, 5, 6),
indexer=(np.array([0, 1, 2, 3]), np.array([-2, -1, 0, 1])),
out_shape=(4, 6)),
]),
("Two1DIntArrayIndicesWithBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),
out_shape=(1, 2)),
IndexSpec(shape=(4, 5, 6),
indexer=(np.array([[-1, 0, 1, 2]]), np.array([-2, -1, 0, 2])),
out_shape=(1, 4, 6)),
]),
("TupleOfListsOfPythonInts", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[0, 2, 3]]),
out_shape=(2, 3, 5)),
]),
("TupleOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[0, 2, 3]])),
out_shape=(1, 3)),
]),
("TupleOfListsOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),
out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[0, 2, 3]])),
out_shape=(2, 3, 5)),
]),
]
MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS = [
("SlicesAndOneIntArrayIndex", [
IndexSpec(shape=(2, 3), indexer=(np.array([0, 1]), slice(1, 2)),
out_shape=(2, 1)),
IndexSpec(shape=(2, 3), indexer=(slice(0, 2), np.array([0, 2])),
out_shape=(2, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(Ellipsis, np.array([0, 2]), slice(None)),
out_shape=(3, 2, 5)),
IndexSpec(shape=(3, 4, 5),
indexer=(Ellipsis, np.array([[0, 2], [1, 3]]), slice(None)),
out_shape=(3, 2, 2, 5)),
]),
("SlicesAndTwoIntArrayIndices", [
IndexSpec(shape=(3, 4, 5),
indexer=(Ellipsis, np.array([0, 2]), np.array([-1, 2])),
out_shape=(3, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), Ellipsis, np.array([-1, 2])),
out_shape=(2, 4)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), np.array([-1, 2]), Ellipsis),
out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), np.array([-1, 2]), slice(1, 3)),
out_shape=(2, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), slice(1, 3), np.array([-1, 2])),
out_shape=(2, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([ 0, 2, -2]), slice(None, None, 2),
np.array([-1, 2, 1])),
out_shape=(3, 2)),
]),
("NonesAndIntArrayIndices", [
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), None, np.array([-1, 2])),
out_shape=(2, 1, 5)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), None, None, np.array([-1, 2])),
out_shape=(2, 1, 1, 5)),
IndexSpec(shape=(3, 4, 5),
indexer=(Ellipsis, np.array([0, 2]), None, None,
np.array([-1, 2])),
out_shape=(2, 3, 1, 1)),
]),
("IntArrayWithInt32Type", [
IndexSpec(shape=(3, 4), indexer=(Ellipsis, np.array(1, dtype=np.int32)),
out_shape=(3,)),
]),
("EllipsisWithArrayIndices", [
IndexSpec(shape=(3, 4, 5), indexer=(np.array([0, 1]), ..., np.array([0, 1])),
out_shape=(2, 4)),
IndexSpec(shape=(3, 4, 5), indexer=(slice(None), np.array([0, 1]), ..., np.array([0, 1])),
out_shape=(2, 3)),
IndexSpec(shape=(3, 4, 5), indexer=(slice(None), ..., np.array([0, 1]), np.array([0, 1])),
out_shape=(3, 2)),
]),
]
MIXED_ADVANCED_INDEXING_TESTS = MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS + [
("SlicesAndOneIntArrayIndex", [
IndexSpec(shape=(3, 4, 5),
indexer=(Ellipsis, np.array([[0, 2], [1, 1]]), slice(None)),
out_shape=(3, 2, 2, 5)),
]),
("SlicesAndTwoIntArrayIndices", [
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([ 0, 2, -2]), slice(None, None, 2),
np.array([-1, 2, -1])),
out_shape=(3, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([[0, 2], [2, 0]]), Ellipsis,
np.array([[1, 0], [1, 0]])),
out_shape=(2, 2, 4)),
]),
]
MODES = ["clip", "drop", "promise_in_bounds"]
|
IndexSpec
|
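Each IndexSpec row above pairs an input shape and indexer with the expected result shape; a short sketch of how such an entry can be sanity-checked against NumPy's indexing semantics:
import numpy as np
shape, indexer, out_shape = (3, 4, 5), (1, 2), (5,)  # one entry from TwoIntIndices
assert np.zeros(shape)[indexer].shape == out_shape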
python
|
google__flatbuffers
|
grpc/examples/python/greeter/greeter_grpc.fb.py
|
{
"start": 529,
"end": 1433
}
|
class ____(object):
"""Interface exported by the server."""
def SayHello(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SayManyHellos(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GreeterServicer_to_server(servicer, server):
rpc_method_handlers = {
'SayHello': grpc.unary_unary_rpc_method_handler(servicer.SayHello),
'SayManyHellos': grpc.unary_stream_rpc_method_handler(
servicer.SayManyHellos
),
}
generic_handler = grpc.method_handlers_generic_handler(
'models.Greeter', rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
|
GreeterServicer
|
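A hedged sketch of wiring the generated servicer above into a server, following the standard grpc pattern; a real implementation would subclass GreeterServicer and override the two methods rather than serve the unimplemented stubs.
from concurrent import futures
import grpc
server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
add_GreeterServicer_to_server(GreeterServicer(), server)  # from the snippet above
server.add_insecure_port("[::]:50051")
server.start()
server.wait_for_termination()  # blocks until the server is stopped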
python
|
explosion__spaCy
|
spacy/lang/ko/__init__.py
|
{
"start": 684,
"end": 3063
}
|
class ____(DummyTokenizer):
def __init__(self, vocab: Vocab):
self.vocab = vocab
self._mecab = try_mecab_import() # type: ignore[func-returns-value]
self._mecab_tokenizer = None
@property
def mecab_tokenizer(self):
# This is a property so that initializing a pipeline with blank:ko is
# possible without actually requiring mecab-ko, e.g. to run
# `spacy init vectors ko` for a pipeline that will have a different
# tokenizer in the end. The languages need to match for the vectors
# to be imported and there's no way to pass a custom config to
# `init vectors`.
if self._mecab_tokenizer is None:
self._mecab_tokenizer = self._mecab("-F%f[0],%f[7]")
return self._mecab_tokenizer
def __reduce__(self):
return KoreanTokenizer, (self.vocab,)
def __call__(self, text: str) -> Doc:
dtokens = list(self.detailed_tokens(text))
surfaces = [dt["surface"] for dt in dtokens]
doc = Doc(self.vocab, words=surfaces, spaces=list(check_spaces(text, surfaces)))
for token, dtoken in zip(doc, dtokens):
first_tag, sep, eomi_tags = dtoken["tag"].partition("+")
token.tag_ = first_tag # stem (eogan) or pre-final ending (seoneomal eomi)
if token.tag_ in TAG_MAP:
token.pos = TAG_MAP[token.tag_][POS]
else:
token.pos = X
token.lemma_ = dtoken["lemma"]
doc.user_data["full_tags"] = [dt["tag"] for dt in dtokens]
return doc
def detailed_tokens(self, text: str) -> Iterator[Dict[str, Any]]:
# mecab-ko feature fields: POS tag[0], semantic class[1], jongseong (final consonant) presence[2],
# reading[3], type[4], first POS[5], last POS[6], expression[7], *
for node in self.mecab_tokenizer.parse(text, as_nodes=True):
if node.is_eos():
break
surface = node.surface
feature = node.feature
tag, _, expr = feature.partition(",")
lemma, _, remainder = expr.partition("/")
if lemma == "*":
lemma = surface
yield {"surface": surface, "lemma": lemma, "tag": tag}
def score(self, examples):
validate_examples(examples, "KoreanTokenizer.score")
return Scorer.score_tokenization(examples)
|
KoreanTokenizer
|
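The feature parsing inside detailed_tokens above splits a mecab-ko feature string (formatted by "-F%f[0],%f[7]") into a tag and a lemma expression; a standalone sketch with an illustrative feature value:
feature = "NNG,서울/NNG/*"  # illustrative "-F%f[0],%f[7]" output: tag, expression
tag, _, expr = feature.partition(",")
lemma, _, _ = expr.partition("/")
print(tag, lemma)  # NNG 서울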
python
|
pytorch__pytorch
|
torch/utils/data/datapipes/datapipe.py
|
{
"start": 10131,
"end": 10223
}
|
class ____(IterDataPipe):
def _is_dfpipe(self) -> bool:
return True
|
DFIterDataPipe
|
python
|
walkccc__LeetCode
|
solutions/48. Rotate Image/48-2.py
|
{
"start": 0,
"end": 404
}
|
class ____:
def rotate(self, matrix: list[list[int]]) -> None:
for mn in range(len(matrix) // 2):
mx = len(matrix) - mn - 1
for i in range(mn, mx):
offset = i - mn
top = matrix[mn][i]
matrix[mn][i] = matrix[mx - offset][mn]
matrix[mx - offset][mn] = matrix[mx][mx - offset]
matrix[mx][mx - offset] = matrix[i][mx]
matrix[i][mx] = top
|
Solution
|
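A quick in-place check of the layer-by-layer rotation above on a 3x3 matrix (90 degrees clockwise):
m = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]]
Solution().rotate(m)
assert m == [[7, 4, 1], [8, 5, 2], [9, 6, 3]]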
python
|
great-expectations__great_expectations
|
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_oregon_zip.py
|
{
"start": 1735,
"end": 4062
}
|
class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Oregon zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_oregon_zip": ["97001", "97321", "97733", "97920"],
"invalid_oregon_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_oregon_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_oregon_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_oregon_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidOregonZip().print_diagnostic_checklist()
|
ExpectColumnValuesToBeValidOregonZip
|
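As a hedged guess at the underlying check the `column_values.valid_oregon_zip` metric performs with the `zipcodes` package (the helper name below is illustrative): a value passes if it parses as a US zip whose state field is "OR".
import zipcodes
def is_valid_oregon_zip(value):
    try:
        matches = zipcodes.matching(value)
    except (TypeError, ValueError):  # malformed input, e.g. "-10000"
        return False
    return any(m.get("state") == "OR" for m in matches)
print(is_valid_oregon_zip("97001"))  # True
print(is_valid_oregon_zip("25487"))  # False (not an Oregon zip)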
python
|
pytorch__pytorch
|
test/dynamo/test_graph_deduplication.py
|
{
"start": 17221,
"end": 50910
}
|
class ____(torch.nn.Module):
def forward(self, primals_1: "f32[10, 10]", primals_2: "f32[10, 20]"):
add: "f32[10, 20]" = torch.ops.aten.add.Tensor(primals_2, 2); primals_2 = None
sum_1: "f32[]" = torch.ops.aten.sum.default(add); add = None
partitioned_fw_subgraph_0_0 = self.partitioned_fw_subgraph_0_0
invoke_subgraph_4 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_0_0, 'partitioned_fw_subgraph_0_0', primals_1, sum_1); partitioned_fw_subgraph_0_0 = sum_1 = None
getitem: "f32[]" = invoke_subgraph_4[0]; invoke_subgraph_4 = None
add_1: "f32[]" = torch.ops.aten.add.Tensor(getitem, 2); getitem = None
sum_2: "f32[]" = torch.ops.aten.sum.default(add_1); add_1 = None
partitioned_fw_subgraph_0_1 = self.partitioned_fw_subgraph_0_0
invoke_subgraph_6 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_0_1, 'partitioned_fw_subgraph_0_0', primals_1, sum_2); partitioned_fw_subgraph_0_1 = primals_1 = sum_2 = None
getitem_1: "f32[]" = invoke_subgraph_6[0]; invoke_subgraph_6 = None
return (getitem_1,)
class partitioned_fw_subgraph_0_0(torch.nn.Module):
def forward(self, primals_0: "f32[10, 10]", primals_1: "f32[]"):
add: "f32[10, 10]" = torch.ops.aten.add.Tensor(primals_0, 1); primals_0 = None
sum_1: "f32[]" = torch.ops.aten.sum.default(add); add = None
add_1: "f32[]" = torch.ops.aten.add.Tensor(sum_1, primals_1); sum_1 = primals_1 = None
return (add_1,)
""",
)
def test_input_mutation(self):
def inner_fn2(x, y):
x0 = x + 1
y0 = y + 1
x.add_(x0)
y.add_(y0)
return x.sum() + y.sum()
def fn(x, y):
x0 = torch.sin(x)
o2 = inner_fn2(x0, y)
o3 = inner_fn2(x0.clone(), y.clone())
return o2 + o3
x = torch.rand(10, 10, requires_grad=False)
y = torch.rand(10, 20, requires_grad=False)
x_clone = x.clone()
y_clone = y.clone()
ref_result = fn(x, y)
result, graphs, fw_graphs = self.run_and_return_graphs(fn, x_clone, y_clone)
torch.allclose(ref_result, result)
self.assertEqual(len(graphs), 1)
self.assertEqual(len(fw_graphs), 1)
self.assertExpectedInline(
graph_str(fw_graphs[0]),
"""\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
sin: "f32[10, 10]" = torch.ops.aten.sin.default(arg0_1); arg0_1 = None
add: "f32[10, 10]" = torch.ops.aten.add.Tensor(sin, 1)
add_1: "f32[10, 20]" = torch.ops.aten.add.Tensor(arg1_1, 1)
add_2: "f32[10, 10]" = torch.ops.aten.add.Tensor(sin, add); sin = add = None
add_3: "f32[10, 20]" = torch.ops.aten.add.Tensor(arg1_1, add_1); add_1 = None
clone: "f32[10, 10]" = torch.ops.aten.clone.default(add_2)
clone_1: "f32[10, 20]" = torch.ops.aten.clone.default(add_3)
add_4: "f32[10, 10]" = torch.ops.aten.add.Tensor(clone, 1)
add_5: "f32[10, 20]" = torch.ops.aten.add.Tensor(clone_1, 1)
add_6: "f32[10, 10]" = torch.ops.aten.add.Tensor(clone, add_4); clone = add_4 = None
add_7: "f32[10, 20]" = torch.ops.aten.add.Tensor(clone_1, add_5); clone_1 = add_5 = None
repeated_subgraph0 = self.repeated_subgraph0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0, 'subgraph_0', add_2, add_3); repeated_subgraph0 = add_2 = None
getitem: "f32[]" = invoke_subgraph[0]; invoke_subgraph = None
repeated_subgraph0_1 = self.repeated_subgraph0
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0_1, 'subgraph_0', add_6, add_7); repeated_subgraph0_1 = add_6 = add_7 = None
getitem_1: "f32[]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
add_8: "f32[]" = torch.ops.aten.add.Tensor(getitem, getitem_1); getitem = getitem_1 = None
copy_: "f32[10, 20]" = torch.ops.aten.copy_.default(arg1_1, add_3); arg1_1 = add_3 = copy_ = None
return (add_8,)
class repeated_subgraph0(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
sum_1: "f32[]" = torch.ops.aten.sum.default(arg0_1); arg0_1 = None
sum_2: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None
add: "f32[]" = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
return (add,)
""",
)
def test_input_aliasing(self):
def inner_fn(x, y):
x0 = x.view(x.size())
return x0.view(x.size())
def inner_fn2(x, y):
x = x * 2
y = y * 2
return x.sum() + y.sum()
def fn(x, y):
o0 = inner_fn(x, y)
o1 = inner_fn(x, y)
o2 = inner_fn2(x, y)
o3 = inner_fn2(x, y)
return o0 + o1 + o2.sum() + o3.sum()
x = torch.rand(10, 10, requires_grad=False)
y = torch.rand(10, 20, requires_grad=False)
x_clone = x.clone()
y_clone = y.clone()
ref_result = fn(x, y)
result, graphs, fw_graphs = self.run_and_return_graphs(fn, x_clone, y_clone)
torch.allclose(ref_result, result)
self.assertEqual(len(graphs), 1)
self.assertEqual(len(fw_graphs), 1)
self.assertExpectedInline(
graph_str(fw_graphs[0]),
"""\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
view: "f32[10, 10]" = torch.ops.aten.view.default(arg0_1, [10, 10])
view_1: "f32[10, 10]" = torch.ops.aten.view.default(view, [10, 10]); view = None
view_2: "f32[10, 10]" = torch.ops.aten.view.default(arg0_1, [10, 10])
view_3: "f32[10, 10]" = torch.ops.aten.view.default(view_2, [10, 10]); view_2 = None
add: "f32[10, 10]" = torch.ops.aten.add.Tensor(view_1, view_3); view_1 = view_3 = None
repeated_subgraph0 = self.repeated_subgraph0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0, 'subgraph_0', arg0_1, arg1_1); repeated_subgraph0 = None
getitem: "f32[]" = invoke_subgraph[0]; invoke_subgraph = None
sum_1: "f32[]" = torch.ops.aten.sum.default(getitem); getitem = None
add_1: "f32[10, 10]" = torch.ops.aten.add.Tensor(add, sum_1); add = sum_1 = None
repeated_subgraph0_1 = self.repeated_subgraph0
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0_1, 'subgraph_0', arg0_1, arg1_1); repeated_subgraph0_1 = arg0_1 = arg1_1 = None
getitem_1: "f32[]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
sum_2: "f32[]" = torch.ops.aten.sum.default(getitem_1); getitem_1 = None
add_2: "f32[10, 10]" = torch.ops.aten.add.Tensor(add_1, sum_2); add_1 = sum_2 = None
return (add_2,)
class repeated_subgraph0(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
mul: "f32[10, 10]" = torch.ops.aten.mul.Tensor(arg0_1, 2); arg0_1 = None
mul_1: "f32[10, 20]" = torch.ops.aten.mul.Tensor(arg1_1, 2); arg1_1 = None
sum_1: "f32[]" = torch.ops.aten.sum.default(mul); mul = None
sum_2: "f32[]" = torch.ops.aten.sum.default(mul_1); mul_1 = None
add: "f32[]" = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
return (add,)
""",
)
def test_cycle_detection_no_cycle(self):
mod = self.run_and_get_simple_graph()
self.assertExpectedInline(
_detect_cycles(mod.graph, {}), """no cycle detected"""
)
def test_cycle_detection_single_node(self):
def fn(x, y):
x0 = x + 1
y0 = y + 2
z = x0.sum() + y0.sum()
return z
x = torch.rand(10, 10, requires_grad=False)
y = torch.rand(10, 20, requires_grad=False)
_, _, fw_graphs = self.run_and_return_graphs(fn, x, y)
mod = fw_graphs[0]
add_node = next(n for n in mod.graph.nodes if n.name == "add")
add_2 = next(n for n in mod.graph.nodes if n.name == "add_2")
args = add_node.args
add_node.args = (args[0], add_2)
self.assertExpectedInline(
_detect_cycles(mod.graph, {add_2: OrderedSet([add_2])}),
"""cycle detected in path: deque([output, add_2, add_2])""",
)
def test_cycle_detection_two_node(self):
def fn(x, y):
x0 = x + 1
y0 = y + 2
z = x0.sum() + y0.sum()
return z
x = torch.rand(10, 10, requires_grad=False)
y = torch.rand(10, 20, requires_grad=False)
_, _, fw_graphs = self.run_and_return_graphs(fn, x, y)
mod = fw_graphs[0]
add_node = next(n for n in mod.graph.nodes if n.name == "add")
add_2 = next(n for n in mod.graph.nodes if n.name == "add_2")
args = add_node.args
add_node.args = (args[0], add_2)
self.assertExpectedInline(
_detect_cycles(
mod.graph,
{add_2: OrderedSet([add_node]), add_node: OrderedSet([add_2])},
),
"""cycle detected in path: deque([output, add_2, add, add_2])""",
)
def test_cycle_detection_arg_and_additional_deps(self):
def fn(x, y):
x0 = x + 1
y0 = y + 2
z = x0.sum() + y0.sum()
return z
x = torch.rand(10, 10, requires_grad=False)
y = torch.rand(10, 20, requires_grad=False)
_, _, fw_graphs = self.run_and_return_graphs(fn, x, y)
mod = fw_graphs[0]
add_node = next(n for n in mod.graph.nodes if n.name == "add")
add_2 = next(n for n in mod.graph.nodes if n.name == "add_2")
args = add_node.args
add_node.args = (args[0], add_2)
self.assertExpectedInline(
_detect_cycles(mod.graph, {add_2: OrderedSet([add_node])}),
"""cycle detected in path: deque([output, add_2, add, add_2])""",
)
def test_cycle_detection_simple(self):
mod = self.run_and_get_simple_graph()
add_node = next(n for n in mod.graph.nodes if n.name == "add")
add_2 = next(n for n in mod.graph.nodes if n.name == "add_2")
args = add_node.args
add_node.args = (args[0], add_2)
self.assertExpectedInline(
_detect_cycles(mod.graph, {}),
"""cycle detected in path: deque([output, add_2, sum_1, add, add_2])""",
)
def test_cycle_detection_complex(self):
def inner_fn(x, y):
x0 = x.view(x.size())
return x0.view(x.size())
def inner_fn2(x, y):
x = x * 2
y = y * 2
return x.sum() + y.sum()
def fn(x, y):
o0 = inner_fn(x, y)
o1 = inner_fn(x, y)
o2 = inner_fn2(x, y)
o3 = inner_fn2(x, y)
return o0 + o1 + o2.sum() + o3.sum()
x = torch.rand(10, 10, requires_grad=False)
y = torch.rand(10, 20, requires_grad=False)
x_clone = x.clone()
y_clone = y.clone()
_, _, fw_graphs = self.run_and_return_graphs(fn, x_clone, y_clone)
mod = fw_graphs[0]
invoke_subgraph_node = next(
n for n in mod.graph.nodes if n.name == "invoke_subgraph"
)
add_2 = next(n for n in mod.graph.nodes if n.name == "add_2")
args = invoke_subgraph_node.args
invoke_subgraph_node.args = (add_2, args[1])
self.assertExpectedInline(
_detect_cycles(mod.graph, {}),
"""cycle detected in path: deque([output, add_2, add_1, sum_1, getitem, invoke_subgraph, add_2])""",
)
def test_autocast_ordering(self):
from torch._dynamo.graph_deduplication import (
_populate_additional_deps,
_stable_topological_sort,
)
def inner_fn(x, y):
x0 = x.view(x.size())
return x0.view(x.size())
def inner_fn2(x, y):
x = x * 2
y = y * 2
return x.sum() + y.sum()
def fn(x, y):
o0 = inner_fn(x, y)
o1 = inner_fn(x, y)
o2 = inner_fn2(x, y)
o3 = inner_fn2(x, y)
return o0 + o1 + o2.sum() + o3.sum()
x = torch.rand(10, 10, requires_grad=False)
y = torch.rand(10, 20, requires_grad=False)
x_clone = x.clone()
y_clone = y.clone()
_, _, fw_graphs = self.run_and_return_graphs(fn, x_clone, y_clone)
mod = fw_graphs[0]
def get_node(name):
return next(n for n in mod.graph.nodes if n.name == name)
sum_1 = get_node("sum_1")
enter_autocast = mod.graph.call_function(torch.amp._enter_autocast)
sum_1.append(enter_autocast)
sum_2 = get_node("sum_2")
exit_autocast = mod.graph.call_function(torch.amp._exit_autocast)
sum_2.append(exit_autocast)
additional_deps = _populate_additional_deps(mod.graph, {})
invoke_subgraph = get_node("invoke_subgraph")
invoke_subgraph.append(enter_autocast)
getitem_1 = get_node("getitem_1")
getitem_1.append(exit_autocast)
self.assertExpectedInline(
graph_str(mod),
"""\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
view: "f32[10, 10]" = torch.ops.aten.view.default(arg0_1, [10, 10])
view_1: "f32[10, 10]" = torch.ops.aten.view.default(view, [10, 10]); view = None
view_2: "f32[10, 10]" = torch.ops.aten.view.default(arg0_1, [10, 10])
view_3: "f32[10, 10]" = torch.ops.aten.view.default(view_2, [10, 10]); view_2 = None
add: "f32[10, 10]" = torch.ops.aten.add.Tensor(view_1, view_3); view_1 = view_3 = None
repeated_subgraph0 = self.repeated_subgraph0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0, 'subgraph_0', arg0_1, arg1_1); repeated_subgraph0 = None
_enter_autocast = torch.amp.autocast_mode._enter_autocast(); _enter_autocast = None
getitem: "f32[]" = invoke_subgraph[0]; invoke_subgraph = None
sum_1: "f32[]" = torch.ops.aten.sum.default(getitem); getitem = None
add_1: "f32[10, 10]" = torch.ops.aten.add.Tensor(add, sum_1); add = sum_1 = None
repeated_subgraph0_1 = self.repeated_subgraph0
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0_1, 'subgraph_0', arg0_1, arg1_1); repeated_subgraph0_1 = arg0_1 = arg1_1 = None
getitem_1: "f32[]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
_exit_autocast = torch.amp.autocast_mode._exit_autocast(); _exit_autocast = None
sum_2: "f32[]" = torch.ops.aten.sum.default(getitem_1); getitem_1 = None
add_2: "f32[10, 10]" = torch.ops.aten.add.Tensor(add_1, sum_2); add_1 = sum_2 = None
return (add_2,)
class repeated_subgraph0(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
mul: "f32[10, 10]" = torch.ops.aten.mul.Tensor(arg0_1, 2); arg0_1 = None
mul_1: "f32[10, 20]" = torch.ops.aten.mul.Tensor(arg1_1, 2); arg1_1 = None
sum_1: "f32[]" = torch.ops.aten.sum.default(mul); mul = None
sum_2: "f32[]" = torch.ops.aten.sum.default(mul_1); mul_1 = None
add: "f32[]" = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
return (add,)
""",
)
_stable_topological_sort(mod.graph, additional_deps)
self.assertExpectedInline(
graph_str(mod),
"""\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
view: "f32[10, 10]" = torch.ops.aten.view.default(arg0_1, [10, 10])
view_1: "f32[10, 10]" = torch.ops.aten.view.default(view, [10, 10]); view = None
view_2: "f32[10, 10]" = torch.ops.aten.view.default(arg0_1, [10, 10])
view_3: "f32[10, 10]" = torch.ops.aten.view.default(view_2, [10, 10]); view_2 = None
add: "f32[10, 10]" = torch.ops.aten.add.Tensor(view_1, view_3); view_1 = view_3 = None
repeated_subgraph0 = self.repeated_subgraph0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0, 'subgraph_0', arg0_1, arg1_1); repeated_subgraph0 = None
getitem: "f32[]" = invoke_subgraph[0]; invoke_subgraph = None
sum_1: "f32[]" = torch.ops.aten.sum.default(getitem); getitem = None
_enter_autocast = torch.amp.autocast_mode._enter_autocast(); _enter_autocast = None
add_1: "f32[10, 10]" = torch.ops.aten.add.Tensor(add, sum_1); add = sum_1 = None
repeated_subgraph0_1 = self.repeated_subgraph0
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0_1, 'subgraph_0', arg0_1, arg1_1); repeated_subgraph0_1 = arg0_1 = arg1_1 = None
getitem_1: "f32[]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
sum_2: "f32[]" = torch.ops.aten.sum.default(getitem_1); getitem_1 = None
_exit_autocast = torch.amp.autocast_mode._exit_autocast(); _exit_autocast = None
add_2: "f32[10, 10]" = torch.ops.aten.add.Tensor(add_1, sum_2); add_1 = sum_2 = None
return (add_2,)
class repeated_subgraph0(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
mul: "f32[10, 10]" = torch.ops.aten.mul.Tensor(arg0_1, 2); arg0_1 = None
mul_1: "f32[10, 20]" = torch.ops.aten.mul.Tensor(arg1_1, 2); arg1_1 = None
sum_1: "f32[]" = torch.ops.aten.sum.default(mul); mul = None
sum_2: "f32[]" = torch.ops.aten.sum.default(mul_1); mul_1 = None
add: "f32[]" = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
return (add,)
""",
)
def test_output_nodes_last(self):
from torch._dynamo.graph_deduplication import _stable_topological_sort
def inner_fn(x, y):
x0 = x.view(x.size())
return x0.view(x.size())
def inner_fn2(x, y):
x = x * 2
y = y * 2
return x.sum() + y.sum()
def fn(x, y):
o0 = inner_fn(x, y)
o1 = inner_fn(x, y)
o2 = inner_fn2(x, y)
o3 = inner_fn2(x, y)
return o0 + o1 + o2.sum() + o3.sum()
x = torch.rand(10, 10, requires_grad=False)
y = torch.rand(10, 20, requires_grad=False)
x_clone = x.clone()
y_clone = y.clone()
_, _, fw_graphs = self.run_and_return_graphs(fn, x_clone, y_clone)
mod = fw_graphs[0]
output = next(n for n in mod.graph.nodes if n.op == "output")
add_2 = next(n for n in mod.graph.nodes if n.name == "sum_2")
add_2.append(output)
self.assertExpectedInline(
graph_str(mod),
"""\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
view: "f32[10, 10]" = torch.ops.aten.view.default(arg0_1, [10, 10])
view_1: "f32[10, 10]" = torch.ops.aten.view.default(view, [10, 10]); view = None
view_2: "f32[10, 10]" = torch.ops.aten.view.default(arg0_1, [10, 10])
view_3: "f32[10, 10]" = torch.ops.aten.view.default(view_2, [10, 10]); view_2 = None
add: "f32[10, 10]" = torch.ops.aten.add.Tensor(view_1, view_3); view_1 = view_3 = None
repeated_subgraph0 = self.repeated_subgraph0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0, 'subgraph_0', arg0_1, arg1_1); repeated_subgraph0 = None
getitem: "f32[]" = invoke_subgraph[0]; invoke_subgraph = None
sum_1: "f32[]" = torch.ops.aten.sum.default(getitem); getitem = None
add_1: "f32[10, 10]" = torch.ops.aten.add.Tensor(add, sum_1); add = sum_1 = None
repeated_subgraph0_1 = self.repeated_subgraph0
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0_1, 'subgraph_0', arg0_1, arg1_1); repeated_subgraph0_1 = arg0_1 = arg1_1 = None
getitem_1: "f32[]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
sum_2: "f32[]" = torch.ops.aten.sum.default(getitem_1); getitem_1 = None
return (add_2,)
add_2: "f32[10, 10]" = torch.ops.aten.add.Tensor(add_1, sum_2); add_1 = sum_2 = None
class repeated_subgraph0(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
mul: "f32[10, 10]" = torch.ops.aten.mul.Tensor(arg0_1, 2); arg0_1 = None
mul_1: "f32[10, 20]" = torch.ops.aten.mul.Tensor(arg1_1, 2); arg1_1 = None
sum_1: "f32[]" = torch.ops.aten.sum.default(mul); mul = None
sum_2: "f32[]" = torch.ops.aten.sum.default(mul_1); mul_1 = None
add: "f32[]" = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
return (add,)
""",
)
_stable_topological_sort(mod.graph, {})
self.assertExpectedInline(
graph_str(mod),
"""\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
view: "f32[10, 10]" = torch.ops.aten.view.default(arg0_1, [10, 10])
view_1: "f32[10, 10]" = torch.ops.aten.view.default(view, [10, 10]); view = None
view_2: "f32[10, 10]" = torch.ops.aten.view.default(arg0_1, [10, 10])
view_3: "f32[10, 10]" = torch.ops.aten.view.default(view_2, [10, 10]); view_2 = None
add: "f32[10, 10]" = torch.ops.aten.add.Tensor(view_1, view_3); view_1 = view_3 = None
repeated_subgraph0 = self.repeated_subgraph0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0, 'subgraph_0', arg0_1, arg1_1); repeated_subgraph0 = None
getitem: "f32[]" = invoke_subgraph[0]; invoke_subgraph = None
sum_1: "f32[]" = torch.ops.aten.sum.default(getitem); getitem = None
add_1: "f32[10, 10]" = torch.ops.aten.add.Tensor(add, sum_1); add = sum_1 = None
repeated_subgraph0_1 = self.repeated_subgraph0
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0_1, 'subgraph_0', arg0_1, arg1_1); repeated_subgraph0_1 = arg0_1 = arg1_1 = None
getitem_1: "f32[]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
sum_2: "f32[]" = torch.ops.aten.sum.default(getitem_1); getitem_1 = None
add_2: "f32[10, 10]" = torch.ops.aten.add.Tensor(add_1, sum_2); add_1 = sum_2 = None
return (add_2,)
class repeated_subgraph0(torch.nn.Module):
def forward(self, arg0_1: "f32[10, 10]", arg1_1: "f32[10, 20]"):
mul: "f32[10, 10]" = torch.ops.aten.mul.Tensor(arg0_1, 2); arg0_1 = None
mul_1: "f32[10, 20]" = torch.ops.aten.mul.Tensor(arg1_1, 2); arg1_1 = None
sum_1: "f32[]" = torch.ops.aten.sum.default(mul); mul = None
sum_2: "f32[]" = torch.ops.aten.sum.default(mul_1); mul_1 = None
add: "f32[]" = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
return (add,)
""",
)
def test_mutation_ordering(self):
from torch._dynamo.graph_deduplication import _stable_topological_sort
def inner_fn(x, y):
x0 = x.view(x.size())
return x0.view(x.size())
def inner_fn2(x, y):
x = x * 2
y = y * 2
return x.sum() + y.sum()
def fn(x, y):
o0 = inner_fn(x, y)
o1 = inner_fn(x, y)
x.add_(x)
o2 = inner_fn2(x, y)
y.mul_(y)
o3 = inner_fn2(x, y)
return o0 + o1 + o2.sum() + o3.sum()
x = torch.rand(10, 10)
y = torch.rand(10, 20)
x_clone = x.clone()
y_clone = y.clone()
graph, _ = extract_graph_and_tracker(fn, x_clone, y_clone)
def graph_code(graph):
return graph.python_code("self").src
def get_node(name):
return next(n for n in graph.nodes if n.name == name)
self.assertExpectedInline(
graph_code(graph),
"""\
def forward(self, L_x_ : torch.Tensor, L_y_ : torch.Tensor):
subgraph_0 = self.subgraph_0
l_x_ = L_x_
l_y_ = L_y_
x0 = l_x_.view((10, 10))
o0 = x0.view((10, 10)); x0 = None
x0_1 = l_x_.view((10, 10))
o1 = x0_1.view((10, 10)); x0_1 = None
add_ = l_x_.add_(l_x_); add_ = None
add_2 = o0 + o1; o0 = o1 = None
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_)
mul_ = l_y_.mul_(l_y_); mul_ = None
getitem = invoke_subgraph[0]; invoke_subgraph = None
sum_5 = getitem.sum(); getitem = None
add_3 = add_2 + sum_5; add_2 = sum_5 = None
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_); subgraph_0 = l_x_ = l_y_ = None
getitem_1 = invoke_subgraph_1[0]; invoke_subgraph_1 = None
sum_6 = getitem_1.sum(); getitem_1 = None
add_4 = add_3 + sum_6; add_3 = sum_6 = None
return (add_4,)
""",
)
# Shuffle nodes in the graph
add_ = get_node("add_")
mul_ = get_node("mul_")
o1 = get_node("o1")
o1.append(mul_)
add_2 = get_node("add_2")
add_2.append(add_)
self.assertExpectedInline(
graph_code(graph),
"""\
def forward(self, L_x_ : torch.Tensor, L_y_ : torch.Tensor):
subgraph_0 = self.subgraph_0
l_x_ = L_x_
l_y_ = L_y_
x0 = l_x_.view((10, 10))
o0 = x0.view((10, 10)); x0 = None
x0_1 = l_x_.view((10, 10))
o1 = x0_1.view((10, 10)); x0_1 = None
mul_ = l_y_.mul_(l_y_); mul_ = None
add_2 = o0 + o1; o0 = o1 = None
add_ = l_x_.add_(l_x_); add_ = None
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_)
getitem = invoke_subgraph[0]; invoke_subgraph = None
sum_5 = getitem.sum(); getitem = None
add_3 = add_2 + sum_5; add_2 = sum_5 = None
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_); subgraph_0 = l_x_ = l_y_ = None
getitem_1 = invoke_subgraph_1[0]; invoke_subgraph_1 = None
sum_6 = getitem_1.sum(); getitem_1 = None
add_4 = add_3 + sum_6; add_3 = sum_6 = None
return (add_4,)
""",
)
_stable_topological_sort(
graph, torch._dynamo.graph_deduplication.last_node_to_additional_deps
)
self.assertExpectedInline(
graph_code(graph),
"""\
def forward(self, L_x_ : torch.Tensor, L_y_ : torch.Tensor):
subgraph_0 = self.subgraph_0
l_x_ = L_x_
l_y_ = L_y_
x0 = l_x_.view((10, 10))
o0 = x0.view((10, 10)); x0 = None
x0_1 = l_x_.view((10, 10))
o1 = x0_1.view((10, 10)); x0_1 = None
add_2 = o0 + o1; o0 = o1 = None
add_ = l_x_.add_(l_x_); add_ = None
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_)
mul_ = l_y_.mul_(l_y_); mul_ = None
getitem = invoke_subgraph[0]; invoke_subgraph = None
sum_5 = getitem.sum(); getitem = None
add_3 = add_2 + sum_5; add_2 = sum_5 = None
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_); subgraph_0 = l_x_ = l_y_ = None
getitem_1 = invoke_subgraph_1[0]; invoke_subgraph_1 = None
sum_6 = getitem_1.sum(); getitem_1 = None
add_4 = add_3 + sum_6; add_3 = sum_6 = None
return (add_4,)
""",
)
def test_tuple_return(self):
@allow_in_graph
def tuple_return(x, y):
return x, y
def inner_fn(x, y):
x0 = x + x + 1
y0 = y + y + 1
return tuple_return(x0, y0)
def fn(x0, x1, x2, y0, y1, y2):
x0 = inner_fn(x0, y0)
x1 = inner_fn(x1, y1)
x2 = inner_fn(x2, y2)
return x0, x1, x2
fn_opt = torch.compile(fn, fullgraph=True)
inps = [torch.rand(10, 10) for _ in range(6)]
result_compiled = fn_opt(*inps)
result_eager = fn(*inps)
self.assertEqual(result_compiled, result_eager)
def test_tuple_inputs(self):
with (
torch._dynamo.config.patch("use_graph_deduplication", False),
torch._dynamo.config.patch("track_nodes_for_deduplication", True),
):
def inner(x, y):
x0, x1 = torch.split(x, 5)
return x0 + x1 + y
def fn(x, y):
o1 = inner(x, y)
o2 = inner(x, y)
o3 = inner(x, y)
o4 = inner(x, y)
return o1.sum() + o2.sum() + o3.sum() + o4.sum()
graph, tracker = extract_graph_and_tracker(
fn, torch.rand(10, 10), torch.rand(5, 10)
)
class MockOutputGraph:
def __init__(self):
self.graph = graph
self.region_tracker = tracker
self.nn_modules = FakeRootModule({})
def install_subgraph(self, name, subgraph):
return ""
splits = [
n
for n in graph.nodes
if n.op == "call_function" and n.target is torch.split
]
for split in splits:
tracker.node_to_duplicates.pop(split)
apply_graph_deduplication(MockOutputGraph())
self.assertExpectedInline(
graph,
"""\
graph():
%_unnamed : [num_users=4] = get_attr[target=]
%l_x_ : torch.Tensor [num_users=4] = placeholder[target=L_x_]
%l_y_ : torch.Tensor [num_users=4] = placeholder[target=L_y_]
%split : [num_users=2] = call_function[target=torch.functional.split](args = (%l_x_, 5), kwargs = {})
%x0 : [num_users=1] = call_function[target=operator.getitem](args = (%split, 0), kwargs = {})
%x1 : [num_users=1] = call_function[target=operator.getitem](args = (%split, 1), kwargs = {})
%split_1 : [num_users=2] = call_function[target=torch.functional.split](args = (%l_x_, 5), kwargs = {})
%x0_1 : [num_users=1] = call_function[target=operator.getitem](args = (%split_1, 0), kwargs = {})
%x1_1 : [num_users=1] = call_function[target=operator.getitem](args = (%split_1, 1), kwargs = {})
%split_2 : [num_users=2] = call_function[target=torch.functional.split](args = (%l_x_, 5), kwargs = {})
%x0_2 : [num_users=1] = call_function[target=operator.getitem](args = (%split_2, 0), kwargs = {})
%x1_2 : [num_users=1] = call_function[target=operator.getitem](args = (%split_2, 1), kwargs = {})
%split_3 : [num_users=2] = call_function[target=torch.functional.split](args = (%l_x_, 5), kwargs = {})
%x0_3 : [num_users=1] = call_function[target=operator.getitem](args = (%split_3, 0), kwargs = {})
%x1_3 : [num_users=1] = call_function[target=operator.getitem](args = (%split_3, 1), kwargs = {})
%invoke_subgraph : [num_users=1] = call_function[target=torch.ops.higher_order.invoke_subgraph](args = (%_unnamed, , %x0, %x1, %l_y_), kwargs = {})
%getitem_8 : [num_users=1] = call_function[target=operator.getitem](args = (%invoke_subgraph, 0), kwargs = {})
%sum_1 : [num_users=1] = call_method[target=sum](args = (%getitem_8,), kwargs = {})
%invoke_subgraph_1 : [num_users=1] = call_function[target=torch.ops.higher_order.invoke_subgraph](args = (%_unnamed, , %x0_1, %x1_1, %l_y_), kwargs = {})
%getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%invoke_subgraph_1, 0), kwargs = {})
%sum_2 : [num_users=1] = call_method[target=sum](args = (%getitem_9,), kwargs = {})
%add_8 : [num_users=1] = call_function[target=operator.add](args = (%sum_1, %sum_2), kwargs = {})
%invoke_subgraph_2 : [num_users=1] = call_function[target=torch.ops.higher_order.invoke_subgraph](args = (%_unnamed, , %x0_2, %x1_2, %l_y_), kwargs = {})
%getitem_10 : [num_users=1] = call_function[target=operator.getitem](args = (%invoke_subgraph_2, 0), kwargs = {})
%sum_3 : [num_users=1] = call_method[target=sum](args = (%getitem_10,), kwargs = {})
%add_9 : [num_users=1] = call_function[target=operator.add](args = (%add_8, %sum_3), kwargs = {})
%invoke_subgraph_3 : [num_users=1] = call_function[target=torch.ops.higher_order.invoke_subgraph](args = (%_unnamed, , %x0_3, %x1_3, %l_y_), kwargs = {})
%getitem_11 : [num_users=1] = call_function[target=operator.getitem](args = (%invoke_subgraph_3, 0), kwargs = {})
%sum_4 : [num_users=1] = call_method[target=sum](args = (%getitem_11,), kwargs = {})
%add_10 : [num_users=1] = call_function[target=operator.add](args = (%add_9, %sum_4), kwargs = {})
return (add_10,)""",
)
def test_param_transfer_to_submodule(self):
def inner_fn(x, y):
return x + y + y + x
def fn(x0, x1, x2, y0, y1, y2):
x0 = inner_fn(x0, y0)
x1 = inner_fn(x1, y1)
x2 = inner_fn(x2, y2)
return x0.sum() + x1.sum() + x2.sum()
fn_opt = torch.compile(fn, fullgraph=True)
args = [torch.rand(10, 10) for _ in range(6)]
for arg in args:
torch._dynamo.mark_static_address(arg)
fn_opt(*args)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
GraphModule
|
python
|
eventlet__eventlet
|
tests/timer_test.py
|
{
"start": 113,
"end": 1018
}
|
class ____(TestCase):
def test_copy(self):
t = timer.Timer(0, lambda: None)
t2 = t.copy()
assert t.seconds == t2.seconds
assert t.tpl == t2.tpl
assert t.called == t2.called
def test_schedule(self):
hub = hubs.get_hub()
# clean up the runloop, preventing side effects from previous tests
# on this thread
if hub.running:
hub.abort()
eventlet.sleep(0)
called = []
# t = timer.Timer(0, lambda: (called.append(True), hub.abort()))
# t.schedule()
# let's have a timer somewhere in the future; make sure abort() still works
hubs.get_hub().schedule_call_global(0, lambda: (called.append(True), hub.abort()))
hub.default_sleep = lambda: 0.0
hub.switch()
assert called
assert not hub.running
if __name__ == '__main__':
main()
|
TestTimer
|
python
|
run-llama__llama_index
|
llama-index-integrations/tools/llama-index-tools-brightdata/tests/test_tools_brightdata.py
|
{
"start": 243,
"end": 2662
}
|
class ____(unittest.TestCase):
def test_class_inheritance(self):
"""Test that BrightDataToolSpec inherits from BaseToolSpec."""
names_of_base_classes = [b.__name__ for b in BrightDataToolSpec.__mro__]
self.assertIn(BaseToolSpec.__name__, names_of_base_classes)
def test_initialization(self):
"""Test that the class initializes correctly."""
tool = BrightDataToolSpec(api_key="test_key", zone="test_zone")
self.assertEqual(tool._api_key, "test_key")
self.assertEqual(tool._zone, "test_zone")
self.assertEqual(tool._endpoint, "https://api.brightdata.com/request")
@patch("requests.post")
def test_scrape_as_markdown_success(self, mock_post):
"""Test successful scraping."""
# Mock the response
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.text = "# Markdown Content\n\nThis is a test."
mock_post.return_value = mock_response
tool = BrightDataToolSpec(api_key="test_key")
result = tool.scrape_as_markdown("https://example.com")
self.assertEqual(result.text, "# Markdown Content\n\nThis is a test.")
self.assertEqual(result.metadata, {"url": "https://example.com"})
mock_post.assert_called_once()
call_args = mock_post.call_args
self.assertEqual(call_args[0][0], "https://api.brightdata.com/request")
payload = json.loads(call_args[1]["data"])
self.assertEqual(payload["url"], "https://example.com")
self.assertEqual(payload["zone"], "unblocker") # default value
self.assertEqual(payload["format"], "raw")
self.assertEqual(payload["data_format"], "markdown")
headers = call_args[1]["headers"]
self.assertEqual(headers["Authorization"], "Bearer test_key")
@patch("requests.post")
def test_scrape_as_markdown_failure(self, mock_post):
"""Test failed scraping."""
mock_response = MagicMock()
mock_response.status_code = 403
mock_response.text = "Access denied"
mock_post.return_value = mock_response
tool = BrightDataToolSpec(api_key="test_key")
with pytest.raises(Exception) as context:
tool.scrape_as_markdown("https://example.com")
self.assertIn("Failed to scrape: 403", str(context.value))
if __name__ == "__main__":
unittest.main()
|
TestBrightDataToolSpec
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/batch_waiters.py
|
{
"start": 1466,
"end": 10958
}
|
class ____(BatchClientHook):
"""
A utility to manage waiters for AWS Batch services.
.. code-block:: python
import random
from airflow.providers.amazon.aws.operators.batch_waiters import BatchWaiters
# to inspect default waiters
waiters = BatchWaiters()
config = waiters.default_config # type: Dict
waiter_names = waiters.list_waiters() # -> ["JobComplete", "JobExists", "JobRunning"]
# The default_config is a useful stepping stone to creating custom waiters, e.g.
custom_config = waiters.default_config # this is a deepcopy
# modify custom_config['waiters'] as necessary and get a new instance:
waiters = BatchWaiters(waiter_config=custom_config)
waiters.waiter_config # check the custom configuration (this is a deepcopy)
waiters.list_waiters() # names of custom waiters
# During the init for BatchWaiters, the waiter_config is used to build a waiter_model;
# and note that this only occurs during the class init, to avoid any accidental mutations
# of waiter_config leaking into the waiter_model.
waiters.waiter_model # -> botocore.waiter.WaiterModel object
# The waiter_model is combined with the waiters.client to get a specific waiter
# and the details of the config on that waiter can be further modified without any
# accidental impact on the generation of new waiters from the defined waiter_model, e.g.
waiters.get_waiter("JobExists").config.delay # -> 5
waiter = waiters.get_waiter("JobExists") # -> botocore.waiter.Batch.Waiter.JobExists object
waiter.config.delay = 10
waiters.get_waiter("JobExists").config.delay # -> 5 as defined by waiter_model
# To use a specific waiter, update the config and call the `wait()` method for jobId, e.g.
waiter = waiters.get_waiter("JobExists") # -> botocore.waiter.Batch.Waiter.JobExists object
waiter.config.delay = random.uniform(1, 10) # seconds
waiter.config.max_attempts = 10
waiter.wait(jobs=[jobId])
.. seealso::
- https://www.2ndwatch.com/blog/use-waiters-boto3-write/
- https://github.com/boto/botocore/blob/develop/botocore/waiter.py
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#waiters
- https://github.com/boto/botocore/tree/develop/botocore/data/ec2/2016-11-15
- https://github.com/boto/botocore/issues/1915
:param waiter_config: a custom waiter configuration for AWS Batch services
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used
(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html).
:param region_name: region name to use in AWS client.
Override the AWS region in connection (if provided)
"""
def __init__(self, *args, waiter_config: dict | None = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._default_config: dict | None = None
self._waiter_config = waiter_config or self.default_config
self._waiter_model = botocore.waiter.WaiterModel(self._waiter_config)
@property
def default_config(self) -> dict:
"""
An immutable default waiter configuration.
:return: a waiter configuration for AWS Batch services
"""
if self._default_config is None:
config_path = Path(__file__).with_name("batch_waiters.json").resolve()
with open(config_path) as config_file:
self._default_config = json.load(config_file)
return deepcopy(self._default_config) # avoid accidental mutation
@property
def waiter_config(self) -> dict:
"""
An immutable waiter configuration for this instance; a ``deepcopy`` is returned by this property.
During the init for BatchWaiters, the waiter_config is used to build a
waiter_model and this only occurs during the class init, to avoid any
accidental mutations of waiter_config leaking into the waiter_model.
:return: a waiter configuration for AWS Batch services
"""
return deepcopy(self._waiter_config) # avoid accidental mutation
@property
def waiter_model(self) -> botocore.waiter.WaiterModel:
"""
A configured waiter model used to generate waiters on AWS Batch services.
:return: a waiter model for AWS Batch services
"""
return self._waiter_model
def get_waiter(
self,
waiter_name: str,
parameters: dict[str, str] | None = None,
config_overrides: dict[str, Any] | None = None,
deferrable: bool = False,
client=None,
) -> botocore.waiter.Waiter:
"""
Get an AWS Batch service waiter, using the configured ``.waiter_model``.
The ``.waiter_model`` is combined with the ``.client`` to get a specific waiter and
the properties of that waiter can be modified without any accidental impact on the
generation of new waiters from the ``.waiter_model``, e.g.
.. code-block:: python
waiters.get_waiter("JobExists").config.delay # -> 5
waiter = waiters.get_waiter("JobExists") # a new waiter object
waiter.config.delay = 10
waiters.get_waiter("JobExists").config.delay # -> 5 as defined by waiter_model
To use a specific waiter, update the config and call the `wait()` method for jobId, e.g.
.. code-block:: python
import random
waiter = waiters.get_waiter("JobExists") # a new waiter object
waiter.config.delay = random.uniform(1, 10) # seconds
waiter.config.max_attempts = 10
waiter.wait(jobs=[jobId])
:param waiter_name: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing); see ``.list_waiters``.
:param parameters: unused, just here to match the method signature in base_aws
:param config_overrides: unused, just here to match the method signature in base_aws
:param deferrable: unused, just here to match the method signature in base_aws
:param client: unused, just here to match the method signature in base_aws
:return: a waiter object for the named AWS Batch service
"""
return botocore.waiter.create_waiter_with_client(waiter_name, self.waiter_model, self.client)
def list_waiters(self) -> list[str]:
"""
List the waiters in a waiter configuration for AWS Batch services.
:return: waiter names for AWS Batch services
"""
return self.waiter_model.waiter_names
def wait_for_job(
self,
job_id: str,
delay: int | float | None = None,
get_batch_log_fetcher: Callable[[str], AwsTaskLogFetcher | None] | None = None,
) -> None:
"""
Wait for Batch job to complete.
This assumes that the ``.waiter_model`` is configured using some
variation of the ``.default_config`` so that it can generate waiters
with the following names: "JobExists", "JobRunning" and "JobComplete".
:param job_id: a Batch job ID
:param delay: A delay before polling for job status
:param get_batch_log_fetcher: A method that returns batch_log_fetcher of
type AwsTaskLogFetcher or None when the CloudWatch log stream hasn't been created yet.
:raises: AirflowException
.. note::
This method adds a small random jitter to the ``delay`` (+/- 2 sec, >= 1 sec).
Using a random interval helps to avoid AWS API throttle limits when many
concurrent tasks request job-descriptions.
It also modifies the ``max_attempts`` to use the ``sys.maxsize``,
which allows Airflow to manage the timeout on waiting.
"""
self.delay(delay)
try:
waiter = self.get_waiter("JobExists")
waiter.config.delay = self.add_jitter(waiter.config.delay, width=2, minima=1)
waiter.config.max_attempts = sys.maxsize # timeout is managed by Airflow
waiter.wait(jobs=[job_id])
waiter = self.get_waiter("JobRunning")
waiter.config.delay = self.add_jitter(waiter.config.delay, width=2, minima=1)
waiter.config.max_attempts = sys.maxsize # timeout is managed by Airflow
waiter.wait(jobs=[job_id])
batch_log_fetcher = None
try:
if get_batch_log_fetcher:
batch_log_fetcher = get_batch_log_fetcher(job_id)
if batch_log_fetcher:
batch_log_fetcher.start()
waiter = self.get_waiter("JobComplete")
waiter.config.delay = self.add_jitter(waiter.config.delay, width=2, minima=1)
waiter.config.max_attempts = sys.maxsize # timeout is managed by Airflow
waiter.wait(jobs=[job_id])
finally:
if batch_log_fetcher:
batch_log_fetcher.stop()
batch_log_fetcher.join()
except (botocore.exceptions.ClientError, botocore.exceptions.WaiterError) as err:
raise AirflowException(err)
|
BatchWaitersHook
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/wrong_variant_in_depends_on/package.py
|
{
"start": 216,
"end": 511
}
|
class ____(Package):
"""This package has a wrong variant spelled in a depends_on."""
homepage = "http://www.example.com"
url = "http://www.example.com/b-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("pkg-b+doesnotexist")
|
WrongVariantInDependsOn
|
python
|
kamyu104__LeetCode-Solutions
|
Python/get-the-maximum-score.py
|
{
"start": 33,
"end": 814
}
|
class ____(object):
def maxSum(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
MOD = 10**9+7
i, j = 0, 0
result, sum1, sum2 = 0, 0, 0
while i != len(nums1) or j != len(nums2):
if i != len(nums1) and (j == len(nums2) or nums1[i] < nums2[j]):
sum1 += nums1[i]
i += 1
elif j != len(nums2) and (i == len(nums1) or nums1[i] > nums2[j]):
sum2 += nums2[j]
j += 1
else:
result = (result + (max(sum1, sum2) + nums1[i])) % MOD
sum1, sum2 = 0, 0
i += 1
j += 1
return (result + max(sum1, sum2)) % MOD
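# A quick sanity check of the merge above (example arrays assumed, not from
# the original problem): the shared values 4 and 8 split both arrays into
# segments, and each segment contributes its larger prefix sum.
assert ____().maxSum([2, 4, 5, 8, 10], [4, 6, 8, 9]) == 30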
|
Solution
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol43.py
|
{
"start": 551,
"end": 872
}
|
class ____(Protocol):
def __add__(self: A, other: A) -> A: ...
T2 = TypeVar("T2", bound=HasAdd2)
def merge_element_lists2(a: Sequence[T2], b: Sequence[T2]) -> Sequence[T2]:
retval: Sequence[T2] = []
for a_elem in a:
for b_elem in b:
retval.append(a_elem + b_elem)
return retval
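# A small usage check (values assumed): int satisfies the __add__ protocol
# above, and the a-major/b-minor loop order yields [11, 21, 12, 22].
assert merge_element_lists2([1, 2], [10, 20]) == [11, 21, 12, 22]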
|
HasAdd2
|
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/experimental/rapidsmpf/utils.py
|
{
"start": 2116,
"end": 3915
}
|
class ____:
"""
A pair of channels for metadata and table data.
This abstraction ensures that metadata and data are kept separate,
avoiding ordering issues and making the code more type-safe.
Attributes
----------
metadata :
Channel for metadata.
data :
Channel for table data chunks.
Notes
-----
This is a placeholder implementation. The metadata channel exists
but is not used yet. Metadata handling will be fully implemented
in follow-up work.
"""
metadata: Channel[ArbitraryChunk]
data: Channel[TableChunk]
@classmethod
def create(cls, context: Context) -> ChannelPair:
"""Create a new ChannelPair with fresh channels."""
return cls(
metadata=context.create_channel(),
data=context.create_channel(),
)
async def send_metadata(self, ctx: Context, metadata: Metadata) -> None:
"""
Send metadata and drain the metadata channel.
Parameters
----------
ctx :
The streaming context.
metadata :
The metadata to send.
"""
msg = Message(0, ArbitraryChunk(metadata))
await self.metadata.send(ctx, msg)
await self.metadata.drain(ctx)
async def recv_metadata(self, ctx: Context) -> Metadata:
"""
Receive metadata from the metadata channel.
Parameters
----------
ctx :
The streaming context.
Returns
-------
Metadata
    The received metadata; a drained (``None``) result is treated as
    an error by the assertion below.
"""
msg = await self.metadata.recv(ctx)
assert msg is not None, f"Expected Metadata message, got {msg}."
return ArbitraryChunk.from_message(msg).release()
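# A minimal round-trip sketch built only from the methods above; the `ctx`
# object is assumed to come from the rapidsmpf streaming runtime, and in
# practice the sender and receiver would run as separate coroutines.
async def _channel_pair_roundtrip(ctx: Context, metadata: Metadata) -> Metadata:
    pair = ChannelPair.create(ctx)
    await pair.send_metadata(ctx, metadata)
    return await pair.recv_metadata(ctx)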
|
ChannelPair
|
python
|
ethereum__web3.py
|
ens/base_ens.py
|
{
"start": 568,
"end": 3490
}
|
class ____:
w3: Union["AsyncWeb3[Any]", "Web3"] = None
ens: Union["Contract", "AsyncContract"] = None
_resolver_contract: type["Contract"] | type["AsyncContract"] = None
_reverse_resolver_contract: type["Contract"] | type["AsyncContract"] = None
@property
def strict_bytes_type_checking(self) -> bool:
return self.w3.strict_bytes_type_checking
@strict_bytes_type_checking.setter
def strict_bytes_type_checking(self, strict_bytes_type_check: bool) -> None:
self.w3.strict_bytes_type_checking = strict_bytes_type_check
@staticmethod
@wraps(label_to_hash)
def labelhash(label: str) -> HexBytes:
return label_to_hash(label)
@staticmethod
@wraps(raw_name_to_hash)
def namehash(name: str) -> HexBytes:
return raw_name_to_hash(name)
@staticmethod
@wraps(normalize_name)
def nameprep(name: str) -> str:
return normalize_name(name)
@staticmethod
@wraps(is_valid_name)
def is_valid_name(name: str) -> bool:
return is_valid_name(name)
@staticmethod
@wraps(address_to_reverse_domain)
def reverse_domain(address: ChecksumAddress) -> str:
return address_to_reverse_domain(address)
@staticmethod
def parent(name: str) -> str:
"""
Part of ENSIP-10. Returns the parent of a given ENS name,
or the empty string if the ENS name does not have a parent.
e.g.
- parent('1.foo.bar.eth') = 'foo.bar.eth'
- parent('foo.bar.eth') = 'bar.eth'
- parent('foo.eth') = 'eth'
- parent('eth') is defined as the empty string ''
:param name: an ENS name
:return: the parent for the provided ENS name
:rtype: str
"""
if not name:
return ""
labels = name.split(".")
return "" if len(labels) == 1 else ".".join(labels[1:])
def _decode_ensip10_resolve_data(
self,
contract_call_result: bytes,
extended_resolver: Union["Contract", "AsyncContract"],
fn_name: str,
) -> Any:
# avoid getting 2 resolver functions named `addr`
func = (
extended_resolver.get_function_by_signature("addr(bytes32)")
if fn_name == "addr"
else extended_resolver.get_function_by_name(fn_name)
)
output_types = get_abi_output_types(func.abi)
decoded = self.w3.codec.decode(output_types, contract_call_result)
# if decoding a single value, return that value - else, return the tuple
return decoded[0] if len(decoded) == 1 else decoded
def _type_aware_resolver(
self,
address: ChecksumAddress,
func: str,
) -> Union["Contract", "AsyncContract"]:
return (
self._reverse_resolver_contract(address=address)
if func == "name"
else self._resolver_contract(address=address)
)
|
BaseENS
|
python
|
ray-project__ray
|
python/ray/tune/stopper/stopper.py
|
{
"start": 1723,
"end": 3079
}
|
class ____(Stopper):
"""Combine several stoppers via 'OR'.
Args:
*stoppers: Stoppers to be combined.
Examples:
>>> import numpy as np
>>> from ray import tune
>>> from ray.tune.stopper import (
... CombinedStopper,
... MaximumIterationStopper,
... TrialPlateauStopper,
... )
>>>
>>> stopper = CombinedStopper(
... MaximumIterationStopper(max_iter=10),
... TrialPlateauStopper(metric="my_metric"),
... )
>>> def train_fn(config):
... for i in range(15):
... tune.report({"my_metric": np.random.normal(0, 1 - i / 15)})
...
>>> tuner = tune.Tuner(
... train_fn,
... run_config=tune.RunConfig(stop=stopper),
... )
>>> print("[ignore]"); result_grid = tuner.fit() # doctest: +ELLIPSIS
[ignore]...
>>> all(result.metrics["training_iteration"] <= 20 for result in result_grid)
True
"""
def __init__(self, *stoppers: Stopper):
self._stoppers = stoppers
def __call__(self, trial_id: str, result: Dict[str, Any]) -> bool:
return any(s(trial_id, result) for s in self._stoppers)
def stop_all(self) -> bool:
return any(s.stop_all() for s in self._stoppers)
|
CombinedStopper
|
python
|
h5py__h5py
|
h5py/tests/test_file.py
|
{
"start": 29623,
"end": 30739
}
|
class ____(TestCase):
"""
Feature: Create file that switches on SWMR mode
"""
def test_file_mode_generalizes(self):
fname = self.mktemp()
fid = File(fname, 'w', libver='latest')
g = fid.create_group('foo')
# fid and group member file attribute should have the same mode
assert fid.mode == g.file.mode == 'r+'
fid.swmr_mode = True
# fid and group member file attribute should still be 'r+'
# even though file intent has changed
assert fid.mode == g.file.mode == 'r+'
fid.close()
def test_swmr_mode_consistency(self):
fname = self.mktemp()
fid = File(fname, 'w', libver='latest')
g = fid.create_group('foo')
assert fid.swmr_mode == g.file.swmr_mode == False
fid.swmr_mode = True
# This setter should affect both fid and group member file attribute
assert fid.swmr_mode == g.file.swmr_mode == True
fid.close()
@pytest.mark.skipif("HDF5_USE_FILE_LOCKING" in os.environ,
reason="HDF5_USE_FILE_LOCKING env. var. is set")
|
TestSWMRMode
|
python
|
eriklindernoren__ML-From-Scratch
|
mlfromscratch/deep_learning/optimizers.py
|
{
"start": 869,
"end": 1580
}
|
class ____():
def __init__(self, learning_rate=0.001, momentum=0.4):
self.learning_rate = learning_rate
self.momentum = momentum
self.w_updt = np.array([])
def update(self, w, grad_func):
    # Initialize on first update so the look-ahead below has a matching shape
    if not self.w_updt.any():
        self.w_updt = np.zeros(np.shape(w))
    # Calculate the gradient of the loss a bit further down the slope from w
    approx_future_grad = np.clip(grad_func(w - self.momentum * self.w_updt), -1, 1)
    self.w_updt = self.momentum * self.w_updt + self.learning_rate * approx_future_grad
    # Move against the gradient to minimize loss
    return w - self.w_updt
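# A minimal usage sketch, assuming a scalar quadratic loss f(w) = w ** 2 so
# that grad_func(w) = 2 * w (np is numpy, imported at the top of this module);
# repeated updates should drive w toward the minimum at 0.
if __name__ == "__main__":
    optimizer = ____(learning_rate=0.1, momentum=0.4)
    w = np.array([5.0])
    for _ in range(100):
        w = optimizer.update(w, lambda w_: 2 * w_)
    print(w)  # expected to end up close to [0.]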
|
NesterovAcceleratedGradient
|
python
|
pennersr__django-allauth
|
allauth/headless/account/views.py
|
{
"start": 7234,
"end": 8579
}
|
class ____(APIView):
input_class = VerifyPhoneInput
def handle(self, request, *args, **kwargs):
self.stage = LoginStageController.enter(request, PhoneVerificationStage.key)
if self.stage:
self.process = (
flows.phone_verification.PhoneVerificationStageProcess.resume(
self.stage
)
)
else:
if not request.user.is_authenticated:
return ConflictResponse(request)
self.process = (
flows.phone_verification.ChangePhoneVerificationProcess.resume(request)
)
if not self.process:
return ConflictResponse(request)
return super().handle(request, *args, **kwargs)
def get_input_kwargs(self):
return {
"code": self.process.code,
"user": self.process.user,
"phone": self.process.phone,
}
def handle_invalid_input(self, input: VerifyPhoneInput):
self.process.record_invalid_attempt()
return super().handle_invalid_input(input)
def post(self, request, *args, **kwargs):
self.process.finish()
response = None
if self.stage:
response = self.stage.exit()
return AuthenticationResponse.from_response(request, response)
|
VerifyPhoneView
|
python
|
django__django
|
tests/i18n/test_extraction.py
|
{
"start": 42812,
"end": 43915
}
|
class ____(ExtractorTests):
PO_FILE_PT = "locale/pt/LC_MESSAGES/django.po"
PO_FILE_DE = "locale/de/LC_MESSAGES/django.po"
PO_FILE_KO = "locale/ko/LC_MESSAGES/django.po"
LOCALES = ["pt", "de", "ch"]
def test_multiple_locales(self):
management.call_command("makemessages", locale=["pt", "de"], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_PT))
self.assertTrue(os.path.exists(self.PO_FILE_DE))
def test_all_locales(self):
"""
When the `locale` flag is absent, all dirs from the parent locale dir
are considered as language directories, except if the directory doesn't
start with two letters (which excludes __pycache__, .gitignore, etc.).
"""
os.mkdir(os.path.join("locale", "_do_not_pick"))
# Excluding locales that do not compile
management.call_command("makemessages", exclude=["ja", "es_AR"], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_KO))
self.assertFalse(os.path.exists("locale/_do_not_pick/LC_MESSAGES/django.po"))
|
MultipleLocaleExtractionTests
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/io/json.py
|
{
"start": 6359,
"end": 9004
}
|
class ____(BaseIO):
fname = "__test__.json"
def setup(self):
N = 10**5
ncols = 5
index = date_range("20000101", periods=N, freq="h")
timedeltas = timedelta_range(start=1, periods=N, freq="s")
datetimes = date_range(start=1, periods=N, freq="s")
ints = np.random.randint(100000000, size=N)
longints = sys.maxsize * np.random.randint(100000000, size=N)
floats = np.random.randn(N)
strings = Index([f"i-{i}" for i in range(N)], dtype=object)
self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N))
self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index)
self.df_td_int_ts = DataFrame(
{
"td_1": timedeltas,
"td_2": timedeltas,
"int_1": ints,
"int_2": ints,
"ts_1": datetimes,
"ts_2": datetimes,
},
index=index,
)
self.df_int_floats = DataFrame(
{
"int_1": ints,
"int_2": ints,
"int_3": ints,
"float_1": floats,
"float_2": floats,
"float_3": floats,
},
index=index,
)
self.df_int_float_str = DataFrame(
{
"int_1": ints,
"int_2": ints,
"float_1": floats,
"float_2": floats,
"str_1": strings,
"str_2": strings,
},
index=index,
)
self.df_longint_float_str = DataFrame(
{
"longint_1": longints,
"longint_2": longints,
"float_1": floats,
"float_2": floats,
"str_1": strings,
"str_2": strings,
},
index=index,
)
def time_floats_with_int_idx_lines(self):
self.df.to_json(self.fname, orient="records", lines=True)
def time_floats_with_dt_index_lines(self):
self.df_date_idx.to_json(self.fname, orient="records", lines=True)
def time_delta_int_tstamp_lines(self):
self.df_td_int_ts.to_json(self.fname, orient="records", lines=True)
def time_float_int_lines(self):
self.df_int_floats.to_json(self.fname, orient="records", lines=True)
def time_float_int_str_lines(self):
self.df_int_float_str.to_json(self.fname, orient="records", lines=True)
def time_float_longint_str_lines(self):
self.df_longint_float_str.to_json(self.fname, orient="records", lines=True)
|
ToJSONLines
|
python
|
openai__openai-python
|
src/openai/types/responses/tool_choice_shell_param.py
|
{
"start": 220,
"end": 353
}
|
class ____(TypedDict, total=False):
type: Required[Literal["shell"]]
"""The tool to call. Always `shell`."""
|
ToolChoiceShellParam
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_bar09.py
|
{
"start": 315,
"end": 1363
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_bar09.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar", "subtype": "stacked"})
chart.axis_ids = [40274560, 40295040]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-integer-added-to-array-i.py
|
{
"start": 37,
"end": 246
}
|
class ____(object):
def addedInteger(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
return max(nums2)-max(nums1)
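        # e.g. nums1 = [2, 6, 4], nums2 = [9, 7, 5] (values assumed): every
        # element is shifted by the same x, so the two maxima differ by
        # exactly x, and the answer here is 9 - 6 = 3.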
|
Solution
|
python
|
astropy__astropy
|
astropy/coordinates/tests/test_representation_arithmetic.py
|
{
"start": 41302,
"end": 43671
}
|
class ____:
"""Test copied from SphericalDifferential, so less extensive."""
def setup_method(self):
s = CylindricalRepresentation(
rho=[1, 2, 3] * u.kpc, phi=[0.0, 90.0, 315.0] * u.deg, z=[3, 2, 1] * u.kpc
)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert CylindricalDifferential.name == "cylindrical"
assert CylindricalDifferential.name in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
s, e, sf = self.s, self.e, self.sf
o_rho = CylindricalDifferential(1.0 * u.mpc, 0.0 * u.arcsec, 0.0 * u.kpc)
o_rhoc = o_rho.to_cartesian(base=s)
assert_quantity_allclose(o_rhoc[0].xyz, [1.0e-6, 0.0, 0.0] * u.kpc)
s_rho = s + 1.0 * u.mpc * sf["rho"] * e["rho"]
assert_representation_allclose(o_rhoc, s_rho - s, atol=1e-10 * u.kpc)
s_rho2 = s + o_rho
assert_representation_allclose(s_rho2, s_rho)
o_phi = CylindricalDifferential(0.0 * u.kpc, 1.0 * u.arcsec, 0.0 * u.kpc)
o_phic = o_phi.to_cartesian(base=s)
o_phi2 = CylindricalDifferential.from_cartesian(o_phic, base=s)
assert_quantity_allclose(o_phi.d_rho, o_phi2.d_rho, atol=1.0 * u.npc)
assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.0 * u.narcsec)
assert_quantity_allclose(o_phi.d_z, o_phi2.d_z, atol=1.0 * u.npc)
# simple check by hand for first element.
assert_quantity_allclose(
o_phic[0].xyz, [0.0, np.pi / 180.0 / 3600.0, 0.0] * u.kpc
)
# check all using unit vectors and scale factors.
s_phi = s + 1.0 * u.arcsec * sf["phi"] * e["phi"]
assert_representation_allclose(o_phic, s_phi - s, atol=1e-10 * u.kpc)
o_z = CylindricalDifferential(0.0 * u.kpc, 0.0 * u.arcsec, 1.0 * u.mpc)
o_zc = o_z.to_cartesian(base=s)
assert_quantity_allclose(o_zc[0].xyz, [0.0, 0.0, 1.0e-6] * u.kpc)
s_z = s + 1.0 * u.mpc * sf["z"] * e["z"]
assert_representation_allclose(o_zc, s_z - s, atol=1e-10 * u.kpc)
s_z2 = s + o_z
assert_representation_allclose(s_z2, s_z)
def test_differential_init_errors(self):
with pytest.raises(u.UnitsError):
CylindricalDifferential(1.0 * u.pc, 1.0 * u.arcsec, 3.0 * u.km / u.s)
|
TestCylindricalDifferential
|
python
|
davidhalter__jedi
|
test/completion/arrays.py
|
{
"start": 1991,
"end": 2349
}
|
class ____():
def __init__(self):
self.a = (str()).upper()
#? str()
C().a
# -----------------
# imbalanced sides
# -----------------
(f, g) = (1,)
#? int()
f
#? []
g.
(f, g, h) = (1,'')
#? int()
f
#? str()
g
#? []
h.
(f1, g1) = 1
#? []
f1.
#? []
g1.
(f, g) = (1,'',1.0)
#? int()
f
#? str()
g
# -----------------
# setitem
# -----------------
|
C
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1047520,
"end": 1048187
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of UpdateTopics"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "invalid_topic_names", "repository")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
invalid_topic_names = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="invalidTopicNames")
"""Names of the provided topics that are not valid."""
repository = sgqlc.types.Field("Repository", graphql_name="repository")
"""The updated repository."""
|
UpdateTopicsPayload
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/extra/django/_impl.py
|
{
"start": 1727,
"end": 8977
}
|
class ____(HypothesisTestCase, dst.StaticLiveServerTestCase):
pass
@defines_strategy()
def from_model(
model: type[ModelT], /, **field_strategies: st.SearchStrategy | EllipsisType
) -> st.SearchStrategy[ModelT]:
"""Return a strategy for examples of ``model``.
.. warning::
Hypothesis creates saved models. This will run inside your testing
transaction when using the test runner, but if you use the dev console
this will leave debris in your database.
``model`` must be a subclass of :class:`~django:django.db.models.Model`.
Strategies for fields may be passed as keyword arguments, for example
``is_staff=st.just(False)``. In order to support models with fields named
"model", this is a positional-only parameter.
Hypothesis can often infer a strategy based on the field type and validators,
and will attempt to do so for any required fields. No strategy will be
inferred for an :class:`~django:django.db.models.AutoField`, nullable field,
foreign key, or field for which a keyword
argument is passed to ``from_model()``. For example,
a Shop type with a foreign key to Company could be generated with::
shop_strategy = from_model(Shop, company=from_model(Company))
Like for :func:`~hypothesis.strategies.builds`, you can pass
``...`` (:obj:`python:Ellipsis`) as a keyword argument to infer a strategy for
a field which has a default value instead of using the default.
"""
if not issubclass(model, dm.Model):
raise InvalidArgument(f"{model=} must be a subtype of Model")
fields_by_name = {f.name: f for f in model._meta.concrete_fields}
for name, value in sorted(field_strategies.items()):
if value is ...:
field_strategies[name] = from_field(fields_by_name[name])
for name, field in sorted(fields_by_name.items()):
if (
name not in field_strategies
and not field.auto_created
and not isinstance(field, dm.AutoField)
and not isinstance(field, getattr(dm, "GeneratedField", ()))
and field.default is dm.fields.NOT_PROVIDED
):
field_strategies[name] = from_field(field)
for field in field_strategies:
if model._meta.get_field(field).primary_key:
# The primary key is generated as part of the strategy. We
# want to find any existing row with this primary key and
# overwrite its contents.
kwargs = {field: field_strategies.pop(field)}
kwargs["defaults"] = st.fixed_dictionaries(field_strategies) # type: ignore
return _models_impl(st.builds(model.objects.update_or_create, **kwargs))
# The primary key is not generated as part of the strategy, so we
# just match against any row that has the same value for all
# fields.
return _models_impl(st.builds(model.objects.get_or_create, **field_strategies))
@st.composite
def _models_impl(draw, strat):
"""Handle the nasty part of drawing a value for models()"""
try:
return draw(strat)[0]
except IntegrityError:
reject()
@defines_strategy()
def from_form(
form: type[df.Form],
form_kwargs: dict | None = None,
**field_strategies: st.SearchStrategy | EllipsisType,
) -> st.SearchStrategy[df.Form]:
"""Return a strategy for examples of ``form``.
``form`` must be a subclass of :class:`~django:django.forms.Form`.
Strategies for fields may be passed as keyword arguments, for example
``is_staff=st.just(False)``.
Hypothesis can often infer a strategy based on the field type and validators,
and will attempt to do so for any required fields. No strategy will be
inferred for a disabled field or field for which a keyword argument
is passed to ``from_form()``.
This function uses the fields of an unbound ``form`` instance to determine
field strategies, any keyword arguments needed to instantiate the unbound
``form`` instance can be passed into ``from_form()`` as a dict with the
keyword ``form_kwargs``. E.g.::
shop_strategy = from_form(Shop, form_kwargs={"company_id": 5})
Like for :func:`~hypothesis.strategies.builds`, you can pass
``...`` (:obj:`python:Ellipsis`) as a keyword argument to infer a strategy for
a field which has a default value instead of using the default.
"""
# currently unsupported:
# ComboField
# FilePathField
# ImageField
form_kwargs = form_kwargs or {}
if not issubclass(form, df.BaseForm):
raise InvalidArgument(f"{form=} must be a subtype of Form")
# Forms are a little bit different from models. Model classes have
# all their fields defined, whereas forms may have different fields
# per-instance. So, we ought to instantiate the form and get the
# fields from the instance, thus we need to accept the kwargs for
# instantiation as well as the explicitly defined strategies
unbound_form = form(**form_kwargs)
fields_by_name = {}
for name, field in unbound_form.fields.items():
if isinstance(field, df.MultiValueField):
# PS: So this is a little strange, but MultiValueFields must
# have their form data encoded in a particular way for the
# values to actually be picked up by the widget instances'
# ``value_from_datadict``.
# E.g. if a MultiValueField named 'mv_field' has 3
# sub-fields then the ``value_from_datadict`` will look for
# 'mv_field_0', 'mv_field_1', and 'mv_field_2'. Here I'm
# decomposing the individual sub-fields into the names that
# the form validation process expects
for i, _field in enumerate(field.fields):
fields_by_name[f"{name}_{i}"] = _field
else:
fields_by_name[name] = field
for name, value in sorted(field_strategies.items()):
if value is ...:
field_strategies[name] = from_field(fields_by_name[name])
for name, field in sorted(fields_by_name.items()):
if name not in field_strategies and not field.disabled:
field_strategies[name] = from_field(field)
# files are handled a bit specially in forms. A Form accepts two arguments:
# `data` and `files`. The former is for normal fields, and the latter is for
# file fields.
# see https://docs.djangoproject.com/en/5.1/ref/forms/api/#binding-uploaded-files.
data_strategies: dict[str, Any] = {}
file_strategies: dict[str, Any] = {}
for name, field in field_strategies.items():
form_field = fields_by_name[name]
dictionary = (
file_strategies if isinstance(form_field, df.FileField) else data_strategies
)
dictionary[name] = field
return _forms_impl(
st.builds(
partial(form, **form_kwargs), # type: ignore
data=st.fixed_dictionaries(data_strategies),
files=st.fixed_dictionaries(file_strategies),
)
)
@st.composite
def _forms_impl(draw, strat):
"""Handle the nasty part of drawing a value for from_form()"""
try:
return draw(strat)
except ValidationError:
reject()
|
StaticLiveServerTestCase
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-number-of-coins-for-fruits.py
|
{
"start": 1300,
"end": 1655
}
|
class ____(object):
def minimumCoins(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
dp = [float("inf")]*(len(prices)+1)
dp[0] = 0
for i in xrange(len(prices)):
for j in xrange(i//2, i+1):
dp[i+1] = min(dp[i+1], dp[j]+prices[j])
return dp[-1]
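    # Worked example (prices assumed): [3, 1, 2] -> 4. Buying fruit 1 costs 3
    # and makes fruit 2 free; buying fruit 2 anyway costs 1 and makes fruits
    # 3-4 free. The inner loop starts at i // 2 because buying fruit j + 1
    # (1-indexed) covers every fruit up to index 2 * (j + 1) >= i + 1.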
|
Solution3
|
python
|
fluentpython__example-code
|
20-descriptor/descriptorkinds_dump.py
|
{
"start": 4669,
"end": 4846
}
|
class ____: # <4>
"""an overriding descriptor without ``__get__``"""
def __set__(self, instance, value):
print_args('set', self, instance, value)
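# Behavior sketch: every assignment is intercepted by __set__ above (which
# only prints and stores nothing), and because there is no __get__, reading
# the attribute returns this descriptor object itself unless a value has
# been planted directly in the instance __dict__, which then shadows it.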
|
OverridingNoGet
|
python
|
Textualize__textual
|
tests/test_on.py
|
{
"start": 4038,
"end": 9002
}
|
class ____(Widget):
@dataclass
class Parent(Message):
sender: MessageSender
@property
def control(self) -> MessageSender:
return self.sender
class Child(Parent):
pass
def post_parent(self) -> None:
self.post_message(self.Parent(self))
def post_child(self) -> None:
self.post_message(self.Child(self))
async def test_fire_on_inherited_message() -> None:
"""Handlers should fire when descendant messages are posted."""
posted: list[str] = []
class InheritTestApp(App[None]):
def compose(self) -> ComposeResult:
yield MessageSender()
@on(MessageSender.Parent)
def catch_parent(self) -> None:
posted.append("parent")
@on(MessageSender.Child)
def catch_child(self) -> None:
posted.append("child")
def on_mount(self) -> None:
self.query_one(MessageSender).post_parent()
self.query_one(MessageSender).post_child()
async with InheritTestApp().run_test():
pass
assert posted == ["parent", "child", "parent"]
async def test_fire_inherited_on_single_handler() -> None:
"""Test having parent/child messages on a single handler."""
posted: list[str] = []
class InheritTestApp(App[None]):
def compose(self) -> ComposeResult:
yield MessageSender()
@on(MessageSender.Parent)
@on(MessageSender.Child)
def catch_either(self, event: MessageSender.Parent) -> None:
posted.append(f"either {event.__class__.__name__}")
def on_mount(self) -> None:
self.query_one(MessageSender).post_parent()
self.query_one(MessageSender).post_child()
async with InheritTestApp().run_test():
pass
assert posted == ["either Parent", "either Child"]
async def test_fire_inherited_on_single_handler_multi_selector() -> None:
"""Test having parent/child messages on a single handler but with different selectors."""
posted: list[str] = []
class InheritTestApp(App[None]):
def compose(self) -> ComposeResult:
yield MessageSender(classes="a b")
@on(MessageSender.Parent, ".y")
@on(MessageSender.Child, ".y")
@on(MessageSender.Parent, ".a.b")
@on(MessageSender.Child, ".a.b")
@on(MessageSender.Parent, ".a")
@on(MessageSender.Child, ".a")
@on(MessageSender.Parent, ".b")
@on(MessageSender.Child, ".b")
@on(MessageSender.Parent, ".x")
@on(MessageSender.Child, ".x")
def catch_either(self, event: MessageSender.Parent) -> None:
posted.append(f"either {event.__class__.__name__}")
@on(MessageSender.Child, ".a, .x")
def catch_selector_list_one_miss(self, event: MessageSender.Parent) -> None:
posted.append(f"selector list one miss {event.__class__.__name__}")
@on(MessageSender.Child, ".a, .b")
def catch_selector_list_two_hits(self, event: MessageSender.Parent) -> None:
posted.append(f"selector list two hits {event.__class__.__name__}")
@on(MessageSender.Child, ".a.b")
def catch_selector_combined_hits(self, event: MessageSender.Parent) -> None:
posted.append(f"combined hits {event.__class__.__name__}")
@on(MessageSender.Child, ".a.x")
def catch_selector_combined_miss(self, event: MessageSender.Parent) -> None:
posted.append(f"combined miss {event.__class__.__name__}")
def on_mount(self) -> None:
self.query_one(MessageSender).post_parent()
self.query_one(MessageSender).post_child()
async with InheritTestApp().run_test():
pass
assert posted == [
"either Parent",
"either Child",
"selector list one miss Child",
"selector list two hits Child",
"combined hits Child",
]
async def test_fire_inherited_and_on_methods() -> None:
posted: list[str] = []
class OnAndOnTestApp(App[None]):
def compose(self) -> ComposeResult:
yield MessageSender()
def on_message_sender_parent(self) -> None:
posted.append("on_message_sender_parent")
@on(MessageSender.Parent)
def catch_parent(self) -> None:
posted.append("catch_parent")
def on_message_sender_child(self) -> None:
posted.append("on_message_sender_child")
@on(MessageSender.Child)
def catch_child(self) -> None:
posted.append("catch_child")
def on_mount(self) -> None:
self.query_one(MessageSender).post_parent()
self.query_one(MessageSender).post_child()
async with OnAndOnTestApp().run_test():
pass
assert posted == [
"catch_parent",
"on_message_sender_parent",
"catch_child",
"catch_parent",
"on_message_sender_child",
]
|
MessageSender
|
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/dsl/ir.py
|
{
"start": 97820,
"end": 98603
}
|
class ____(IR):
"""Slice a dataframe."""
__slots__ = ("length", "offset")
_non_child = ("schema", "offset", "length")
offset: int
"""Start of the slice."""
length: int | None
"""Length of the slice."""
def __init__(self, schema: Schema, offset: int, length: int | None, df: IR):
self.schema = schema
self.offset = offset
self.length = length
self._non_child_args = (offset, length)
self.children = (df,)
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="Slice")
def do_evaluate(
cls, offset: int, length: int, df: DataFrame, *, context: IRExecutionContext
) -> DataFrame:
"""Evaluate and return a dataframe."""
return df.slice((offset, length))
|
Slice
|
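For reference, a sketch of the user-level polars operation this IR node evaluates (plain CPU polars here; cudf-polars executes the equivalent plan on the GPU):

import polars as pl

lf = pl.LazyFrame({"a": [1, 2, 3, 4, 5]})
# offset=1, length=3 corresponds to the (offset, length) pair above
print(lf.slice(1, 3).collect())   # rows with a = 2, 3, 4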
python
|
wandb__wandb
|
wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/kqueue.py
|
{
"start": 9976,
"end": 12033
}
|
class ____(object):
"""
A kevent descriptor convenience data structure to keep together:
* kevent
* directory status
* path
* file descriptor
:param path:
Path string for which a kevent descriptor will be created.
:param is_directory:
``True`` if the path refers to a directory; ``False`` otherwise.
:type is_directory:
``bool``
"""
def __init__(self, path, is_directory):
self._path = absolute_path(path)
self._is_directory = is_directory
self._fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS)
self._kev = select.kevent(self._fd,
filter=WATCHDOG_KQ_FILTER,
flags=WATCHDOG_KQ_EV_FLAGS,
fflags=WATCHDOG_KQ_FFLAGS)
@property
def fd(self):
"""OS file descriptor for the kevent descriptor."""
return self._fd
@property
def path(self):
"""The path associated with the kevent descriptor."""
return self._path
@property
def kevent(self):
"""The kevent object associated with the kevent descriptor."""
return self._kev
@property
def is_directory(self):
"""Determines whether the kevent descriptor refers to a directory.
:returns:
``True`` or ``False``
"""
return self._is_directory
def close(self):
"""
Closes the file descriptor associated with a kevent descriptor.
"""
try:
os.close(self.fd)
except OSError:
pass
@property
def key(self):
return (self.path, self.is_directory)
def __eq__(self, descriptor):
return self.key == descriptor.key
def __ne__(self, descriptor):
return self.key != descriptor.key
def __hash__(self):
return hash(self.key)
def __repr__(self):
return "<KeventDescriptor: path=%s, is_directory=%s>"\
% (self.path, self.is_directory)
|
KeventDescriptor
|
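A minimal, BSD/macOS-only sketch of the kernel-side pattern this wrapper packages. The filter and flag constants below are standard select-module values chosen to approximate watchdog's WATCHDOG_KQ_* module constants, which are defined elsewhere in the file.

import os
import select

fd = os.open("/tmp", os.O_RDONLY)
kev = select.kevent(
    fd,
    filter=select.KQ_FILTER_VNODE,
    flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR,
    fflags=select.KQ_NOTE_WRITE | select.KQ_NOTE_DELETE | select.KQ_NOTE_RENAME,
)
kq = select.kqueue()
# register the kevent and block until the kernel reports one change
events = kq.control([kev], 1, None)
print(events)
os.close(fd)
kq.close()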
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-number-of-operations-to-make-all-array-elements-equal-to-1.py
|
{
"start": 78,
"end": 724
}
|
class ____(object):
def minOperations(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
cnt = nums.count(1)
if cnt:
return len(nums)-cnt
result = float("inf")
for i in xrange(len(nums)):
g = nums[i]
for j in range(i+1, len(nums)):
g = gcd(g, nums[j])
if g == 1:
result = min(result, j-i)
break
return result+(len(nums)-1) if result != float("inf") else -1
|
Solution
|
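A worked check of the two-phase reasoning above, in Python 3: it costs (j - i) operations to produce a single 1 from the shortest subarray nums[i..j] whose gcd is 1, and (n - 1) further operations to spread that 1 over the whole array.

from functools import reduce
from math import gcd

nums = [2, 6, 3, 4]
n = len(nums)
best = min(
    j - i
    for i in range(n)
    for j in range(i + 1, n)
    if reduce(gcd, nums[i : j + 1]) == 1
)
print(best + (n - 1))   # 1 + 3 = 4; the subarray [3, 4] already has gcd 1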
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/event.py
|
{
"start": 4933,
"end": 5520
}
|
class ____(TypedDict):
id: str
groupID: str | None
eventID: str
projectID: str
message: str | None
title: str
location: str | None
user: EventUserApiContext | None
tags: list[EventTag]
platform: str
dateReceived: datetime | None
contexts: dict[str, Any] | None
size: int | None
entries: list[Any]
dist: str | None
sdk: dict[str, str]
context: dict[str, Any] | None
packages: dict[str, Any]
type: str
metadata: Any
errors: list[Any]
occurrence: Any
_meta: dict[str, Any]
|
BaseEventSerializerResponse
|
python
|
mlflow__mlflow
|
mlflow/legacy_databricks_cli/configure/provider.py
|
{
"start": 6827,
"end": 7224
}
|
class ____:
"""
Responsible for providing hostname and authentication information to make
API requests against the Databricks REST API.
This method should generally return None if it cannot provide credentials, in order
to facilitate chaining of providers.
"""
__metaclass__ = ABCMeta
@abstractmethod
def get_config(self):
pass
|
DatabricksConfigProvider
|
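A hedged sketch of the chaining contract the docstring describes (EnvVarProvider, the environment-variable names, and the SimpleNamespace config object are illustrative, not part of the real module; the base class name is the one revealed above):

import os
from types import SimpleNamespace

class EnvVarProvider(DatabricksConfigProvider):
    def get_config(self):
        host = os.environ.get("DATABRICKS_HOST")
        token = os.environ.get("DATABRICKS_TOKEN")
        if not (host and token):
            return None   # signal "no credentials"; let the next provider try
        return SimpleNamespace(host=host, token=token)

def resolve_config(providers):
    """Return the first non-None config from a chain of providers."""
    for provider in providers:
        config = provider.get_config()
        if config is not None:
            return config
    raise RuntimeError("no provider could supply Databricks credentials")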
python
|
spyder-ide__spyder
|
spyder/plugins/application/container.py
|
{
"start": 3228,
"end": 33847
}
|
class ____(PluginMainContainer):
sig_report_issue_requested = Signal()
"""
Signal to request reporting an issue to Github.
"""
sig_load_log_file = Signal(str)
"""
Signal to load a log file
"""
sig_new_file_requested = Signal()
"""
Signal to request that a new file be created in a suitable plugin.
"""
sig_open_file_in_plugin_requested = Signal(str)
"""
Signal to request that given file is opened in a suitable plugin.
Arguments
---------
filename : str
"""
sig_open_file_using_dialog_requested = Signal()
"""
Signal to request that the Open File dialog is shown to open a file.
"""
sig_open_last_closed_requested = Signal()
"""
Signal to request that the last closed file be opened again.
"""
sig_save_file_requested = Signal()
"""
Signal to request that the current file be saved.
"""
sig_save_all_requested = Signal()
"""
Signal to request that all files in the current plugin be saved.
"""
sig_save_file_as_requested = Signal()
"""
Signal to request that the current file be saved under a different name.
"""
sig_save_copy_as_requested = Signal()
"""
Signal to request that copy of current file be saved under a new name.
"""
sig_revert_file_requested = Signal()
"""
Signal to request that the current file be reverted from disk.
"""
sig_close_file_requested = Signal()
"""
Signal to request that the current file be closed.
"""
sig_close_all_requested = Signal()
"""
Signal to request that all open files be closed.
"""
sig_undo_requested = Signal()
"""
Signal to request an undo operation.
"""
sig_redo_requested = Signal()
"""
Signal to request a redo operation.
"""
sig_cut_requested = Signal()
"""
Signal to request a cut operation.
"""
sig_copy_requested = Signal()
"""
Signal to request a copy operation.
"""
sig_paste_requested = Signal()
"""
Signal to request a paste operation.
"""
sig_select_all_requested = Signal()
"""
Signal to request that all text is selected.
"""
sig_find_requested = Signal()
"""
Signal to request to find text.
"""
sig_find_next_requested = Signal()
"""
Signal to request to find the next text occurrence.
"""
sig_find_previous_requested = Signal()
"""
Signal to request to find the previous text occurrence.
"""
sig_replace_requested = Signal()
"""
Signal to request to replace a text occurrence.
"""
def __init__(self, name, plugin, parent=None):
super().__init__(name, plugin, parent)
# Keep track of dpi message
self.current_dpi = None
self.dpi_messagebox = None
# Keep track of list of recent files
self.recent_files = self.get_conf('recent_files', [])
# ---- PluginMainContainer API
# -------------------------------------------------------------------------
def setup(self):
# Compute dependencies in a thread to not block the interface.
self.dependencies_thread = QThread(None)
self.dependencies_dialog = DependenciesDialog(self)
# Attributes
self.dialog_manager = DialogManager()
self.inapp_appeal_status = InAppAppealStatus(self)
# Actions
# Documentation actions
self.documentation_action = self.create_action(
ApplicationActions.SpyderDocumentationAction,
text=_("Spyder documentation"),
icon=self.create_icon("DialogHelpButton"),
triggered=lambda: start_file(__docs_url__),
context=Qt.ApplicationShortcut,
register_shortcut=True,
shortcut_context="_")
spyder_video_url = ("https://www.youtube.com/playlist"
"?list=PLPonohdiDqg9epClEcXoAPUiK0pN5eRoc")
self.video_action = self.create_action(
ApplicationActions.SpyderDocumentationVideoAction,
text=_("Tutorial videos"),
icon=self.create_icon("VideoIcon"),
triggered=lambda: start_file(spyder_video_url))
# Support actions
self.trouble_action = self.create_action(
ApplicationActions.SpyderTroubleshootingAction,
_("Troubleshooting guide"),
triggered=lambda: start_file(__trouble_url__))
self.report_action = self.create_action(
ConsoleActions.SpyderReportAction,
_("Report issue..."),
icon=self.create_icon('bug'),
triggered=self.sig_report_issue_requested)
self.dependencies_action = self.create_action(
ApplicationActions.SpyderDependenciesAction,
_("Dependency status"),
triggered=self.show_dependencies,
icon=self.create_icon('advanced'))
self.support_group_action = self.create_action(
ApplicationActions.SpyderSupportAction,
_("Spyder Google group"),
triggered=lambda: start_file(__forum_url__))
self.create_action(
ApplicationActions.HelpSpyderAction,
_("Help Spyder..."),
icon=self.create_icon("inapp_appeal"),
triggered=self.inapp_appeal_status.show_appeal
)
# About action
self.about_action = self.create_action(
ApplicationActions.SpyderAbout,
_("About %s") % "Spyder",
icon=self.create_icon('MessageBoxInformation'),
triggered=self.show_about,
menurole=QAction.AboutRole)
# Tools actions
if os.name == 'nt':
tip = _("Show and edit current user environment variables in "
"Windows registry (i.e. for all sessions)")
else:
tip = _("Show current user environment variables (i.e. for all "
"sessions)")
self.user_env_action = self.create_action(
ApplicationActions.SpyderUserEnvVariables,
_("User environment variables"),
icon=self.create_icon('environment'),
tip=tip,
triggered=self.show_user_env_variables)
# Application base actions
self.restart_action = self.create_action(
ApplicationActions.SpyderRestart,
_("&Restart"),
icon=self.create_icon('restart'),
tip=_("Restart"),
triggered=self.restart_normal,
context=Qt.ApplicationShortcut,
shortcut_context="_",
register_shortcut=True)
self.restart_debug_action = self.create_action(
ApplicationActions.SpyderRestartDebug,
_("&Restart in debug mode"),
tip=_("Restart in debug mode"),
triggered=self.restart_debug,
context=Qt.ApplicationShortcut,
shortcut_context="_",
register_shortcut=True)
# File actions
self.new_action = self.create_action(
ApplicationActions.NewFile,
text=_("&New file..."),
icon=self.create_icon('filenew'),
tip=_("New file"),
triggered=self.sig_new_file_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.open_action = self.create_action(
ApplicationActions.OpenFile,
text=_("&Open..."),
icon=self.create_icon('fileopen'),
tip=_("Open file"),
triggered=self.sig_open_file_using_dialog_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.open_last_closed_action = self.create_action(
ApplicationActions.OpenLastClosed,
text=_("O&pen last closed"),
tip=_("Open last closed"),
triggered=self.sig_open_last_closed_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.recent_files_menu = self.create_menu(
ApplicationPluginMenus.RecentFilesMenu,
title=_("Open &recent")
)
self.recent_files_menu.aboutToShow.connect(
self.update_recent_files_menu
)
self.max_recent_action = self.create_action(
ApplicationActions.MaxRecentFiles,
text=_("Maximum number of recent files..."),
triggered=self.change_max_recent_files
)
self.clear_recent_action = self.create_action(
ApplicationActions.ClearRecentFiles,
text=_("Clear this list"),
tip=_("Clear recent files list"),
triggered=self.clear_recent_files
)
self.save_action = self.create_action(
ApplicationActions.SaveFile,
text=_("&Save"),
icon=self.create_icon('filesave'),
tip=_("Save file"),
triggered=self.sig_save_file_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.save_all_action = self.create_action(
ApplicationActions.SaveAll,
text=_("Sav&e all"),
icon=self.create_icon('save_all'),
tip=_("Save all files"),
triggered=self.sig_save_all_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.save_as_action = self.create_action(
ApplicationActions.SaveAs,
text=_("Save &as"),
icon=self.create_icon('filesaveas'),
tip=_("Save current file as..."),
triggered=self.sig_save_file_as_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.save_copy_as_action = self.create_action(
ApplicationActions.SaveCopyAs,
text=_("Save copy as..."),
icon=self.create_icon('filesaveas'),
tip=_("Save copy of current file as..."),
triggered=self.sig_save_copy_as_requested.emit
)
self.revert_action = self.create_action(
ApplicationActions.RevertFile,
text=_("&Revert"),
icon=self.create_icon('revert'),
tip=_("Revert file from disk"),
triggered=self.sig_revert_file_requested.emit
)
self.close_file_action = self.create_action(
ApplicationActions.CloseFile,
text=_("&Close"),
icon=self.create_icon('fileclose'),
tip=_("Close current file"),
triggered=self.sig_close_file_requested.emit
)
self.close_all_action = self.create_action(
ApplicationActions.CloseAll,
text=_("C&lose all"),
icon=ima.icon('filecloseall'),
tip=_("Close all opened files"),
triggered=self.sig_close_all_requested.emit,
shortcut_context="main",
register_shortcut=True
)
# Edit actions
self.undo_action = self.create_action(
ApplicationActions.Undo,
text=_('Undo'),
icon=self.create_icon('undo'),
triggered=self.sig_undo_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.redo_action = self.create_action(
ApplicationActions.Redo,
text=_('Redo'),
icon=self.create_icon('redo'),
triggered=self.sig_redo_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.cut_action = self.create_action(
ApplicationActions.Cut,
text=_('Cut'),
icon=self.create_icon('editcut'),
triggered=self.sig_cut_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.copy_action = self.create_action(
ApplicationActions.Copy,
text=_('Copy'),
icon=self.create_icon('editcopy'),
triggered=self.sig_copy_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.paste_action = self.create_action(
ApplicationActions.Paste,
text=_('Paste'),
icon=self.create_icon('editpaste'),
triggered=self.sig_paste_requested.emit,
shortcut_context="main",
register_shortcut=True
)
self.select_all_action = self.create_action(
ApplicationActions.SelectAll,
text=_('Select All'),
icon=self.create_icon('selectall'),
triggered=self.sig_select_all_requested.emit,
shortcut_context="main",
register_shortcut=True
)
# Search actions
self.find_action = self.create_action(
ApplicationActions.FindText,
text=_("&Find text"),
icon=self.create_icon('find'),
tip=_("Find text"),
triggered=self.sig_find_requested,
shortcut_context="find_replace",
)
self.find_next_action = self.create_action(
ApplicationActions.FindNext,
text=_("Find &next"),
icon=self.create_icon('findnext'),
triggered=self.sig_find_next_requested,
shortcut_context="find_replace",
)
self.find_previous_action = self.create_action(
ApplicationActions.FindPrevious,
text=_("Find &previous"),
icon=ima.icon('findprevious'),
triggered=self.sig_find_previous_requested,
shortcut_context="find_replace",
)
self.replace_action = self.create_action(
ApplicationActions.ReplaceText,
text=_("&Replace text"),
icon=ima.icon('replace'),
tip=_("Replace text"),
triggered=self.sig_replace_requested,
shortcut_context="find_replace",
)
# Debug logs
if get_debug_level() >= 2:
self.menu_debug_logs = self.create_menu(
ApplicationPluginMenus.DebugLogsMenu,
_("Debug logs")
)
# The menu can't be built at startup because Completions can
# start after Application.
self.menu_debug_logs.aboutToShow.connect(
self.create_debug_log_actions)
# File types and filters used by the Open dialog
self.edit_filetypes = None
self.edit_filters = None
def update_actions(self):
pass
# ---- Other functionality
# -------------------------------------------------------------------------
def on_close(self):
"""To call from Spyder when the plugin is closed."""
self.dialog_manager.close_all()
self.set_conf('recent_files', self.recent_files)
if self.dependencies_thread is not None:
self.dependencies_thread.quit()
self.dependencies_thread.wait()
@Slot()
def show_about(self):
"""Show Spyder About dialog."""
abt = AboutDialog(self)
abt.show()
@Slot()
def show_user_env_variables(self):
"""Show Windows current user environment variables."""
self.dialog_manager.show(UserEnvDialog(self))
# ---- Dependencies
# -------------------------------------------------------------------------
def _set_dependencies(self):
if dependencies.DEPENDENCIES:
self.dependencies_dialog.set_data(dependencies.DEPENDENCIES)
@Slot()
def show_dependencies(self):
"""Show Spyder Dependencies dialog."""
self.dependencies_dialog.show()
def _compute_dependencies(self):
"""Compute dependencies without errors."""
# Skip error when trying to register dependencies several times.
# This can happen if the user tries to display the dependencies
# dialog before dependencies_thread has finished.
try:
dependencies.declare_dependencies()
except ValueError:
pass
def compute_dependencies(self):
"""Compute dependencies."""
self.dependencies_thread.run = self._compute_dependencies
self.dependencies_thread.finished.connect(
self.report_missing_dependencies)
self.dependencies_thread.finished.connect(self._set_dependencies)
# This avoids computing missing deps before the window is fully up
dependencies_timer = QTimer(self)
dependencies_timer.setInterval(30000)
dependencies_timer.setSingleShot(True)
dependencies_timer.timeout.connect(self.dependencies_thread.start)
dependencies_timer.start()
@Slot()
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies."""
missing_deps = dependencies.missing_dependencies()
if missing_deps:
InstallerMissingDependencies(missing_deps)
# We change '<br>' by '\n', in order to replace the '<'
# that appear in our deps by '&lt;' (to not break html
# formatting) and finally we restore '<br>' again.
missing_deps = (missing_deps.replace('<br>', '\n').
                replace('<', '&lt;').replace('\n', '<br>'))
message = (
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs."
" Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps
)
message_box = QMessageBox(self)
message_box.setIcon(QMessageBox.Critical)
message_box.setAttribute(Qt.WA_DeleteOnClose)
message_box.setAttribute(Qt.WA_ShowWithoutActivating)
message_box.setStandardButtons(QMessageBox.Ok)
message_box.setWindowModality(Qt.NonModal)
message_box.setWindowTitle(_('Error'))
message_box.setText(message)
message_box.show()
# ---- Restart
# -------------------------------------------------------------------------
@Slot()
def restart_normal(self):
"""Restart in standard mode."""
os.environ['SPYDER_DEBUG'] = ''
self.sig_restart_requested.emit()
@Slot()
def restart_debug(self):
"""Restart in debug mode."""
box = QMessageBox(self)
box.setWindowTitle(_("Question"))
box.setIcon(QMessageBox.Question)
box.setText(
_("Which debug mode do you want Spyder to restart in?")
)
button_verbose = QPushButton(_('Verbose'))
button_minimal = QPushButton(_('Minimal'))
box.addButton(button_verbose, QMessageBox.AcceptRole)
box.addButton(button_minimal, QMessageBox.AcceptRole)
box.setStandardButtons(QMessageBox.Cancel)
box.exec_()
if box.clickedButton() == button_minimal:
os.environ['SPYDER_DEBUG'] = '2'
elif box.clickedButton() == button_verbose:
os.environ['SPYDER_DEBUG'] = '3'
else:
return
self.sig_restart_requested.emit()
# ---- File actions
# -------------------------------------------------------------------------
def open_file_using_dialog(self, filename: Optional[str], basedir: str):
"""
Show Open File dialog and open the selected file.
Parameters
----------
filename : Optional[str]
Name of currently active file. This is used to set the selected
name filter for the Open File dialog.
basedir : str
Directory initially displayed in the Open File dialog.
"""
if self.edit_filetypes is None:
self.edit_filetypes = get_edit_filetypes()
if self.edit_filters is None:
self.edit_filters = get_edit_filters()
self.sig_redirect_stdio_requested.emit(False)
if filename is not None:
selectedfilter = get_filter(
self.edit_filetypes, osp.splitext(filename)[1]
)
else:
selectedfilter = ''
filenames = []
if not running_under_pytest():
# See: spyder-ide/spyder#3291
if sys.platform == 'darwin':
dialog = QFileDialog(
parent=self,
caption=_("Open file"),
directory=basedir,
)
dialog.setNameFilters(self.edit_filters.split(';;'))
dialog.setOption(QFileDialog.HideNameFilterDetails, True)
dialog.setFilter(
QDir.AllDirs | QDir.Files | QDir.Drives | QDir.Hidden
)
dialog.setFileMode(QFileDialog.ExistingFiles)
if dialog.exec_():
filenames = dialog.selectedFiles()
else:
filenames, _sf = getopenfilenames(
self,
_("Open file"),
basedir,
self.edit_filters,
selectedfilter=selectedfilter,
options=QFileDialog.HideNameFilterDetails,
)
else:
# Use a Qt (i.e. scriptable) dialog for pytest
dialog = QFileDialog(
self, _("Open file"), options=QFileDialog.DontUseNativeDialog
)
if dialog.exec_():
filenames = dialog.selectedFiles()
self.sig_redirect_stdio_requested.emit(True)
for filename in filenames:
filename = osp.normpath(filename)
self.sig_open_file_in_plugin_requested.emit(filename)
def add_recent_file(self, fname: str) -> None:
"""
Add file to the list of recent files.
This function adds the given file name to the list of recent files,
which is used in the `File > Open recent` menu. The function ensures
that the list has no duplicates and it is no longer than the maximum
length.
"""
if fname in self.recent_files:
self.recent_files.remove(fname)
self.recent_files.insert(0, fname)
if len(self.recent_files) > self.get_conf('max_recent_files'):
self.recent_files.pop(-1)
def clear_recent_files(self) -> None:
"""
Clear list of recent files.
"""
self.recent_files = []
def update_recent_files_menu(self):
"""
Update recent files menu
Add menu items for all the recent files to the menu. Also add items
for setting the maximum number and for clearing the list.
This function is called before the menu is about to be shown.
"""
self.recent_files_menu.clear_actions()
recent_files = [
fname for fname in self.recent_files
if osp.isfile(fname)
]
for fname in recent_files:
icon = ima.get_icon_by_extension_or_type(fname, scale_factor=1.0)
action = self.create_action(
name=f'Recent file {fname}',
text=fname,
icon=icon,
triggered=functools.partial(
self.sig_open_file_in_plugin_requested.emit,
fname
)
)
self.recent_files_menu.add_action(
action,
section='recent_files_section',
omit_id=True,
before_section='recent_files_actions_section'
)
self.clear_recent_action.setEnabled(len(recent_files) > 0)
for menu_action in (self.max_recent_action, self.clear_recent_action):
self.recent_files_menu.add_action(
menu_action,
section='recent_files_actions_section'
)
self.recent_files_menu.render()
def change_max_recent_files(self) -> None:
"""
Change the maximum length of the list of recent files.
"""
mrf, valid = QInputDialog.getInt(
self,
_('Editor'),
_('Maximum number of recent files'),
self.get_conf('max_recent_files'),
1,
35
)
if valid:
self.set_conf('max_recent_files', mrf)
# ---- Log files
# -------------------------------------------------------------------------
def create_debug_log_actions(self):
"""Create an action for each lsp and debug log file."""
self.menu_debug_logs.clear_actions()
files = [os.environ['SPYDER_DEBUG_FILE']]
files += glob.glob(os.path.join(get_conf_path('lsp_logs'), '*.log'))
debug_logs_actions = []
for file in files:
action = self.create_action(
file,
os.path.basename(file),
tip=file,
triggered=lambda _, file=file: self.load_log_file(file),
overwrite=True,
register_action=False
)
debug_logs_actions.append(action)
# Add Spyder log on its own section
self.add_item_to_menu(
debug_logs_actions[0],
self.menu_debug_logs,
section=LogsMenuSections.SpyderLogSection
)
# Add LSP logs
for action in debug_logs_actions[1:]:
self.add_item_to_menu(
action,
self.menu_debug_logs,
section=LogsMenuSections.LSPLogsSection
)
# Render menu
self.menu_debug_logs.render()
def load_log_file(self, file):
"""Load log file in editor"""
self.sig_load_log_file.emit(file)
# ---- DPI changes
# -------------------------------------------------------------------------
def set_window(self, window):
"""Set window property of main window."""
self._window = window
def handle_new_screen(self, new_screen):
"""Connect DPI signals for new screen."""
if new_screen is not None:
new_screen_dpi = new_screen.logicalDotsPerInch()
if self.current_dpi != new_screen_dpi:
self.show_dpi_change_message(new_screen_dpi)
else:
new_screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
def handle_dpi_change_response(self, result, dpi):
"""Handle dpi change message dialog result."""
if self.dpi_messagebox.is_checked():
self.set_conf('show_dpi_message', False)
self.dpi_messagebox = None
if result == 0: # Restart button was clicked
# Activate HDPI auto-scaling option since is needed for a
# proper display when using OS scaling
self.set_conf('normal_screen_resolution', False)
self.set_conf('high_dpi_scaling', True)
self.set_conf('high_dpi_custom_scale_factor', False)
self.sig_restart_requested.emit()
else:
# Update current dpi for future checks
self.current_dpi = dpi
def show_dpi_change_message(self, dpi):
"""Show message to restart Spyder since the DPI scale changed."""
if not self.get_conf('show_dpi_message'):
return
if self.current_dpi != dpi:
# Check the window state to not show the message if the window
# is in fullscreen mode.
window = self._window.windowHandle()
if (window.windowState() == Qt.WindowFullScreen and
sys.platform == 'darwin'):
return
if self.get_conf('high_dpi_scaling'):
return
if self.dpi_messagebox is not None:
self.dpi_messagebox.activateWindow()
self.dpi_messagebox.raise_()
return
self.dpi_messagebox = MessageCheckBox(icon=QMessageBox.Warning,
parent=self)
self.dpi_messagebox.set_checkbox_text(_("Don't show again."))
self.dpi_messagebox.set_checked(False)
self.dpi_messagebox.set_check_visible(True)
self.dpi_messagebox.setText(
_("A monitor scale change was detected. <br><br>"
"We recommend restarting Spyder to ensure that it's properly "
"displayed. If you don't want to do that, please be sure to "
"activate the option<br><br><tt>Enable auto high DPI scaling"
"</tt><br><br>in <tt>Preferences > Application > "
"Interface</tt>, in case Spyder is not displayed "
"correctly.<br><br>"
"Do you want to restart Spyder?"))
self.dpi_messagebox.addButton(_('Restart now'), QMessageBox.NoRole)
dismiss_button = self.dpi_messagebox.addButton(
_('Dismiss'), QMessageBox.NoRole)
self.dpi_messagebox.setDefaultButton(dismiss_button)
self.dpi_messagebox.finished.connect(
lambda result: self.handle_dpi_change_response(result, dpi))
self.dpi_messagebox.open()
# Show dialog always in the primary screen to prevent not being
# able to see it if a screen gets disconnected while
# in suspended state. See spyder-ide/spyder#16390
dpi_messagebox_width = self.dpi_messagebox.rect().width()
dpi_messagebox_height = self.dpi_messagebox.rect().height()
screen_geometry = QGuiApplication.primaryScreen().geometry()
x = (screen_geometry.width() - dpi_messagebox_width) / 2
y = (screen_geometry.height() - dpi_messagebox_height) / 2
# Convert coordinates to int to avoid a TypeError in Python 3.10
# Fixes spyder-ide/spyder#17677
self.dpi_messagebox.move(int(x), int(y))
self.dpi_messagebox.adjustSize()
|
ApplicationContainer
|
python
|
pennersr__django-allauth
|
allauth/account/views.py
|
{
"start": 18829,
"end": 20162
}
|
class ____(AjaxCapableProcessFormViewMixin, NextRedirectMixin, FormView):
template_name = "account/password_set." + app_settings.TEMPLATE_EXTENSION
form_class = SetPasswordForm
def get_form_class(self):
return get_form_class(app_settings.FORMS, "set_password", self.form_class)
@sensitive_post_parameters_m
def dispatch(self, request, *args, **kwargs):
if self.request.user.has_usable_password():
return HttpResponseRedirect(reverse("account_change_password"))
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
def get_default_success_url(self):
return get_adapter().get_password_change_redirect_url(self.request)
def form_valid(self, form):
form.save()
flows.password_change.finalize_password_set(self.request, form.user)
return super().form_valid(form)
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
# NOTE: For backwards compatibility
ret["password_set_form"] = ret.get("form")
# (end NOTE)
return ret
password_set = PasswordSetView.as_view()
@method_decorator(login_not_required, name="dispatch")
|
PasswordSetView
|
python
|
Textualize__textual
|
src/textual/highlight.py
|
{
"start": 324,
"end": 4835
}
|
class ____:
"""Contains the style definition for user with the highlight method."""
STYLES: dict[TokenType, str] = {
Token.Comment: "$text 60%",
Token.Error: "$text-error on $error-muted",
Token.Generic.Strong: "bold",
Token.Generic.Emph: "italic",
Token.Generic.Error: "$text-error on $error-muted",
Token.Generic.Heading: "$text-primary underline",
Token.Generic.Subheading: "$text-primary",
Token.Keyword: "$text-accent",
Token.Keyword.Constant: "bold $text-success 80%",
Token.Keyword.Namespace: "$text-error",
Token.Keyword.Type: "bold",
Token.Literal.Number: "$text-warning",
Token.Literal.String.Backtick: "$text 60%",
Token.Literal.String: "$text-success 90%",
Token.Literal.String.Doc: "$text-success 80% italic",
Token.Literal.String.Double: "$text-success 90%",
Token.Name: "$text-primary",
Token.Name.Attribute: "$text-warning",
Token.Name.Builtin: "$text-accent",
Token.Name.Builtin.Pseudo: "italic",
Token.Name.Class: "$text-warning bold",
Token.Name.Constant: "$text-error",
Token.Name.Decorator: "$text-primary bold",
Token.Name.Function: "$text-warning underline",
Token.Name.Function.Magic: "$text-warning underline",
Token.Name.Tag: "$text-primary bold",
Token.Name.Variable: "$text-secondary",
Token.Number: "$text-warning",
Token.Operator: "bold",
Token.Operator.Word: "bold $text-error",
Token.String: "$text-success",
Token.Whitespace: "",
}
def guess_language(code: str, path: str | None) -> str:
"""Guess the language based on the code and path.
The result may be used in the [highlight][textual.highlight.highlight] function.
Args:
code: The code to guess from.
path: A path to the code.
Returns:
The language, suitable for use with Pygments.
"""
if path and os.path.splitext(path)[-1] == ".tcss":
# A special case for TCSS files which aren't known outside of Textual
return "scss"
lexer: Lexer | None = None
lexer_name = "default"
if code:
if path:
try:
lexer = guess_lexer_for_filename(path, code)
except ClassNotFound:
pass
if lexer is None:
from pygments.lexers import guess_lexer
try:
lexer = guess_lexer(code)
except Exception:
pass
if not lexer and path:
try:
_, ext = os.path.splitext(path)
if ext:
extension = ext.lstrip(".").lower()
lexer = get_lexer_by_name(extension)
except ClassNotFound:
pass
if lexer:
if lexer.aliases:
lexer_name = lexer.aliases[0]
else:
lexer_name = lexer.name
return lexer_name
def highlight(
code: str,
*,
language: str | None = None,
path: str | None = None,
theme: type[HighlightTheme] = HighlightTheme,
tab_size: int = 8,
) -> Content:
"""Apply syntax highlighting to a string.
Args:
code: A string to highlight.
language: The language to highlight.
path: An optional path to the code, used to guess the language when it is not given.
theme: A HighlightTheme class (type not instance).
tab_size: Number of spaces in a tab.
Returns:
A Content instance which may be used in a widget.
"""
if not language:
language = guess_language(code, path)
assert language is not None
code = "\n".join(code.splitlines())
try:
lexer = get_lexer_by_name(
language,
stripnl=False,
ensurenl=True,
tabsize=tab_size,
)
except ClassNotFound:
lexer = get_lexer_by_name(
"text",
stripnl=False,
ensurenl=True,
tabsize=tab_size,
)
token_start = 0
spans: list[Span] = []
styles = theme.STYLES
for token_type, token in lexer.get_tokens(code):
token_end = token_start + len(token)
while True:
if style := styles.get(token_type):
spans.append(Span(token_start, token_end, style))
break
if (token_type := token_type.parent) is None:
break
token_start = token_end
highlighted_code = Content(code, spans=spans).stylize_before("$text")
return highlighted_code
|
HighlightTheme
|
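A small usage sketch for the function above (assuming, as in recent Textual, that a Static widget accepts the returned Content directly):

from textual.app import App, ComposeResult
from textual.highlight import highlight
from textual.widgets import Static

SOURCE = "def add(a: int, b: int) -> int:\n    return a + b\n"

class HighlightApp(App):
    def compose(self) -> ComposeResult:
        # language is passed explicitly, so guess_language() is skipped
        yield Static(highlight(SOURCE, language="python"))

if __name__ == "__main__":
    HighlightApp().run()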
python
|
langchain-ai__langchain
|
libs/core/langchain_core/tracers/core.py
|
{
"start": 885,
"end": 23330
}
|
class ____(ABC):
"""Abstract base class for tracers.
This class provides common methods, and reusable methods for tracers.
"""
log_missing_parent: bool = True
def __init__(
self,
*,
_schema_format: Literal[
"original", "streaming_events", "original+chat"
] = "original",
**kwargs: Any,
) -> None:
"""Initialize the tracer.
Args:
_schema_format: Primarily changes how the inputs and outputs are
handled. For internal use only. This API will change.
- 'original' is the format used by all current tracers.
This format is slightly inconsistent with respect to inputs
and outputs.
- 'streaming_events' is used for supporting streaming events,
for internal usage. It will likely change in the future, or
be deprecated entirely in favor of a dedicated async tracer
for streaming events.
- 'original+chat' is a format that is the same as 'original'
except it does NOT raise an attribute error on_chat_model_start
**kwargs: Additional keyword arguments that will be passed to
the superclass.
"""
super().__init__(**kwargs)
self._schema_format = _schema_format # For internal use only API will change.
self.run_map: dict[str, Run] = {}
"""Map of run ID to run. Cleared on run end."""
self.order_map: dict[UUID, tuple[UUID, str]] = {}
"""Map of run ID to (trace_id, dotted_order). Cleared when tracer GCed."""
@abstractmethod
def _persist_run(self, run: Run) -> Coroutine[Any, Any, None] | None:
"""Persist a run."""
@staticmethod
def _add_child_run(
parent_run: Run,
child_run: Run,
) -> None:
"""Add child run to a chain run or tool run."""
parent_run.child_runs.append(child_run)
@staticmethod
def _get_stacktrace(error: BaseException) -> str:
"""Get the stacktrace of the parent error."""
msg = repr(error)
try:
tb = traceback.format_exception(error)
return (msg + "\n\n".join(tb)).strip()
except: # noqa: E722
return msg
def _start_trace(self, run: Run) -> Coroutine[Any, Any, None] | None: # type: ignore[return]
current_dotted_order = run.start_time.strftime("%Y%m%dT%H%M%S%fZ") + str(run.id)
if run.parent_run_id:
if parent := self.order_map.get(run.parent_run_id):
run.trace_id, run.dotted_order = parent
run.dotted_order += "." + current_dotted_order
if parent_run := self.run_map.get(str(run.parent_run_id)):
self._add_child_run(parent_run, run)
else:
if self.log_missing_parent:
logger.debug(
"Parent run %s not found for run %s. Treating as a root run.",
run.parent_run_id,
run.id,
)
run.parent_run_id = None
run.trace_id = run.id
run.dotted_order = current_dotted_order
else:
run.trace_id = run.id
run.dotted_order = current_dotted_order
self.order_map[run.id] = (run.trace_id, run.dotted_order)
self.run_map[str(run.id)] = run
def _get_run(self, run_id: UUID, run_type: str | set[str] | None = None) -> Run:
try:
run = self.run_map[str(run_id)]
except KeyError as exc:
msg = f"No indexed run ID {run_id}."
raise TracerException(msg) from exc
if isinstance(run_type, str):
run_types: set[str] | None = {run_type}
else:
run_types = run_type
if run_types is not None and run.run_type not in run_types:
msg = (
f"Found {run.run_type} run at ID {run_id}, "
f"but expected {run_types} run."
)
raise TracerException(msg)
return run
def _create_chat_model_run(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
**kwargs: Any,
) -> Run:
"""Create a chat model run."""
if self._schema_format not in {"streaming_events", "original+chat"}:
# Please keep this un-implemented for backwards compatibility.
# When it's unimplemented old tracers that use the "original" format
# fallback on the on_llm_start method implementation if they
# find that the on_chat_model_start method is not implemented.
# This can eventually be cleaned up by writing a "modern" tracer
# that has all the updated schema changes corresponding to
# the "streaming_events" format.
msg = (
f"Chat model tracing is not supported in "
f"for {self._schema_format} format."
)
raise NotImplementedError(msg)
start_time = datetime.now(timezone.utc)
if metadata:
kwargs.update({"metadata": metadata})
return Run(
id=run_id,
parent_run_id=parent_run_id,
serialized=serialized,
inputs={"messages": [[dumpd(msg) for msg in batch] for batch in messages]},
extra=kwargs,
events=[{"name": "start", "time": start_time}],
start_time=start_time,
# WARNING: This is valid ONLY for streaming_events.
# run_type="llm" is what's used by virtually all tracers.
# Changing this to "chat_model" may break triggering on_llm_start
run_type="chat_model",
tags=tags,
name=name, # type: ignore[arg-type]
)
def _create_llm_run(
self,
serialized: dict[str, Any],
prompts: list[str],
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
**kwargs: Any,
) -> Run:
"""Create a llm run."""
start_time = datetime.now(timezone.utc)
if metadata:
kwargs.update({"metadata": metadata})
return Run(
id=run_id,
parent_run_id=parent_run_id,
serialized=serialized,
# TODO: Figure out how to expose kwargs here
inputs={"prompts": prompts},
extra=kwargs,
events=[{"name": "start", "time": start_time}],
start_time=start_time,
run_type="llm",
tags=tags or [],
name=name, # type: ignore[arg-type]
)
def _llm_run_with_token_event(
self,
token: str,
run_id: UUID,
chunk: GenerationChunk | ChatGenerationChunk | None = None,
parent_run_id: UUID | None = None, # noqa: ARG002
) -> Run:
"""Append token event to LLM run and return the run."""
llm_run = self._get_run(run_id, run_type={"llm", "chat_model"})
event_kwargs: dict[str, Any] = {"token": token}
if chunk:
event_kwargs["chunk"] = chunk
llm_run.events.append(
{
"name": "new_token",
"time": datetime.now(timezone.utc),
"kwargs": event_kwargs,
},
)
return llm_run
def _llm_run_with_retry_event(
self,
retry_state: RetryCallState,
run_id: UUID,
) -> Run:
llm_run = self._get_run(run_id)
retry_d: dict[str, Any] = {
"slept": retry_state.idle_for,
"attempt": retry_state.attempt_number,
}
if retry_state.outcome is None:
retry_d["outcome"] = "N/A"
elif retry_state.outcome.failed:
retry_d["outcome"] = "failed"
exception = retry_state.outcome.exception()
retry_d["exception"] = str(exception)
retry_d["exception_type"] = exception.__class__.__name__
else:
retry_d["outcome"] = "success"
retry_d["result"] = str(retry_state.outcome.result())
llm_run.events.append(
{
"name": "retry",
"time": datetime.now(timezone.utc),
"kwargs": retry_d,
},
)
return llm_run
def _complete_llm_run(self, response: LLMResult, run_id: UUID) -> Run:
llm_run = self._get_run(run_id, run_type={"llm", "chat_model"})
if getattr(llm_run, "outputs", None) is None:
llm_run.outputs = {}
else:
llm_run.outputs = cast("dict[str, Any]", llm_run.outputs)
if not llm_run.extra.get("__omit_auto_outputs", False):
llm_run.outputs.update(response.model_dump())
for i, generations in enumerate(response.generations):
for j, generation in enumerate(generations):
output_generation = llm_run.outputs["generations"][i][j]
if "message" in output_generation:
output_generation["message"] = dumpd(
cast("ChatGeneration", generation).message
)
llm_run.end_time = datetime.now(timezone.utc)
llm_run.events.append({"name": "end", "time": llm_run.end_time})
return llm_run
def _errored_llm_run(
self, error: BaseException, run_id: UUID, response: LLMResult | None = None
) -> Run:
llm_run = self._get_run(run_id, run_type={"llm", "chat_model"})
llm_run.error = self._get_stacktrace(error)
if response:
if getattr(llm_run, "outputs", None) is None:
llm_run.outputs = {}
else:
llm_run.outputs = cast("dict[str, Any]", llm_run.outputs)
if not llm_run.extra.get("__omit_auto_outputs", False):
llm_run.outputs.update(response.model_dump())
for i, generations in enumerate(response.generations):
for j, generation in enumerate(generations):
output_generation = llm_run.outputs["generations"][i][j]
if "message" in output_generation:
output_generation["message"] = dumpd(
cast("ChatGeneration", generation).message
)
llm_run.end_time = datetime.now(timezone.utc)
llm_run.events.append({"name": "error", "time": llm_run.end_time})
return llm_run
def _create_chain_run(
self,
serialized: dict[str, Any],
inputs: dict[str, Any],
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
run_type: str | None = None,
name: str | None = None,
**kwargs: Any,
) -> Run:
"""Create a chain Run."""
start_time = datetime.now(timezone.utc)
if metadata:
kwargs.update({"metadata": metadata})
return Run(
id=run_id,
parent_run_id=parent_run_id,
serialized=serialized,
inputs=self._get_chain_inputs(inputs),
extra=kwargs,
events=[{"name": "start", "time": start_time}],
start_time=start_time,
child_runs=[],
run_type=run_type or "chain",
name=name, # type: ignore[arg-type]
tags=tags or [],
)
def _get_chain_inputs(self, inputs: Any) -> Any:
"""Get the inputs for a chain run."""
if self._schema_format in {"original", "original+chat"}:
return inputs if isinstance(inputs, dict) else {"input": inputs}
if self._schema_format == "streaming_events":
return {
"input": inputs,
}
msg = f"Invalid format: {self._schema_format}"
raise ValueError(msg)
def _get_chain_outputs(self, outputs: Any) -> Any:
"""Get the outputs for a chain run."""
if self._schema_format in {"original", "original+chat"}:
return outputs if isinstance(outputs, dict) else {"output": outputs}
if self._schema_format == "streaming_events":
return {
"output": outputs,
}
msg = f"Invalid format: {self._schema_format}"
raise ValueError(msg)
def _complete_chain_run(
self,
outputs: dict[str, Any],
run_id: UUID,
inputs: dict[str, Any] | None = None,
) -> Run:
"""Update a chain run with outputs and end time."""
chain_run = self._get_run(run_id)
if getattr(chain_run, "outputs", None) is None:
chain_run.outputs = {}
if not chain_run.extra.get("__omit_auto_outputs", False):
cast("dict[str, Any]", chain_run.outputs).update(
self._get_chain_outputs(outputs)
)
chain_run.end_time = datetime.now(timezone.utc)
chain_run.events.append({"name": "end", "time": chain_run.end_time})
if inputs is not None:
chain_run.inputs = self._get_chain_inputs(inputs)
return chain_run
def _errored_chain_run(
self,
error: BaseException,
inputs: dict[str, Any] | None,
run_id: UUID,
) -> Run:
chain_run = self._get_run(run_id)
chain_run.error = self._get_stacktrace(error)
chain_run.end_time = datetime.now(timezone.utc)
chain_run.events.append({"name": "error", "time": chain_run.end_time})
if inputs is not None:
chain_run.inputs = self._get_chain_inputs(inputs)
return chain_run
def _create_tool_run(
self,
serialized: dict[str, Any],
input_str: str,
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
inputs: dict[str, Any] | None = None,
**kwargs: Any,
) -> Run:
"""Create a tool run."""
start_time = datetime.now(timezone.utc)
if metadata:
kwargs.update({"metadata": metadata})
if self._schema_format in {"original", "original+chat"}:
inputs = {"input": input_str}
elif self._schema_format == "streaming_events":
inputs = {"input": inputs}
else:
msg = f"Invalid format: {self._schema_format}"
raise AssertionError(msg)
return Run(
id=run_id,
parent_run_id=parent_run_id,
serialized=serialized,
# Wrapping in dict since Run requires a dict object.
inputs=inputs,
extra=kwargs,
events=[{"name": "start", "time": start_time}],
start_time=start_time,
child_runs=[],
run_type="tool",
tags=tags or [],
name=name, # type: ignore[arg-type]
)
def _complete_tool_run(
self,
output: dict[str, Any],
run_id: UUID,
) -> Run:
"""Update a tool run with outputs and end time."""
tool_run = self._get_run(run_id, run_type="tool")
if getattr(tool_run, "outputs", None) is None:
tool_run.outputs = {}
if not tool_run.extra.get("__omit_auto_outputs", False):
cast("dict[str, Any]", tool_run.outputs).update({"output": output})
tool_run.end_time = datetime.now(timezone.utc)
tool_run.events.append({"name": "end", "time": tool_run.end_time})
return tool_run
def _errored_tool_run(
self,
error: BaseException,
run_id: UUID,
) -> Run:
"""Update a tool run with error and end time."""
tool_run = self._get_run(run_id, run_type="tool")
tool_run.error = self._get_stacktrace(error)
tool_run.end_time = datetime.now(timezone.utc)
tool_run.events.append({"name": "error", "time": tool_run.end_time})
return tool_run
def _create_retrieval_run(
self,
serialized: dict[str, Any],
query: str,
run_id: UUID,
parent_run_id: UUID | None = None,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
**kwargs: Any,
) -> Run:
"""Create a retrieval run."""
start_time = datetime.now(timezone.utc)
if metadata:
kwargs.update({"metadata": metadata})
return Run(
id=run_id,
name=name or "Retriever",
parent_run_id=parent_run_id,
serialized=serialized,
inputs={"query": query},
extra=kwargs,
events=[{"name": "start", "time": start_time}],
start_time=start_time,
tags=tags,
child_runs=[],
run_type="retriever",
)
def _complete_retrieval_run(
self,
documents: Sequence[Document],
run_id: UUID,
) -> Run:
"""Update a retrieval run with outputs and end time."""
retrieval_run = self._get_run(run_id, run_type="retriever")
if getattr(retrieval_run, "outputs", None) is None:
retrieval_run.outputs = {}
if not retrieval_run.extra.get("__omit_auto_outputs", False):
cast("dict[str, Any]", retrieval_run.outputs).update(
{"documents": documents}
)
retrieval_run.end_time = datetime.now(timezone.utc)
retrieval_run.events.append({"name": "end", "time": retrieval_run.end_time})
return retrieval_run
def _errored_retrieval_run(
self,
error: BaseException,
run_id: UUID,
) -> Run:
retrieval_run = self._get_run(run_id, run_type="retriever")
retrieval_run.error = self._get_stacktrace(error)
retrieval_run.end_time = datetime.now(timezone.utc)
retrieval_run.events.append({"name": "error", "time": retrieval_run.end_time})
return retrieval_run
def __deepcopy__(self, memo: dict) -> _TracerCore:
"""Return self deepcopied."""
return self
def __copy__(self) -> _TracerCore:
"""Return self copied."""
return self
def _end_trace(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""End a trace for a run.
Args:
run: The run.
"""
return None
def _on_run_create(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process a run upon creation.
Args:
run: The created run.
"""
return None
def _on_run_update(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process a run upon update.
Args:
run: The updated run.
"""
return None
def _on_llm_start(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the LLM Run upon start.
Args:
run: The LLM run.
"""
return None
def _on_llm_new_token(
self,
run: Run, # noqa: ARG002
token: str, # noqa: ARG002
chunk: GenerationChunk | ChatGenerationChunk | None, # noqa: ARG002
) -> Coroutine[Any, Any, None] | None:
"""Process new LLM token.
Args:
run: The LLM run.
token: The new token.
chunk: Optional chunk.
"""
return None
def _on_llm_end(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the LLM Run.
Args:
run: The LLM run.
"""
return None
def _on_llm_error(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the LLM Run upon error.
Args:
run: The LLM run.
"""
return None
def _on_chain_start(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the Chain Run upon start.
Args:
run: The chain run.
"""
return None
def _on_chain_end(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the Chain Run.
Args:
run: The chain run.
"""
return None
def _on_chain_error(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the Chain Run upon error.
Args:
run: The chain run.
"""
return None
def _on_tool_start(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the Tool Run upon start.
Args:
run: The tool run.
"""
return None
def _on_tool_end(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the Tool Run.
Args:
run: The tool run.
"""
return None
def _on_tool_error(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the Tool Run upon error.
Args:
run: The tool run.
"""
return None
def _on_chat_model_start(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the Chat Model Run upon start.
Args:
run: The chat model run.
"""
return None
def _on_retriever_start(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the Retriever Run upon start.
Args:
run: The retriever run.
"""
return None
def _on_retriever_end(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the Retriever Run.
Args:
run: The retriever run.
"""
return None
def _on_retriever_error(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
"""Process the Retriever Run upon error.
Args:
run: The retriever run.
"""
return None
|
_TracerCore
|
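Since _persist_run is the only abstract member, a toy subclass makes the lifecycle concrete. This in-memory collector is a sketch, not how the shipped tracers (which build on BaseTracer) persist runs.

class ListTracer(_TracerCore):
    """Collect persisted runs in memory (illustrative only)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.persisted = []

    def _persist_run(self, run):
        # a real tracer would ship the run to a backend here
        self.persisted.append(run)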
python
|
weaviate__weaviate-python-client
|
weaviate/backup/backup_location.py
|
{
"start": 418,
"end": 550
}
|
class ____(_BackupLocationConfig):
"""The dynamic location of a backup for S3."""
path: str
bucket: str
|
_BackupLocationS3
|
python
|
plotly__plotly.py
|
plotly/graph_objs/bar/marker/colorbar/_tickformatstop.py
|
{
"start": 233,
"end": 8529
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "bar.marker.colorbar"
_path_str = "bar.marker.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.bar.marker.col
orbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.bar.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.marker.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Tickformatstop
|
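A usage sketch for the class above (the figure data and format strings are illustrative): tickformatstops switch the colorbar tick label format depending on the current zoom level's dtick.

import plotly.graph_objects as go

fig = go.Figure(
    go.Bar(
        x=["a", "b", "c"],
        y=[1, 3, 2],
        marker=dict(
            color=[1, 3, 2],
            showscale=True,
            colorbar=dict(
                tickformatstops=[
                    go.bar.marker.colorbar.Tickformatstop(
                        dtickrange=[None, 1], value=".2f"
                    ),
                    go.bar.marker.colorbar.Tickformatstop(
                        dtickrange=[1, None], value=".0f"
                    ),
                ]
            ),
        ),
    )
)
fig.show()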