| language (string, 1 class) | repo (string, 346 classes) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
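The columns describe a fill-in-the-blank setup: `source` is a class definition with its name replaced by `____`, `class_span` gives the character offsets of that class in the original file, and `target` is the masked name. Below is a minimal sketch of how a row could be consumed, assuming each row is available as a plain Python dict; the helper name `restore_class_name` and the abbreviated `source` string are illustrative, not part of the dataset.

```python
def restore_class_name(row: dict) -> str:
    """Substitute the masked ``____`` placeholder in ``source`` with ``target``."""
    # Only the first occurrence is replaced, matching the single masked class name.
    return row["source"].replace("class ____", f"class {row['target']}", 1)


# Example row, abbreviated from the keras-team__keras sample shown further down.
example_row = {
    "language": "python",
    "repo": "keras-team__keras",
    "path": "keras/src/layers/core/identity.py",
    "class_span": {"start": 197, "end": 848},
    "source": "class ____(Layer):\n    ...",  # truncated for illustration
    "target": "Identity",
}

print(restore_class_name(example_row))  # -> "class Identity(Layer): ..."
```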
python
|
pydata__xarray
|
xarray/tests/test_merge.py
|
{
"start": 35398,
"end": 38866
}
|
class ____:
def test_mixed(self) -> None:
tree = xr.DataTree()
ds = xr.Dataset()
with pytest.raises(
TypeError,
match="merge does not support mixed type arguments when one argument is a DataTree",
):
xr.merge([tree, ds]) # type: ignore[list-item]
def test_distinct(self) -> None:
tree1 = xr.DataTree.from_dict({"/a/b/c": 1})
tree2 = xr.DataTree.from_dict({"/a/d/e": 2})
expected = xr.DataTree.from_dict({"/a/b/c": 1, "/a/d/e": 2})
merged = xr.merge([tree1, tree2])
assert_equal(merged, expected)
def test_overlap(self) -> None:
tree1 = xr.DataTree.from_dict({"/a/b": 1})
tree2 = xr.DataTree.from_dict({"/a/c": 2})
tree3 = xr.DataTree.from_dict({"/a/d": 3})
expected = xr.DataTree.from_dict({"/a/b": 1, "/a/c": 2, "/a/d": 3})
merged = xr.merge([tree1, tree2, tree3])
assert_equal(merged, expected)
def test_inherited(self) -> None:
tree1 = xr.DataTree.from_dict({"/a/b": ("x", [1])}, coords={"x": [0]})
tree2 = xr.DataTree.from_dict({"/a/c": ("x", [2])})
expected = xr.DataTree.from_dict(
{"/a/b": ("x", [1]), "a/c": ("x", [2])}, coords={"x": [0]}
)
merged = xr.merge([tree1, tree2])
assert_equal(merged, expected)
def test_inherited_join(self) -> None:
tree1 = xr.DataTree.from_dict({"/a/b": ("x", [0, 1])}, coords={"x": [0, 1]})
tree2 = xr.DataTree.from_dict({"/a/c": ("x", [1, 2])}, coords={"x": [1, 2]})
expected = xr.DataTree.from_dict(
{"/a/b": ("x", [0, 1]), "a/c": ("x", [np.nan, 1])}, coords={"x": [0, 1]}
)
merged = xr.merge([tree1, tree2], join="left")
assert_equal(merged, expected)
expected = xr.DataTree.from_dict(
{"/a/b": ("x", [1, np.nan]), "a/c": ("x", [1, 2])}, coords={"x": [1, 2]}
)
merged = xr.merge([tree1, tree2], join="right")
assert_equal(merged, expected)
expected = xr.DataTree.from_dict(
{"/a/b": ("x", [1]), "a/c": ("x", [1])}, coords={"x": [1]}
)
merged = xr.merge([tree1, tree2], join="inner")
assert_equal(merged, expected)
expected = xr.DataTree.from_dict(
{"/a/b": ("x", [0, 1, np.nan]), "a/c": ("x", [np.nan, 1, 2])},
coords={"x": [0, 1, 2]},
)
merged = xr.merge([tree1, tree2], join="outer")
assert_equal(merged, expected)
with pytest.raises(
xr.AlignmentError,
match=re.escape("cannot align objects with join='exact'"),
):
xr.merge([tree1, tree2], join="exact")
def test_merge_error_includes_path(self) -> None:
tree1 = xr.DataTree.from_dict({"/a/b": ("x", [0, 1])})
tree2 = xr.DataTree.from_dict({"/a/b": ("x", [1, 2])})
with pytest.raises(
xr.MergeError,
match=re.escape(
"Raised whilst mapping function over node(s) with path 'a'"
),
):
xr.merge([tree1, tree2], join="exact")
def test_fill_value_errors(self) -> None:
trees = [xr.DataTree(), xr.DataTree()]
with pytest.raises(
NotImplementedError,
match=re.escape(
"fill_value is not yet supported for DataTree objects in merge"
),
):
xr.merge(trees, fill_value=None)
|
TestMergeDataTree
|
python
|
tensorflow__tensorflow
|
third_party/xla/xla/backends/cpu/codegen/elemental/concatenate_kernel_emitter_test.py
|
{
"start": 1087,
"end": 3352
}
|
class ____(parameterized.TestCase):
@parameterized.product(
cycle_layout=[True, False],
dtype=[
np.dtype(np.uint8),
np.dtype(np.uint16),
np.dtype(np.uint32),
np.dtype(np.uint64),
np.dtype(np.int8),
np.dtype(np.int16),
np.dtype(np.int32),
np.dtype(np.int64),
np.dtype(np.float16),
np.dtype(np.float32),
np.dtype(np.float64),
],
concat_dimension=[0, 1, 2],
)
def test_concatenate(self, cycle_layout, dtype, concat_dimension):
num_inputs = 5
shape = (4, 4, 4)
np_inputs = [
(np.random.rand(*shape) * 10).astype(dtype) for _ in range(num_inputs)
]
if cycle_layout:
# Give the inputs different layouts to test the slow path.
default_layout = [0, 1, 2]
input_literals = [
create_literal(input_array, np.roll(default_layout, idx))
for idx, input_array in enumerate(np_inputs)
]
else:
input_literals = [
create_literal(input_array) for input_array in np_inputs
]
expected_output = np.concatenate(np_inputs, axis=concat_dimension)
output_literal = create_literal(np.zeros_like(expected_output))
hlo_parameters = [
HloInstruction.create_parameter(idx, literal.shape(), f"input_{idx}")
for [idx, literal] in enumerate(input_literals)
]
hlo_op = HloInstruction.create_concatenate(
output_literal.shape(), hlo_parameters, concat_dimension
)
hlo_module, buffer_assignment = utilities.build_hlo_module(
hlo_op, *hlo_parameters
)
jit_compiler = cpu_testlib.JitCompiler(hlo_module.get_config())
emitter = cpu_testlib.ConcatenateKernelEmitter(
hlo_module.get_root_instruction(),
buffer_assignment,
jit_compiler.get_target_machine(),
)
kernel_definition = emitter.emit_kernel_definition()
self.assertIsNotNone(kernel_definition)
runner = cpu_testlib.KernelRunner.create(kernel_definition, jit_compiler)
runner.call(list(itertools.chain(input_literals, [output_literal])))
np.testing.assert_array_equal(np.asarray(output_literal), expected_output)
if __name__ == "__main__":
absltest.main()
|
ConcatenateKernelRunnerTest
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_transaction.py
|
{
"start": 56076,
"end": 65742
}
|
class ____(FixtureTest):
run_inserts = None
__sparse_driver_backend__ = True
@testing.requires.savepoints
@engines.close_open_connections
def test_contextmanager_nested_rollback(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
def go():
with sess.begin_nested():
sess.add(User()) # name can't be null
sess.flush()
# and not InvalidRequestError
assert_raises(sa_exc.DBAPIError, go)
with sess.begin_nested():
sess.add(User(name="u1"))
eq_(sess.query(User).count(), 1)
def test_contextmanager_commit(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
with sess.begin():
sess.add(User(name="u1"))
sess.rollback()
eq_(sess.query(User).count(), 1)
def test_contextmanager_rollback(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
def go():
with sess.begin():
sess.add(User()) # name can't be null
assert_raises(sa_exc.DBAPIError, go)
eq_(sess.query(User).count(), 0)
sess.close()
with sess.begin():
sess.add(User(name="u1"))
eq_(sess.query(User).count(), 1)
def test_explicit_begin(self):
with fixture_session() as s1:
with s1.begin() as trans:
is_(trans, s1.get_transaction())
s1.connection()
is_(s1._transaction, None)
def test_no_double_begin_explicit(self):
with fixture_session() as s1:
s1.begin()
assert_raises_message(
sa_exc.InvalidRequestError,
"A transaction is already begun on this Session.",
s1.begin,
)
@testing.requires.savepoints
def test_rollback_is_global(self):
users = self.tables.users
with fixture_session() as s1:
s1.begin()
s1.connection().execute(users.insert(), [{"id": 1, "name": "n1"}])
s1.begin_nested()
s1.connection().execute(
users.insert(),
[{"id": 2, "name": "n2"}, {"id": 3, "name": "n3"}],
)
eq_(
s1.connection().scalar(
select(func.count()).select_from(users)
),
3,
)
# rolls back the whole transaction
s1.rollback()
is_(s1.get_transaction(), None)
eq_(
s1.connection().scalar(
select(func.count()).select_from(users)
),
0,
)
s1.commit()
is_(s1.get_transaction(), None)
def test_session_as_ctx_manager_one(self):
users = self.tables.users
with fixture_session() as sess:
is_(sess.get_transaction(), None)
sess.connection().execute(
users.insert().values(id=1, name="user1")
)
eq_(
sess.connection().execute(users.select()).all(), [(1, "user1")]
)
is_not(sess.get_transaction(), None)
is_(sess.get_transaction(), None)
# did not commit
eq_(sess.connection().execute(users.select()).all(), [])
def test_session_as_ctx_manager_two(self):
users = self.tables.users
try:
with fixture_session() as sess:
is_(sess.get_transaction(), None)
sess.connection().execute(
users.insert().values(id=1, name="user1")
)
raise Exception("force rollback")
except:
pass
is_(sess.get_transaction(), None)
def test_begin_context_manager(self):
users = self.tables.users
with fixture_session() as sess:
with sess.begin():
sess.connection().execute(
users.insert().values(id=1, name="user1")
)
eq_(
sess.connection().execute(users.select()).all(),
[(1, "user1")],
)
# committed
eq_(sess.connection().execute(users.select()).all(), [(1, "user1")])
def test_sessionmaker_begin_context_manager(self):
users = self.tables.users
session = sessionmaker(testing.db)
with session.begin() as sess:
sess.connection().execute(
users.insert().values(id=1, name="user1")
)
eq_(
sess.connection().execute(users.select()).all(),
[(1, "user1")],
)
# committed
eq_(sess.connection().execute(users.select()).all(), [(1, "user1")])
sess.close()
def test_begin_context_manager_rollback_trans(self):
users = self.tables.users
try:
with fixture_session() as sess:
with sess.begin():
sess.connection().execute(
users.insert().values(id=1, name="user1")
)
eq_(
sess.connection().execute(users.select()).all(),
[(1, "user1")],
)
raise Exception("force rollback")
except:
pass
# rolled back
eq_(sess.connection().execute(users.select()).all(), [])
sess.close()
def test_begin_context_manager_rollback_outer(self):
users = self.tables.users
try:
with fixture_session() as sess:
with sess.begin():
sess.connection().execute(
users.insert().values(id=1, name="user1")
)
eq_(
sess.connection().execute(users.select()).all(),
[(1, "user1")],
)
raise Exception("force rollback")
except:
pass
# committed
eq_(sess.connection().execute(users.select()).all(), [(1, "user1")])
sess.close()
def test_sessionmaker_begin_context_manager_rollback_trans(self):
users = self.tables.users
session = sessionmaker(testing.db)
try:
with session.begin() as sess:
sess.connection().execute(
users.insert().values(id=1, name="user1")
)
eq_(
sess.connection().execute(users.select()).all(),
[(1, "user1")],
)
raise Exception("force rollback")
except:
pass
# rolled back
eq_(sess.connection().execute(users.select()).all(), [])
sess.close()
def test_sessionmaker_begin_context_manager_rollback_outer(self):
users = self.tables.users
session = sessionmaker(testing.db)
try:
with session.begin() as sess:
sess.connection().execute(
users.insert().values(id=1, name="user1")
)
eq_(
sess.connection().execute(users.select()).all(),
[(1, "user1")],
)
raise Exception("force rollback")
except:
pass
# committed
eq_(sess.connection().execute(users.select()).all(), [(1, "user1")])
sess.close()
def test_interrupt_ctxmanager(self, trans_ctx_manager_fixture):
fn = trans_ctx_manager_fixture
session = fixture_session()
fn(session, trans_on_subject=True, execute_on_subject=True)
@testing.combinations((True,), (False,), argnames="rollback")
@testing.combinations((True,), (False,), argnames="expire_on_commit")
@testing.combinations(
("add",),
("modify",),
("delete",),
("begin",),
argnames="check_operation",
)
def test_interrupt_ctxmanager_ops(
self, rollback, expire_on_commit, check_operation
):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
session = fixture_session(expire_on_commit=expire_on_commit)
with session.begin():
u1 = User(id=7, name="u1")
session.add(u1)
with session.begin():
u1.name # unexpire
u2 = User(id=8, name="u1")
session.add(u2)
session.flush()
if rollback:
session.rollback()
else:
session.commit()
with expect_raises_message(
sa_exc.InvalidRequestError,
"Can't operate on closed transaction "
"inside context manager. Please complete the context "
"manager before emitting further commands.",
):
if check_operation == "add":
u3 = User(id=9, name="u2")
session.add(u3)
elif check_operation == "begin":
session.begin()
elif check_operation == "modify":
u1.name = "newname"
elif check_operation == "delete":
session.delete(u1)
|
ContextManagerPlusFutureTest
|
python
|
ray-project__ray
|
python/ray/train/examples/horovod/horovod_tune_example.py
|
{
"start": 481,
"end": 3946
}
|
class ____(torch.nn.Module):
def __init__(self, mode="sq"):
super(Net, self).__init__()
if mode == "square":
self.mode = 0
self.param = torch.nn.Parameter(torch.FloatTensor([1.0, -1.0]))
else:
self.mode = 1
self.param = torch.nn.Parameter(torch.FloatTensor([1.0, -1.0, 1.0]))
def forward(self, x):
if ~self.mode:
return x * x + self.param[0] * x + self.param[1]
else:
return_val = 10 * x * x * x
return_val += self.param[0] * x * x
return_val += self.param[1] * x + self.param[2]
return return_val
def train_loop_per_worker(config):
import horovod.torch as hvd
import torch
hvd.init()
device = ray.train.torch.get_device()
mode = config["mode"]
net = Net(mode).to(device)
optimizer = torch.optim.SGD(
net.parameters(),
lr=config["lr"],
)
optimizer = hvd.DistributedOptimizer(optimizer)
num_steps = 5
print(hvd.size())
np.random.seed(1 + hvd.rank())
torch.manual_seed(1234)
# To ensure consistent initialization across workers,
hvd.broadcast_parameters(net.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
start = time.time()
x_max = config["x_max"]
for step in range(1, num_steps + 1):
features = torch.Tensor(np.random.rand(1) * 2 * x_max - x_max).to(device)
if mode == "square":
labels = sq(features)
else:
labels = qu(features)
optimizer.zero_grad()
outputs = net(features)
loss = torch.nn.MSELoss()(outputs, labels)
loss.backward()
optimizer.step()
time.sleep(0.1)
train.report(dict(loss=loss.item()))
total = time.time() - start
print(f"Took {total:0.3f} s. Avg: {total / num_steps:0.3f} s.")
def tune_horovod(num_workers, num_samples, use_gpu, mode="square", x_max=1.0):
horovod_trainer = HorovodTrainer(
train_loop_per_worker=train_loop_per_worker,
scaling_config=ScalingConfig(num_workers=num_workers, use_gpu=use_gpu),
train_loop_config={"mode": mode, "x_max": x_max},
)
tuner = Tuner(
horovod_trainer,
param_space={"train_loop_config": {"lr": tune.uniform(0.1, 1)}},
tune_config=TuneConfig(mode="min", metric="loss", num_samples=num_samples),
_tuner_kwargs={"fail_fast": True},
)
result_grid = tuner.fit()
print("Best hyperparameters found were: ", result_grid.get_best_result().config)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode", type=str, default="square", choices=["square", "cubic"]
)
parser.add_argument(
"--learning_rate", type=float, default=0.1, dest="learning_rate"
)
parser.add_argument("--x_max", type=float, default=1.0, dest="x_max")
parser.add_argument("--gpu", action="store_true")
parser.add_argument(
"--smoke-test", action="store_true", help=("Finish quickly for testing.")
)
parser.add_argument("--num-workers", type=int, default=2)
args, _ = parser.parse_known_args()
if args.smoke_test:
ray.init(num_cpus=3)
tune_horovod(
num_workers=args.num_workers,
num_samples=2 if args.smoke_test else 10,
use_gpu=args.gpu,
mode=args.mode,
x_max=args.x_max,
)
|
Net
|
python
|
simonw__datasette
|
datasette/events.py
|
{
"start": 3031,
"end": 3678
}
|
class ____(Event):
"""
Event name: ``insert-rows``
Rows were inserted into a table.
:ivar database: The name of the database where the rows were inserted.
:type database: str
:ivar table: The name of the table where the rows were inserted.
:type table: str
:ivar num_rows: The number of rows that were requested to be inserted.
:type num_rows: int
:ivar ignore: Was ignore set?
:type ignore: bool
:ivar replace: Was replace set?
:type replace: bool
"""
name = "insert-rows"
database: str
table: str
num_rows: int
ignore: bool
replace: bool
@dataclass
|
InsertRowsEvent
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-braintree/source_braintree/source.py
|
{
"start": 1195,
"end": 2262
}
|
class ____(RecordExtractor):
"""
Extractor Template for all BrainTree streams.
"""
@staticmethod
def _extract_as_array(results, attribute):
if attribute not in results:
return []
value = results[attribute]
if not isinstance(value, list):
value = [value]
return value
def _get_json_from_resource(self, resource_obj: Union[AttributeGetter, List[AttributeGetter]]):
if isinstance(resource_obj, list):
return [obj if not isinstance(obj, AttributeGetter) else self._get_json_from_resource(obj) for obj in resource_obj]
obj_dict = resource_obj.__dict__
result = dict()
for attr in obj_dict:
if not attr.startswith("_"):
if callable(obj_dict[attr]):
continue
result[attr] = (
self._get_json_from_resource(obj_dict[attr]) if isinstance(obj_dict[attr], (AttributeGetter, list)) else obj_dict[attr]
)
return result
@dataclass
|
BraintreeExtractor
|
python
|
getsentry__sentry
|
src/sentry/integrations/web/organization_integration_setup.py
|
{
"start": 541,
"end": 2075
}
|
class ____(ControlSiloOrganizationView):
required_scope = "org:integrations"
csrf_protect = False
def handle(self, request: HttpRequest, organization, provider_id) -> HttpResponseBase:
scope = sentry_sdk.get_current_scope()
scope.set_transaction_name(f"integration.{provider_id}", source=TransactionSource.VIEW)
pipeline = IntegrationPipeline(
request=request, organization=organization, provider_key=provider_id
)
is_feature_enabled = {}
assert isinstance(
pipeline.provider, IntegrationProvider
), "Pipeline must be an integration provider to get features"
for feature in pipeline.provider.features:
feature_flag_name = "organizations:integrations-%s" % feature.value
try:
features.get(feature_flag_name, None)
is_feature_enabled[feature_flag_name] = features.has(
feature_flag_name, organization
)
except FeatureNotRegistered:
is_feature_enabled[feature_flag_name] = True
if not any(is_feature_enabled.values()):
return pipeline.render_warning(
"At least one feature from this list has to be enabled in order to setup the integration:\n%s"
% "\n".join(is_feature_enabled)
)
if not pipeline.provider.can_add:
raise Http404
pipeline.initialize()
return pipeline.current_step()
|
OrganizationIntegrationSetupView
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/errors.py
|
{
"start": 7287,
"end": 8128
}
|
class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "GraphNotFoundError"
graph_name = graphene.NonNull(graphene.String)
repository_name = graphene.NonNull(graphene.String)
repository_location_name = graphene.NonNull(graphene.String)
def __init__(self, selector):
from dagster_graphql.implementation.utils import GraphSelector
super().__init__()
check.inst_param(selector, "selector", GraphSelector)
self.graph_name = selector.graph_name
self.repository_name = selector.repository_name
self.repository_location_name = selector.location_name
self.message = (
"Could not find Graph "
f"{selector.location_name}.{selector.repository_name}.{selector.graph_name}"
)
|
GrapheneGraphNotFoundError
|
python
|
getsentry__sentry
|
src/sentry/rules/processing/buffer_processing.py
|
{
"start": 1737,
"end": 8332
}
|
class ____(ABC):
buffer_key: ClassVar[str]
buffer_shards: ClassVar[int] = 1 # 1 shard will use the original buffer key
buffer_separator: ClassVar[str] = ":"
option: ClassVar[str | None]
def __init__(self, project_id: int):
self.project_id = project_id
@property
@abstractmethod
def hash_args(self) -> BufferHashKeys:
raise NotImplementedError
@property
@abstractmethod
def processing_task(self) -> Task:
raise NotImplementedError
@classmethod
def get_buffer_keys(cls) -> list[str]:
return [
f"{cls.buffer_key}{cls.buffer_separator}{shard}" if shard > 0 else cls.buffer_key
for shard in range(cls.buffer_shards)
]
@staticmethod
def buffer_backend() -> BufferProtocol:
raise NotImplementedError
delayed_processing_registry = Registry[type[DelayedProcessingBase]]()
def fetch_group_to_event_data(
buffer: BufferProtocol,
project_id: int,
model: type[models.Model],
batch_key: str | None = None,
) -> dict[str, str]:
field: dict[str, models.Model | int | str] = {
"project_id": project_id,
}
if batch_key:
field["batch_key"] = batch_key
return buffer.get_hash(model=model, field=field)
def bucket_num_groups(num_groups: int) -> str:
if num_groups > 1:
magnitude = 10 ** int(math.log10(num_groups))
return f">{magnitude}"
return "1"
def process_in_batches(buffer: BufferProtocol, project_id: int, processing_type: str) -> None:
"""
This will check the number of alertgroup_to_event_data items in the Redis buffer for a project.
If the number is larger than the batch size, it will chunk the items and process them in batches.
The batches are replicated into a new redis hash with a unique filter (a uuid) to identify the batch.
We need to use a UUID because these batches can be created in multiple processes and we need to ensure
uniqueness across all of them for the centralized redis buffer. The batches are stored in redis because
we shouldn't pass objects that need to be pickled and 10k items could be problematic in tasks
as arguments could be problematic. Finally, we can't use a pagination system on the data because
redis doesn't maintain the sort order of the hash keys.
`processing_task` will fetch the batch from redis and process the rules.
"""
batch_size = options.get("delayed_processing.batch_size")
should_emit_logs = options.get("delayed_processing.emit_logs")
log_format = "{}.{}"
try:
processing_info = delayed_processing_registry.get(processing_type)(project_id)
except NoRegistrationExistsError:
logger.exception(log_format.format(processing_type, "no_registration"))
return
hash_args = processing_info.hash_args
task = processing_info.processing_task
filters: dict[str, BufferField] = asdict(hash_args.filters)
event_count = buffer.get_hash_length(model=hash_args.model, field=filters)
metrics.incr(
f"{processing_type}.num_groups", tags={"num_groups": bucket_num_groups(event_count)}
)
metrics.distribution(f"{processing_type}.event_count", event_count)
if event_count < batch_size:
return task.apply_async(
kwargs={"project_id": project_id}, headers={"sentry-propagate-traces": False}
)
if should_emit_logs:
logger.info(
log_format.format(processing_type, "process_large_batch"),
extra={"project_id": project_id, "count": event_count},
)
# if the dictionary is large, get the items and chunk them.
alertgroup_to_event_data = fetch_group_to_event_data(buffer, project_id, hash_args.model)
with metrics.timer(f"{processing_type}.process_batch.duration"):
items = iter(alertgroup_to_event_data.items())
while batch := dict(islice(items, batch_size)):
batch_key = str(uuid.uuid4())
buffer.push_to_hash_bulk(
model=hash_args.model,
filters={**filters, "batch_key": batch_key},
data=batch,
)
# remove the batched items from the project alertgroup_to_event_data
buffer.delete_hash(**asdict(hash_args), fields=list(batch.keys()))
task.apply_async(
kwargs={"project_id": project_id, "batch_key": batch_key},
headers={"sentry-propagate-traces": False},
)
def process_buffer_for_type(processing_type: str, handler: type[DelayedProcessingBase]) -> None:
"""
Process buffers for a specific processing type and handler.
"""
should_emit_logs = options.get("delayed_processing.emit_logs")
if handler.option and not options.get(handler.option):
log_name = f"{processing_type}.disabled"
logger.info(log_name, extra={"option": handler.option})
return
buffer = handler.buffer_backend()
with metrics.timer(f"{processing_type}.process_all_conditions.duration"):
# We need to use a very fresh timestamp here; project scores (timestamps) are
# updated with each relevant event, and some can be updated every few milliseconds.
# The staler this timestamp, the more likely it'll miss some recently updated projects,
# and the more likely we'll have frequently updated projects that are never actually
# retrieved and processed here.
fetch_time = datetime.now(tz=timezone.utc).timestamp()
buffer_keys = handler.get_buffer_keys()
all_project_ids_and_timestamps = buffer.bulk_get_sorted_set(
buffer_keys,
min=0,
max=fetch_time,
)
if should_emit_logs:
log_str = ", ".join(
f"{project_id}: {timestamps}"
for project_id, timestamps in all_project_ids_and_timestamps.items()
)
log_name = f"{processing_type}.project_id_list"
logger.info(log_name, extra={"project_ids": log_str})
project_ids = list(all_project_ids_and_timestamps.keys())
for project_id in project_ids:
process_in_batches(buffer, project_id, processing_type)
buffer.delete_keys(
buffer_keys,
min=0,
max=fetch_time,
)
def process_buffer() -> None:
"""
Process all registered delayed processing types.
"""
for processing_type, handler in delayed_processing_registry.registrations.items():
process_buffer_for_type(processing_type, handler)
|
DelayedProcessingBase
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_eager_relations.py
|
{
"start": 218820,
"end": 223558
}
|
class ____(_fixtures.FixtureTest):
@classmethod
def setup_mappers(cls):
(
users,
Keyword,
items,
order_items,
Order,
Item,
User,
keywords,
item_keywords,
orders,
) = (
cls.tables.users,
cls.classes.Keyword,
cls.tables.items,
cls.tables.order_items,
cls.classes.Order,
cls.classes.Item,
cls.classes.User,
cls.tables.keywords,
cls.tables.item_keywords,
cls.tables.orders,
)
cls.mapper_registry.map_imperatively(Keyword, keywords)
cls.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword, item_keywords, order_by=item_keywords.c.item_id
)
),
)
cls.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
items=relationship(Item, order_items, order_by=items.c.id)
),
)
cls.mapper_registry.map_imperatively(
User,
users,
properties=dict(orders=relationship(Order, order_by=orders.c.id)),
)
def test_deep_options_1(self):
User = self.classes.User
sess = fixture_session()
# joinedload nothing.
u = sess.query(User).order_by(User.id).all()
def go():
u[0].orders[1].items[0].keywords[1]
self.assert_sql_count(testing.db, go, 3)
def test_deep_options_2(self):
"""test (joined|subquery)load_all() options"""
User, Order, Item = self.classes("User", "Order", "Item")
sess = fixture_session()
result = (
sess.query(User)
.order_by(User.id)
.options(
sa.orm.joinedload(User.orders)
.joinedload(Order.items)
.joinedload(Item.keywords)
)
).all()
def go():
result[0].orders[1].items[0].keywords[1]
self.sql_count_(0, go)
sess = fixture_session()
result = (
sess.query(User).options(
sa.orm.subqueryload(User.orders)
.subqueryload(Order.items)
.subqueryload(Item.keywords)
)
).all()
def go():
result[0].orders[1].items[0].keywords[1]
self.sql_count_(0, go)
def test_deep_options_4(self):
User, Order = (
self.classes.User,
self.classes.Order,
)
sess = fixture_session()
assert_raises_message(
sa.exc.ArgumentError,
r"Mapped class Mapper\[Order\(orders\)\] does not apply to any of "
"the "
r"root entities in this query, e.g. Mapper\[User\(users\)\]. "
"Please specify the full path from one of the root entities "
"to the target attribute.",
sess.query(User)
.options(sa.orm.joinedload(Order.items))
._compile_context,
)
def test_deep_options_5(self):
Item, User, Order = (
self.classes.Item,
self.classes.User,
self.classes.Order,
)
sess = fixture_session()
# joinedload "keywords" on items. it will lazy load "orders", then
# lazy load the "items" on the order, but on "items" it will eager
# load the "keywords"
q3 = (
sess.query(User)
.order_by(User.id)
.options(
sa.orm.defaultload(User.orders)
.defaultload(Order.items)
.joinedload(Item.keywords)
)
)
u = q3.all()
def go():
u[0].orders[1].items[0].keywords[1]
self.sql_count_(2, go)
def test_deep_options_6(self):
Item, User, Order = (
self.classes.Item,
self.classes.User,
self.classes.Order,
)
sess = fixture_session()
q3 = (
sess.query(User)
.order_by(User.id)
.options(
# this syntax means:
# defautload(User.orders).defaultload(Order.items).
# joinedload(Item.keywords)
#
# intuitive right ? :)
sa.orm.joinedload(User.orders, Order.items, Item.keywords)
)
)
u = q3.all()
def go():
u[0].orders[1].items[0].keywords[1]
self.sql_count_(2, go)
|
DeepOptionsTest
|
python
|
modin-project__modin
|
modin/tests/pandas/native_df_interoperability/test_compiler_caster.py
|
{
"start": 4354,
"end": 5029
}
|
class ____(CalculatorTestQc):
"Represents a local network cluster query compiler"
def get_backend(self):
return "Cluster"
@classmethod
def max_cost(cls):
return QCCoercionCost.COST_HIGH
def move_to_cost(self, other_qc_cls, api_cls_name, op, arguments):
return {
CloudQC: QCCoercionCost.COST_MEDIUM,
CloudQCHighSelf: QCCoercionCost.COST_MEDIUM,
ClusterQC: QCCoercionCost.COST_ZERO,
DefaultQC: None, # cluster qc knows nothing about default qc
LocalMachineQC: QCCoercionCost.COST_MEDIUM,
PicoQC: QCCoercionCost.COST_HIGH,
}.get(other_qc_cls)
|
ClusterQC
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/instrumentation.py
|
{
"start": 2193,
"end": 2394
}
|
class ____(Protocol):
def __call__(
self,
state: state.InstanceState[Any],
toload: Set[str],
passive: base.PassiveFlag,
) -> None: ...
|
_ExpiredAttributeLoaderProto
|
python
|
django__django
|
tests/queries/tests.py
|
{
"start": 100846,
"end": 103125
}
|
class ____(TestCase):
def test_evaluated_queryset_as_argument(self):
"""
If a queryset is already evaluated, it can still be used as a query
arg.
"""
n = Note(note="Test1", misc="misc")
n.save()
e = ExtraInfo(info="good", note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
# Make one of cached results unpickable.
n_list._result_cache[0].error = UnpickleableError()
with self.assertRaises(UnpickleableError):
pickle.dumps(n_list)
# Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, "good")
def test_no_model_options_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta)
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail(
"Model options shouldn't be cloned."
)
try:
Note.objects.filter(pk__lte=F("pk") + 1).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
def test_no_fields_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta.get_field("misc"))
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail(
"Model fields shouldn't be cloned"
)
try:
Note.objects.filter(note=F("misc")).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
|
CloneTests
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_ingress_tls.py
|
{
"start": 383,
"end": 5274
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'hosts': 'list[str]',
'secret_name': 'str'
}
attribute_map = {
'hosts': 'hosts',
'secret_name': 'secretName'
}
def __init__(self, hosts=None, secret_name=None, local_vars_configuration=None): # noqa: E501
"""V1IngressTLS - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._hosts = None
self._secret_name = None
self.discriminator = None
if hosts is not None:
self.hosts = hosts
if secret_name is not None:
self.secret_name = secret_name
@property
def hosts(self):
"""Gets the hosts of this V1IngressTLS. # noqa: E501
hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified. # noqa: E501
:return: The hosts of this V1IngressTLS. # noqa: E501
:rtype: list[str]
"""
return self._hosts
@hosts.setter
def hosts(self, hosts):
"""Sets the hosts of this V1IngressTLS.
hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified. # noqa: E501
:param hosts: The hosts of this V1IngressTLS. # noqa: E501
:type: list[str]
"""
self._hosts = hosts
@property
def secret_name(self):
"""Gets the secret_name of this V1IngressTLS. # noqa: E501
secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the \"Host\" header is used for routing. # noqa: E501
:return: The secret_name of this V1IngressTLS. # noqa: E501
:rtype: str
"""
return self._secret_name
@secret_name.setter
def secret_name(self, secret_name):
"""Sets the secret_name of this V1IngressTLS.
secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the \"Host\" header is used for routing. # noqa: E501
:param secret_name: The secret_name of this V1IngressTLS. # noqa: E501
:type: str
"""
self._secret_name = secret_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1IngressTLS):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1IngressTLS):
return True
return self.to_dict() != other.to_dict()
|
V1IngressTLS
|
python
|
wandb__wandb
|
wandb/apis/public/artifacts.py
|
{
"start": 11183,
"end": 23714
}
|
class ____:
"""An artifact collection that represents a group of related artifacts.
Args:
client: The client instance to use for querying W&B.
entity: The entity (user or team) that owns the project.
project: The name of the project to query for artifact collections.
name: The name of the artifact collection.
type: The type of the artifact collection (e.g., "dataset", "model").
organization: Optional organization name if applicable.
attrs: Optional mapping of attributes to initialize the artifact collection.
If not provided, the object will load its attributes from W&B upon
initialization.
<!-- lazydoc-ignore-init: internal -->
"""
_saved: ArtifactCollectionData
"""The saved artifact collection data as last fetched from the W&B server."""
_current: ArtifactCollectionData
"""The local, editable artifact collection data."""
def __init__(
self,
client: Client,
entity: str,
project: str,
name: str,
type: str,
organization: str | None = None,
attrs: ArtifactCollectionFragment | None = None,
):
self.client = client
# FIXME: Make this lazy, so we don't (re-)fetch the attributes until they are needed
self._update_data(attrs or self.load(entity, project, type, name))
self.organization = organization
def _update_data(self, fragment: ArtifactCollectionFragment) -> None:
"""Update the saved/current state of this collection with the given fragment.
Can be used after receiving a GraphQL response with ArtifactCollection data.
"""
# Separate "saved" vs "current" copies of the artifact collection data
validated = ArtifactCollectionData.from_fragment(fragment)
self._saved = validated
self._current = validated.model_copy(deep=True)
@property
def id(self) -> str:
"""The unique identifier of the artifact collection."""
return self._current.id
@property
def entity(self) -> str:
"""The entity (user or team) that owns the project."""
return self._current.entity
@property
def project(self) -> str:
"""The project that contains the artifact collection."""
return self._current.project
@normalize_exceptions
def artifacts(self, per_page: int = 50) -> Artifacts:
"""Get all artifacts in the collection."""
return Artifacts(
client=self.client,
entity=self.entity,
project=self.project,
# Use the saved name and type, as they're mutable attributes
# and may have been edited locally.
collection_name=self._saved.name,
type=self._saved.type,
per_page=per_page,
)
@property
def aliases(self) -> list[str]:
"""The aliases for all artifact versions contained in this collection."""
if self._saved.aliases is None:
aliases = list(
_ArtifactCollectionAliases(self.client, collection_id=self.id)
)
self._saved.aliases = aliases
self._current.aliases = aliases.copy()
return list(self._saved.aliases)
@property
def created_at(self) -> str:
"""The creation date of the artifact collection."""
return self._saved.created_at
def load(
self, entity: str, project: str, artifact_type: str, name: str
) -> ArtifactCollectionFragment:
"""Fetch and return the validated artifact collection data from W&B.
<!-- lazydoc-ignore: internal -->
"""
from wandb.sdk.artifacts._generated import (
PROJECT_ARTIFACT_COLLECTION_GQL,
ProjectArtifactCollection,
)
gql_op = gql_compat(PROJECT_ARTIFACT_COLLECTION_GQL)
gql_vars = {
"entity": entity,
"project": project,
"artifactType": artifact_type,
"name": name,
}
data = self.client.execute(gql_op, variable_values=gql_vars)
result = ProjectArtifactCollection.model_validate(data)
if not (
result.project
and (proj := result.project)
and (type_ := proj.artifact_type)
and (collection := type_.artifact_collection)
):
raise ValueError(f"Could not find artifact type {artifact_type!s}")
return collection
@normalize_exceptions
def change_type(self, new_type: str) -> None:
"""Deprecated, change type directly with `save` instead."""
from wandb.sdk.artifacts._generated import (
UPDATE_ARTIFACT_SEQUENCE_TYPE_GQL,
MoveArtifactSequenceInput,
)
from wandb.sdk.artifacts._validators import validate_artifact_type
warn_and_record_deprecation(
feature=Deprecated(artifact_collection__change_type=True),
message="ArtifactCollection.change_type(type) is deprecated, use ArtifactCollection.save() instead.",
)
if (old_type := self._saved.type) != new_type:
try:
validate_artifact_type(old_type, self.name)
except ValueError as e:
raise ValueError(
f"The current type {old_type!r} is an internal type and cannot be changed."
) from e
# Check that the new type is not going to conflict with internal types
new_type = validate_artifact_type(new_type, self.name)
if not self.is_sequence():
raise ValueError("Artifact collection needs to be a sequence")
termlog(f"Changing artifact collection type of {old_type!r} to {new_type!r}")
gql_op = gql(UPDATE_ARTIFACT_SEQUENCE_TYPE_GQL)
gql_input = MoveArtifactSequenceInput(
artifact_sequence_id=self.id,
destination_artifact_type_name=new_type,
)
self.client.execute(gql_op, variable_values={"input": gql_input.model_dump()})
self._saved.type = new_type
self._current.type = new_type
def is_sequence(self) -> bool:
"""Return whether the artifact collection is a sequence."""
return self._saved.is_sequence
@normalize_exceptions
def delete(self) -> None:
"""Delete the entire artifact collection."""
from wandb.sdk.artifacts._generated import (
DELETE_ARTIFACT_PORTFOLIO_GQL,
DELETE_ARTIFACT_SEQUENCE_GQL,
)
gql_op = gql(
DELETE_ARTIFACT_SEQUENCE_GQL
if self.is_sequence()
else DELETE_ARTIFACT_PORTFOLIO_GQL
)
self.client.execute(gql_op, variable_values={"id": self.id})
@property
def description(self) -> str | None:
"""A description of the artifact collection."""
return self._current.description
@description.setter
def description(self, description: str | None) -> None:
"""Set the description of the artifact collection."""
self._current.description = description
@property
def tags(self) -> list[str]:
"""The tags associated with the artifact collection."""
return self._current.tags
@tags.setter
def tags(self, tags: Collection[str]) -> None:
"""Set the tags associated with the artifact collection."""
self._current.tags = tags
@property
def name(self) -> str:
"""The name of the artifact collection."""
return self._current.name
@name.setter
def name(self, name: str) -> None:
"""Set the name of the artifact collection."""
self._current.name = name
@property
def type(self):
"""Returns the type of the artifact collection."""
return self._current.type
@type.setter
def type(self, type: str) -> None:
"""Set the type of the artifact collection."""
if not self.is_sequence():
raise ValueError(
"Type can only be changed if the artifact collection is a sequence."
)
self._current.type = type
def _update_collection(self) -> None:
from wandb.sdk.artifacts._generated import (
UPDATE_ARTIFACT_PORTFOLIO_GQL,
UPDATE_ARTIFACT_SEQUENCE_GQL,
UpdateArtifactPortfolioInput,
UpdateArtifactSequenceInput,
)
if self.is_sequence():
gql_op = gql(UPDATE_ARTIFACT_SEQUENCE_GQL)
gql_input = UpdateArtifactSequenceInput(
artifact_sequence_id=self.id,
name=self.name,
description=self.description,
)
else:
gql_op = gql(UPDATE_ARTIFACT_PORTFOLIO_GQL)
gql_input = UpdateArtifactPortfolioInput(
artifact_portfolio_id=self.id,
name=self.name,
description=self.description,
)
self.client.execute(gql_op, variable_values={"input": gql_input.model_dump()})
self._saved.name = self._current.name
self._saved.description = self._current.description
def _update_sequence_type(self) -> None:
from wandb.sdk.artifacts._generated import (
UPDATE_ARTIFACT_SEQUENCE_TYPE_GQL,
MoveArtifactSequenceInput,
)
gql_op = gql(UPDATE_ARTIFACT_SEQUENCE_TYPE_GQL)
gql_input = MoveArtifactSequenceInput(
artifact_sequence_id=self.id,
destination_artifact_type_name=self.type,
)
self.client.execute(gql_op, variable_values={"input": gql_input.model_dump()})
self._saved.type = self._current.type
def _add_tags(self, tag_names: Iterable[str]) -> None:
from wandb.sdk.artifacts._generated import (
ADD_ARTIFACT_COLLECTION_TAGS_GQL,
CreateArtifactCollectionTagAssignmentsInput,
)
gql_op = gql(ADD_ARTIFACT_COLLECTION_TAGS_GQL)
gql_input = CreateArtifactCollectionTagAssignmentsInput(
entity_name=self.entity,
project_name=self.project,
artifact_collection_name=self._saved.name,
tags=[{"tagName": tag} for tag in tag_names],
)
self.client.execute(gql_op, variable_values={"input": gql_input.model_dump()})
def _delete_tags(self, tag_names: Iterable[str]) -> None:
from wandb.sdk.artifacts._generated import (
DELETE_ARTIFACT_COLLECTION_TAGS_GQL,
DeleteArtifactCollectionTagAssignmentsInput,
)
gql_op = gql(DELETE_ARTIFACT_COLLECTION_TAGS_GQL)
gql_input = DeleteArtifactCollectionTagAssignmentsInput(
entity_name=self.entity,
project_name=self.project,
artifact_collection_name=self._saved.name,
tags=[{"tagName": tag} for tag in tag_names],
)
self.client.execute(gql_op, variable_values={"input": gql_input.model_dump()})
@normalize_exceptions
def save(self) -> None:
"""Persist any changes made to the artifact collection."""
from wandb.sdk.artifacts._validators import validate_artifact_type
if (old_type := self._saved.type) != (new_type := self.type):
try:
validate_artifact_type(new_type, self.name)
except ValueError as e:
reason = str(e)
raise ValueError(
f"Failed to save artifact collection {self.name!r}: {reason}"
) from e
try:
validate_artifact_type(old_type, self.name)
except ValueError as e:
reason = f"The current type {old_type!r} is an internal type and cannot be changed."
raise ValueError(
f"Failed to save artifact collection {self.name!r}: {reason}"
) from e
# FIXME: Consider consolidating the multiple GQL mutations into a single call.
self._update_collection()
if self.is_sequence() and (old_type != new_type):
self._update_sequence_type()
if (new_tags := set(self._current.tags)) != (old_tags := set(self._saved.tags)):
if added_tags := (new_tags - old_tags):
self._add_tags(added_tags)
if deleted_tags := (old_tags - new_tags):
self._delete_tags(deleted_tags)
self._saved.tags = copy(new_tags)
def __repr__(self) -> str:
return f"<ArtifactCollection {self.name} ({self.type})>"
|
ArtifactCollection
|
python
|
rapidsai__cudf
|
docs/cudf/source/conf.py
|
{
"start": 22112,
"end": 23964
}
|
class ____(ClassDocumenter):
objtype = "enum"
directivetype = "attribute"
priority = 10 + ClassDocumenter.priority
option_spec = dict(ClassDocumenter.option_spec)
@classmethod
def can_document_member(
cls, member: Any, membername: str, isattr: bool, parent: Any
) -> bool:
try:
return issubclass(
member, (IntEnum, IntFlag)
) and member.__module__.startswith("pylibcudf")
except TypeError:
return False
def add_directive_header(self, sig: str) -> None:
self.directivetype = "attribute"
super().add_directive_header(sig)
def add_content(self, more_content) -> None:
doc_as_attr = self.doc_as_attr
self.doc_as_attr = False
super().add_content(more_content)
self.doc_as_attr = doc_as_attr
source_name = self.get_sourcename()
enum_object: IntEnum = self.object
if self.object.__name__ != "Kind":
self.add_line(
f"See also :cpp:enum:`{self.object.__name__}`.",
source_name,
)
self.add_line("", source_name)
self.add_line("Enum members", source_name)
self.add_line("", source_name)
for the_member_name in enum_object.__members__: # type: ignore[attr-defined]
self.add_line(f"* ``{the_member_name}``", source_name)
self.add_line("", source_name)
def setup(app):
app.add_css_file("https://docs.rapids.ai/assets/css/custom.css")
app.add_js_file(
"https://docs.rapids.ai/assets/js/custom.js", loading_method="defer"
)
app.connect("doctree-read", resolve_aliases)
app.connect("missing-reference", on_missing_reference)
app.setup_extension("sphinx.ext.autodoc")
app.add_autodocumenter(PLCIntEnumDocumenter)
|
PLCIntEnumDocumenter
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/util/langhelpers.py
|
{
"start": 54143,
"end": 59634
}
|
class ____(str):
"""A string subclass that can only be hashed on a maximum amount
of unique values.
This is used for warnings so that we can send out parameterized warnings
without the __warningregistry__ of the module, or the non-overridable
"once" registry within warnings.py, overloading memory,
"""
_hash: int
def __new__(
cls, value: str, num: int, args: Sequence[Any]
) -> _hash_limit_string:
interpolated = (value % args) + (
" (this warning may be suppressed after %d occurrences)" % num
)
self = super().__new__(cls, interpolated)
self._hash = hash("%s_%d" % (value, hash(interpolated) % num))
return self
def __hash__(self) -> int:
return self._hash
def __eq__(self, other: Any) -> bool:
return hash(self) == hash(other)
def warn(msg: str, code: Optional[str] = None) -> None:
"""Issue a warning.
If msg is a string, :class:`.exc.SAWarning` is used as
the category.
"""
if code:
_warnings_warn(exc.SAWarning(msg, code=code))
else:
_warnings_warn(msg, exc.SAWarning)
def warn_limited(msg: str, args: Sequence[Any]) -> None:
"""Issue a warning with a parameterized string, limiting the number
of registrations.
"""
if args:
msg = _hash_limit_string(msg, 10, args)
_warnings_warn(msg, exc.SAWarning)
_warning_tags: Dict[CodeType, Tuple[str, Type[Warning]]] = {}
def tag_method_for_warnings(
message: str, category: Type[Warning]
) -> Callable[[_F], _F]:
def go(fn):
_warning_tags[fn.__code__] = (message, category)
return fn
return go
_not_sa_pattern = re.compile(r"^(?:sqlalchemy\.(?!testing)|alembic\.)")
def _warnings_warn(
message: Union[str, Warning],
category: Optional[Type[Warning]] = None,
stacklevel: int = 2,
) -> None:
if category is None and isinstance(message, Warning):
category = type(message)
# adjust the given stacklevel to be outside of SQLAlchemy
try:
frame = sys._getframe(stacklevel)
except ValueError:
# being called from less than 3 (or given) stacklevels, weird,
# but don't crash
stacklevel = 0
except:
# _getframe() doesn't work, weird interpreter issue, weird,
# ok, but don't crash
stacklevel = 0
else:
stacklevel_found = warning_tag_found = False
while frame is not None:
# using __name__ here requires that we have __name__ in the
# __globals__ of the decorated string functions we make also.
# we generate this using {"__name__": fn.__module__}
if not stacklevel_found and not re.match(
_not_sa_pattern, frame.f_globals.get("__name__", "")
):
# stop incrementing stack level if an out-of-SQLA line
# were found.
stacklevel_found = True
# however, for the warning tag thing, we have to keep
# scanning up the whole traceback
if frame.f_code in _warning_tags:
warning_tag_found = True
(_suffix, _category) = _warning_tags[frame.f_code]
category = category or _category
message = f"{message} ({_suffix})"
frame = frame.f_back # type: ignore[assignment]
if not stacklevel_found:
stacklevel += 1
elif stacklevel_found and warning_tag_found:
break
if category is not None:
warnings.warn(message, category, stacklevel=stacklevel + 1)
else:
warnings.warn(message, stacklevel=stacklevel + 1)
def only_once(
fn: Callable[..., _T], retry_on_exception: bool
) -> Callable[..., Optional[_T]]:
"""Decorate the given function to be a no-op after it is called exactly
once."""
once = [fn]
def go(*arg: Any, **kw: Any) -> Optional[_T]:
# strong reference fn so that it isn't garbage collected,
# which interferes with the event system's expectations
strong_fn = fn # noqa
if once:
once_fn = once.pop()
try:
return once_fn(*arg, **kw)
except:
if retry_on_exception:
once.insert(0, once_fn)
raise
return None
return go
_SQLA_RE = re.compile(r"sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py")
_UNITTEST_RE = re.compile(r"unit(?:2|test2?/)")
def chop_traceback(
tb: List[str],
exclude_prefix: re.Pattern[str] = _UNITTEST_RE,
exclude_suffix: re.Pattern[str] = _SQLA_RE,
) -> List[str]:
"""Chop extraneous lines off beginning and end of a traceback.
:param tb:
a list of traceback lines as returned by ``traceback.format_stack()``
:param exclude_prefix:
a regular expression object matching lines to skip at beginning of
``tb``
:param exclude_suffix:
a regular expression object matching lines to skip at end of ``tb``
"""
start = 0
end = len(tb) - 1
while start <= end and exclude_prefix.search(tb[start]):
start += 1
while start <= end and exclude_suffix.search(tb[end]):
end -= 1
return tb[start : end + 1]
def attrsetter(attrname):
code = "def set(obj, value): obj.%s = value" % attrname
env = locals().copy()
exec(code, env)
return env["set"]
_dunders = re.compile("^__.+__$")
|
_hash_limit_string
|
python
|
astropy__astropy
|
astropy/io/fits/hdu/compressed/_quantization.py
|
{
"start": 503,
"end": 3988
}
|
class ____:
"""
Quantization of floating-point data following the FITS standard.
"""
def __init__(
self, *, row: int, dither_method: int, quantize_level: int, bitpix: int
):
super().__init__()
self.row = row
# TODO: pass dither method as a string instead of int?
self.quantize_level = quantize_level
self.dither_method = dither_method
self.bitpix = bitpix
# NOTE: below we use decode_quantized and encode_quantized instead of
# decode and encode as we need to break with the numcodec API and take/return
# scale and zero in addition to quantized value. We should figure out how
# to properly use the numcodec API for this use case.
def decode_quantized(self, buf, scale, zero):
"""
Unquantize data.
Parameters
----------
buf : bytes or array_like
The buffer to unquantize.
Returns
-------
np.ndarray
The unquantized buffer.
"""
qbytes = np.asarray(buf)
qbytes = qbytes.astype(qbytes.dtype.newbyteorder("="))
# TODO: figure out if we need to support null checking
if self.dither_method == -1:
# For NO_DITHER we should just use the scale and zero directly
return qbytes * scale + zero
if self.bitpix == -32:
ubytes = unquantize_float_c(
qbytes.tobytes(),
self.row,
qbytes.size,
scale,
zero,
self.dither_method,
0,
0,
0.0,
qbytes.dtype.itemsize,
)
elif self.bitpix == -64:
ubytes = unquantize_double_c(
qbytes.tobytes(),
self.row,
qbytes.size,
scale,
zero,
self.dither_method,
0,
0,
0.0,
qbytes.dtype.itemsize,
)
else:
raise TypeError("bitpix should be one of -32 or -64")
return np.frombuffer(ubytes, dtype=BITPIX2DTYPE[self.bitpix]).data
def encode_quantized(self, buf):
"""
Quantize data.
Parameters
----------
buf : bytes or array_like
The buffer to quantize.
Returns
-------
np.ndarray
A buffer with quantized data.
"""
uarray = np.asarray(buf)
uarray = uarray.astype(uarray.dtype.newbyteorder("="))
# TODO: figure out if we need to support null checking
if uarray.dtype.itemsize == 4:
qbytes, status, scale, zero = quantize_float_c(
uarray.tobytes(),
self.row,
uarray.size,
1,
0,
0,
self.quantize_level,
self.dither_method,
)[:4]
elif uarray.dtype.itemsize == 8:
qbytes, status, scale, zero = quantize_double_c(
uarray.tobytes(),
self.row,
uarray.size,
1,
0,
0,
self.quantize_level,
self.dither_method,
)[:4]
if status == 0:
raise QuantizationFailedException()
else:
return np.frombuffer(qbytes, dtype=np.int32), scale, zero
|
Quantize
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/dockarea/Dock.py
|
{
"start": 142,
"end": 8456
}
|
class ____(QtWidgets.QWidget):
sigStretchChanged = QtCore.Signal()
sigClosed = QtCore.Signal(object)
def __init__(self, name, area=None, size=(10, 10), widget=None, hideTitle=False, autoOrientation=True, label=None, **kargs):
QtWidgets.QWidget.__init__(self)
self.dockdrop = DockDrop(self)
self._container = None
self._name = name
self.area = area
self.label = label
if self.label is None:
self.label = DockLabel(name, **kargs)
self.label.dock = self
if self.label.isClosable():
self.label.sigCloseClicked.connect(self.close)
self.labelHidden = False
self.moveLabel = True ## If false, the dock is no longer allowed to move the label.
self.autoOrient = autoOrientation
self.orientation = 'horizontal'
#self.label.setAlignment(QtCore.Qt.AlignmentFlag.AlignHCenter)
self.topLayout = QtWidgets.QGridLayout()
self.topLayout.setContentsMargins(0, 0, 0, 0)
self.topLayout.setSpacing(0)
self.setLayout(self.topLayout)
self.topLayout.addWidget(self.label, 0, 1)
self.widgetArea = QtWidgets.QWidget()
self.topLayout.addWidget(self.widgetArea, 1, 1)
self.layout = QtWidgets.QGridLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.widgetArea.setLayout(self.layout)
self.widgetArea.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)
self.widgets = []
self.currentRow = 0
#self.titlePos = 'top'
self.dockdrop.raiseOverlay()
self.hStyle = """
Dock > QWidget {
border: 1px solid #000;
border-radius: 5px;
border-top-left-radius: 0px;
border-top-right-radius: 0px;
border-top-width: 0px;
}"""
self.vStyle = """
Dock > QWidget {
border: 1px solid #000;
border-radius: 5px;
border-top-left-radius: 0px;
border-bottom-left-radius: 0px;
border-left-width: 0px;
}"""
self.nStyle = """
Dock > QWidget {
border: 1px solid #000;
border-radius: 5px;
}"""
self.dragStyle = """
Dock > QWidget {
border: 4px solid #00F;
border-radius: 5px;
}"""
self.setAutoFillBackground(False)
self.widgetArea.setStyleSheet(self.hStyle)
self.setStretch(*size)
if widget is not None:
self.addWidget(widget)
if hideTitle:
self.hideTitleBar()
def implements(self, name=None):
if name is None:
return ['dock']
else:
return name == 'dock'
def setStretch(self, x=None, y=None):
"""
Set the 'target' size for this Dock.
The actual size will be determined by comparing this Dock's
stretch value to the rest of the docks it shares space with.
"""
if x is None:
x = 0
if y is None:
y = 0
self._stretch = (x, y)
self.sigStretchChanged.emit()
def stretch(self):
return self._stretch
def hideTitleBar(self):
"""
Hide the title bar for this Dock.
This will prevent the Dock being moved by the user.
"""
self.label.hide()
self.labelHidden = True
self.dockdrop.removeAllowedArea('center')
self.updateStyle()
def showTitleBar(self):
"""
Show the title bar for this Dock.
"""
self.label.show()
self.labelHidden = False
self.dockdrop.addAllowedArea('center')
self.updateStyle()
def title(self):
"""
Gets the text displayed in the title bar for this dock.
"""
return self.label.text()
def setTitle(self, text):
"""
Sets the text displayed in title bar for this Dock.
"""
self.label.setText(text)
def setOrientation(self, o='auto', force=False):
"""
Sets the orientation of the title bar for this Dock.
Must be one of 'auto', 'horizontal', or 'vertical'.
By default ('auto'), the orientation is determined
based on the aspect ratio of the Dock.
"""
# setOrientation may be called before the container is set in some cases
# (via resizeEvent), so there's no need to do anything here until called
# again by containerChanged
if self.container() is None:
return
if o == 'auto' and self.autoOrient:
if self.container().type() == 'tab':
o = 'horizontal'
elif self.width() > self.height()*1.5:
o = 'vertical'
else:
o = 'horizontal'
if force or self.orientation != o:
self.orientation = o
self.label.setOrientation(o)
self.updateStyle()
def updateStyle(self):
## updates orientation and appearance of title bar
if self.labelHidden:
self.widgetArea.setStyleSheet(self.nStyle)
elif self.orientation == 'vertical':
self.label.setOrientation('vertical')
if self.moveLabel:
self.topLayout.addWidget(self.label, 1, 0)
self.widgetArea.setStyleSheet(self.vStyle)
else:
self.label.setOrientation('horizontal')
if self.moveLabel:
self.topLayout.addWidget(self.label, 0, 1)
self.widgetArea.setStyleSheet(self.hStyle)
def resizeEvent(self, ev):
self.setOrientation()
self.dockdrop.resizeOverlay(self.size())
def name(self):
return self._name
def addWidget(self, widget, row=None, col=0, rowspan=1, colspan=1):
"""
Add a new widget to the interior of this Dock.
Each Dock uses a QGridLayout to arrange widgets within.
"""
if row is None:
row = self.currentRow
self.currentRow = max(row+1, self.currentRow)
self.widgets.append(widget)
self.layout.addWidget(widget, row, col, rowspan, colspan)
self.dockdrop.raiseOverlay()
def startDrag(self):
self.drag = QtGui.QDrag(self)
mime = QtCore.QMimeData()
self.drag.setMimeData(mime)
self.widgetArea.setStyleSheet(self.dragStyle)
self.update()
action = self.drag.exec() if hasattr(self.drag, 'exec') else self.drag.exec_()
self.updateStyle()
def float(self):
self.area.floatDock(self)
def container(self):
return self._container
def containerChanged(self, c):
if self._container is not None:
# ask old container to close itself if it is no longer needed
self._container.apoptose()
self._container = c
if c is None:
self.area = None
else:
self.area = c.area
if c.type() != 'tab':
self.moveLabel = True
self.label.setDim(False)
else:
self.moveLabel = False
self.setOrientation(force=True)
def raiseDock(self):
"""If this Dock is stacked underneath others, raise it to the top."""
self.container().raiseDock(self)
def close(self):
"""Remove this dock from the DockArea it lives inside."""
if self._container is None:
warnings.warn(f"Cannot close dock {self} because it is not open.", RuntimeWarning, stacklevel=2)
return
self.setParent(None)
QtWidgets.QLabel.close(self.label)
self.label.setParent(None)
self._container.apoptose()
self._container = None
self.sigClosed.emit(self)
def __repr__(self):
return "<Dock %s %s>" % (self.name(), self.stretch())
def dragEnterEvent(self, *args):
self.dockdrop.dragEnterEvent(*args)
def dragMoveEvent(self, *args):
self.dockdrop.dragMoveEvent(*args)
def dragLeaveEvent(self, *args):
self.dockdrop.dragLeaveEvent(*args)
def dropEvent(self, *args):
self.dockdrop.dropEvent(*args)
|
Dock
|
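A minimal usage sketch for the Dock class above, assuming pyqtgraph and one of its supported Qt bindings are installed; in a real application the Qt event loop would be started afterwards.
from pyqtgraph.Qt import QtWidgets
from pyqtgraph.dockarea import DockArea, Dock

app = QtWidgets.QApplication([])           # a QApplication must exist before any widgets
area = DockArea()                          # container that arranges and drag-drops docks
dock = Dock("Signals", size=(400, 300))    # the size tuple feeds setStretch(), not a fixed size
dock.addWidget(QtWidgets.QLabel("hello"))  # widgets land in the dock's internal QGridLayout
area.addDock(dock, "left")
# app.exec() would start the event loop here in a real program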
python
|
keras-team__keras
|
keras/src/layers/core/identity.py
|
{
"start": 197,
"end": 848
}
|
class ____(Layer):
"""Identity layer.
This layer should be used as a placeholder when no operation is to be
performed. The layer just returns its `inputs` argument as output.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self._build_at_init()
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs):
return tree.map_structure(
lambda x: KerasTensor(x.shape, dtype=x.dtype, sparse=x.sparse),
inputs,
)
|
Identity
|
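A small sketch of the Identity layer above in use, assuming a recent Keras 3 installation; the layer returns its input unchanged.
import numpy as np
from keras import layers

x = np.arange(6, dtype="float32").reshape(2, 3)
y = layers.Identity()(x)      # passes the input straight through
print(np.asarray(y))          # same shape and values as x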
python
|
giampaolo__psutil
|
psutil/_common.py
|
{
"start": 6160,
"end": 6589
}
|
class ____(Error):
"""Exception raised when a process with a certain PID doesn't
or no longer exists.
"""
__module__ = 'psutil'
def __init__(self, pid, name=None, msg=None):
Error.__init__(self)
self.pid = pid
self.name = name
self.msg = msg or "process no longer exists"
def __reduce__(self):
return (self.__class__, (self.pid, self.name, self.msg))
|
NoSuchProcess
|
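A short sketch of how the exception above typically surfaces, assuming the chosen PID is not in use on the machine running it.
import psutil

try:
    psutil.Process(2 ** 22 + 12345)   # assumed to be a PID that does not exist
except psutil.NoSuchProcess as exc:
    print(exc.pid, exc.msg)           # the looked-up PID plus the default message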
python
|
openai__openai-python
|
src/openai/types/beta/realtime/session_update_event_param.py
|
{
"start": 2666,
"end": 3248
}
|
class ____(TypedDict, total=False):
group_id: str
"""
The group id to attach to this trace to enable filtering and grouping in the
traces dashboard.
"""
metadata: object
"""
The arbitrary metadata to attach to this trace to enable filtering in the traces
dashboard.
"""
workflow_name: str
"""The name of the workflow to attach to this trace.
This is used to name the trace in the traces dashboard.
"""
SessionTracing: TypeAlias = Union[Literal["auto"], SessionTracingTracingConfiguration]
|
SessionTracingTracingConfiguration
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-profitable-triplets-with-increasing-prices-i.py
|
{
"start": 6724,
"end": 9238
}
|
class ____(object):
def maxProfit(self, prices, profits):
"""
:type prices: List[int]
:type profits: List[int]
:rtype: int
"""
NEG_INF = float("-inf")
# Range Maximum Query
class SegmentTree(object):
def __init__(self, N,
build_fn=lambda _: None,
query_fn=lambda x, y: max(x, y),
update_fn=lambda x, y: max(x, y)):
self.tree = [None]*(2*2**((N-1).bit_length()))
self.base = len(self.tree)//2
self.query_fn = query_fn
self.update_fn = update_fn
for i in xrange(self.base, self.base+N):
self.tree[i] = build_fn(i-self.base)
for i in reversed(xrange(1, self.base)):
self.tree[i] = query_fn(self.tree[2*i], self.tree[2*i+1])
def update(self, i, h):
x = self.base+i
self.tree[x] = self.update_fn(self.tree[x], h)
while x > 1:
x //= 2
self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2+1])
def query(self, L, R):
if L > R:
return None
L += self.base
R += self.base
left = right = None
while L <= R:
if L & 1:
left = self.query_fn(left, self.tree[L])
L += 1
if R & 1 == 0:
right = self.query_fn(self.tree[R], right)
R -= 1
L //= 2
R //= 2
return self.query_fn(left, right)
price_to_idx = {x:i for i, x in enumerate(sorted(set(prices)))}
right = [NEG_INF]*len(prices)
st = SegmentTree(len(price_to_idx))
for i in reversed(xrange(len(prices))):
right[i] = st.query(price_to_idx[prices[i]]+1, len(price_to_idx)-1)
st.update(price_to_idx[prices[i]], profits[i])
result = NEG_INF
st = SegmentTree(len(price_to_idx))
for i in xrange(len(prices)):
left = st.query(0, price_to_idx[prices[i]]-1)
if left is not None and right[i] is not None:
result = max(result, left+profits[i]+right[i])
st.update(price_to_idx[prices[i]], profits[i])
return result if result != NEG_INF else -1
|
Solution5
|
python
|
apache__thrift
|
test/py/TestSocket.py
|
{
"start": 915,
"end": 2590
}
|
class ____(unittest.TestCase):
def setUp(self):
for i in range(50):
try:
# find a port we can use
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = random.randint(10000, 30000)
self.listen_sock.bind(('localhost', self.port))
self.listen_sock.listen(5)
break
except Exception:
if i == 49:
raise
def testConnectTimeout(self):
starttime = time.time()
try:
leaky = []
for i in range(100):
socket = TSocket.TSocket('localhost', self.port)
socket.setTimeout(10)
socket.open()
leaky.append(socket)
except Exception:
self.assertTrue(time.time() - starttime < 5.0)
def testWriteTimeout(self):
starttime = time.time()
try:
socket = TSocket.TSocket('localhost', self.port)
socket.setTimeout(10)
socket.open()
lsock = self.listen_sock.accept()
while True:
lsock.write("hi" * 100)
except Exception:
self.assertTrue(time.time() - starttime < 5.0)
if __name__ == '__main__':
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TimeoutTest))
testRunner = unittest.TextTestRunner(verbosity=2)
result = testRunner.run(suite)
# Exit with non-zero code if tests failed
if result.failures or result.errors:
sys.exit(1)
else:
sys.exit(0)
|
TimeoutTest
|
python
|
pytorch__pytorch
|
torch/utils/data/datapipes/datapipe.py
|
{
"start": 10223,
"end": 15435
}
|
class ____(Dataset[_T_co], metaclass=_DataPipeMeta):
r"""
Map-style DataPipe.
All datasets that represent a map from keys to data samples should subclass this.
Subclasses should overwrite :meth:`__getitem__`, supporting fetching a
data sample for a given, unique key. Subclasses can also optionally overwrite
:meth:`__len__`, which is expected to return the size of the dataset by many
:class:`~torch.utils.data.Sampler` implementations and the default options
of :class:`~torch.utils.data.DataLoader`.
These DataPipes can be invoked in two ways, using the class constructor or applying their
    functional form onto an existing `MapDataPipe` (recommended, available to most but not all DataPipes).
Note:
:class:`~torch.utils.data.DataLoader` by default constructs an index
sampler that yields integral indices. To make it work with a map-style
DataPipe with non-integral indices/keys, a custom sampler must be provided.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper, Mapper
>>> dp = SequenceWrapper(range(10))
>>> map_dp_1 = dp.map(lambda x: x + 1) # Using functional form (recommended)
>>> list(map_dp_1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> map_dp_2 = Mapper(dp, lambda x: x + 1) # Using class constructor
>>> list(map_dp_2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> batch_dp = map_dp_1.batch(batch_size=2)
>>> list(batch_dp)
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
"""
functions: dict[str, Callable] = {}
reduce_ex_hook: Callable | None = None
getstate_hook: Callable | None = None
str_hook: Callable | None = None
repr_hook: Callable | None = None
def __getattr__(self, attribute_name):
if attribute_name in MapDataPipe.functions:
if attribute_name in _map_deprecated_functional_names:
kwargs = _map_deprecated_functional_names[attribute_name]
_deprecation_warning(**kwargs)
f = MapDataPipe.functions[attribute_name]
function = functools.partial(f, self)
functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",))
return function
else:
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{attribute_name}"
)
@classmethod
def register_function(cls, function_name, function) -> None:
cls.functions[function_name] = function
@classmethod
def register_datapipe_as_function(cls, function_name, cls_to_register) -> None:
if function_name in cls.functions:
raise Exception( # noqa: TRY002
f"Unable to add DataPipe function name {function_name} as it is already taken"
)
def class_function(cls, source_dp, *args, **kwargs):
result_pipe = cls(source_dp, *args, **kwargs)
return result_pipe
function = functools.partial(class_function, cls_to_register)
functools.update_wrapper(
wrapper=function, wrapped=cls_to_register, assigned=("__doc__",)
)
cls.functions[function_name] = function
def __getstate__(self):
"""
Serialize `lambda` functions when `dill` is available.
If this doesn't cover your custom DataPipe's use case, consider writing custom methods for
`__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization.
"""
state = self.__dict__
if MapDataPipe.getstate_hook is not None:
return MapDataPipe.getstate_hook(state)
return state
def __reduce_ex__(self, *args, **kwargs):
if MapDataPipe.reduce_ex_hook is not None:
try:
return MapDataPipe.reduce_ex_hook(self)
except NotImplementedError:
pass
return super().__reduce_ex__(*args, **kwargs)
@classmethod
def set_getstate_hook(cls, hook_fn) -> None:
if MapDataPipe.getstate_hook is not None and hook_fn is not None:
raise RuntimeError("Attempt to override existing getstate_hook")
MapDataPipe.getstate_hook = hook_fn
@classmethod
def set_reduce_ex_hook(cls, hook_fn) -> None:
if MapDataPipe.reduce_ex_hook is not None and hook_fn is not None:
raise RuntimeError("Attempt to override existing reduce_ex_hook")
MapDataPipe.reduce_ex_hook = hook_fn
def __repr__(self) -> str:
if self.repr_hook is not None:
return self.repr_hook(self)
# Instead of showing <torch. ... .MapperMapDataPipe object at 0x.....>, return the class name
return str(self.__class__.__qualname__)
def __str__(self) -> str:
if self.str_hook is not None:
return self.str_hook(self)
# Instead of showing <torch. ... .MapperMapDataPipe object at 0x.....>, return the class name
return str(self.__class__.__qualname__)
def __dir__(self):
# for auto-completion in a REPL (e.g. Jupyter notebook)
return list(super().__dir__()) + list(self.functions.keys())
|
MapDataPipe
|
python
|
modin-project__modin
|
modin/experimental/core/io/glob/glob_dispatcher.py
|
{
"start": 1173,
"end": 6302
}
|
class ____(FileDispatcher):
"""Class implements reading/writing different formats, parallelizing by the number of files."""
@classmethod
def _read(cls, **kwargs):
"""
Read data from `filepath_or_buffer` according to `kwargs` parameters.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of `read_*` function.
**kwargs : dict
Parameters of `read_*` function.
Returns
-------
new_query_compiler : BaseQueryCompiler
Query compiler with imported data for further processing.
Notes
-----
The number of partitions is equal to the number of input files.
"""
if "filepath_or_buffer" in kwargs:
path_key = "filepath_or_buffer"
elif "path" in kwargs:
path_key = "path"
elif "path_or_buf" in kwargs:
path_key = "path_or_buf"
elif "path_or_buffer" in kwargs:
path_key = "path_or_buffer"
filepath_or_buffer = kwargs.pop(path_key)
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not (isinstance(filepath_or_buffer, str) and "*" in filepath_or_buffer):
return cls.single_worker_read(
filepath_or_buffer,
single_worker_read=True,
reason="Buffers and single files are not supported",
**kwargs,
)
filepath_or_buffer = sorted(glob.glob(filepath_or_buffer))
if len(filepath_or_buffer) == 0:
raise ValueError(
f"There are no files matching the pattern: {filepath_or_buffer}"
)
partition_ids = [None] * len(filepath_or_buffer)
lengths_ids = [None] * len(filepath_or_buffer)
widths_ids = [None] * len(filepath_or_buffer)
if len(filepath_or_buffer) != NPartitions.get():
# do we need to do a repartitioning?
warnings.warn("can be inefficient partitioning")
for idx, file_name in enumerate(filepath_or_buffer):
*partition_ids[idx], lengths_ids[idx], widths_ids[idx] = cls.deploy(
func=cls.parse,
f_kwargs={
"fname": file_name,
**kwargs,
},
num_returns=3,
)
lengths = cls.materialize(lengths_ids)
widths = cls.materialize(widths_ids)
# while num_splits is 1, need only one value
partition_ids = cls.build_partition(partition_ids, lengths, [widths[0]])
new_index, _ = cls.frame_cls._partition_mgr_cls.get_indices(0, partition_ids)
new_columns, _ = cls.frame_cls._partition_mgr_cls.get_indices(1, partition_ids)
return cls.query_compiler_cls(
cls.frame_cls(partition_ids, new_index, new_columns)
)
@classmethod
def write(cls, qc, **kwargs):
"""
When `*` is in the filename, all partitions are written to their own separate file.
        The filenames are determined as follows:
- if `*` is in the filename, then it will be replaced by the ascending sequence 0, 1, 2, …
- if `*` is not in the filename, then the default implementation will be used.
Parameters
----------
qc : BaseQueryCompiler
The query compiler of the Modin dataframe that we want
to run ``to_<format>_glob`` on.
**kwargs : dict
Parameters for ``pandas.to_<format>(**kwargs)``.
"""
if "filepath_or_buffer" in kwargs:
path_key = "filepath_or_buffer"
elif "path" in kwargs:
path_key = "path"
elif "path_or_buf" in kwargs:
path_key = "path_or_buf"
elif "path_or_buffer" in kwargs:
path_key = "path_or_buffer"
filepath_or_buffer = kwargs.pop(path_key)
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not (
isinstance(filepath_or_buffer, str) and "*" in filepath_or_buffer
) or not isinstance(qc, PandasQueryCompiler):
warnings.warn("Defaulting to Modin core implementation")
cls.base_write(qc, filepath_or_buffer, **kwargs)
return
# Be careful, this is a kind of limitation, but at the time of the first implementation,
# getting a name in this way is quite convenient.
# We can use this attribute because the names of the BaseIO's methods match pandas API.
write_func_name = cls.base_write.__name__
def func(df, **kw): # pragma: no cover
idx = str(kw["partition_idx"])
path = filepath_or_buffer.replace("*", idx)
getattr(df, write_func_name)(path, **kwargs)
return pandas.DataFrame()
result = qc._modin_frame.apply_full_axis(
1, func, new_index=[], new_columns=[], enumerate_partitions=True
)
cls.materialize(
[part.list_of_blocks[0] for row in result._partitions for part in row]
)
|
ExperimentalGlobDispatcher
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/metrics.py
|
{
"start": 96508,
"end": 97477
}
|
class ____(MeanMetricWrapper):
"""Computes the squared hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SquaredHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.86
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.46
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SquaredHinge()])
```
"""
def __init__(self, name='squared_hinge', dtype=None):
super(SquaredHinge, self).__init__(squared_hinge, name, dtype=dtype)
|
SquaredHinge
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/_config.py
|
{
"start": 300432,
"end": 303753
}
|
class ____(TypedDict, total=False):
"""
:class:`altair.ViewConfig` ``TypedDict`` wrapper.
Parameters
----------
clip
Whether the view should be clipped.
continuousHeight
The default height when the plot has a continuous y-field for x or latitude, or has
arc marks.
**Default value:** ``300``
continuousWidth
The default width when the plot has a continuous field for x or longitude, or has
arc marks.
**Default value:** ``300``
cornerRadius
The radius in pixels of rounded rectangles or arcs' corners.
**Default value:** ``0``
cursor
The mouse cursor used over the view. Any valid `CSS cursor type
<https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values>`__ can be used.
discreteHeight
The default height when the plot has non arc marks and either a discrete y-field or
no y-field. The height can be either a number indicating a fixed height or an object
in the form of ``{step: number}`` defining the height per discrete step.
**Default value:** a step size based on ``config.view.step``.
discreteWidth
The default width when the plot has non-arc marks and either a discrete x-field or
no x-field. The width can be either a number indicating a fixed width or an object
in the form of ``{step: number}`` defining the width per discrete step.
**Default value:** a step size based on ``config.view.step``.
fill
The fill color.
**Default value:** ``undefined``
fillOpacity
The fill opacity (value between [0,1]).
**Default value:** ``1``
opacity
The overall opacity (value between [0,1]).
**Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``,
``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise.
step
Default step size for x-/y- discrete fields.
stroke
The stroke color.
**Default value:** ``"#ddd"``
strokeCap
The stroke cap for line ending style. One of ``"butt"``, ``"round"``, or
``"square"``.
**Default value:** ``"butt"``
strokeDash
An array of alternating stroke, space lengths for creating dashed or dotted lines.
strokeDashOffset
The offset (in pixels) into which to begin drawing with the stroke dash array.
strokeJoin
The stroke line join method. One of ``"miter"``, ``"round"`` or ``"bevel"``.
**Default value:** ``"miter"``
strokeMiterLimit
The miter limit at which to bevel a line join.
strokeOpacity
The stroke opacity (value between [0,1]).
**Default value:** ``1``
strokeWidth
The stroke width, in pixels.
"""
clip: bool
continuousHeight: float
continuousWidth: float
cornerRadius: float
cursor: Cursor_T
discreteHeight: float
discreteWidth: float
fill: ColorHex | ColorName_T | None
fillOpacity: float
opacity: float
step: float
stroke: ColorHex | ColorName_T | None
strokeCap: StrokeCap_T
strokeDash: Sequence[float]
strokeDashOffset: float
strokeJoin: StrokeJoin_T
strokeMiterLimit: float
strokeOpacity: float
strokeWidth: float
|
ViewConfigKwds
|
python
|
pandas-dev__pandas
|
pandas/util/version/__init__.py
|
{
"start": 3296,
"end": 5691
}
|
class ____:
_key: tuple[Any, ...]
def __hash__(self) -> int:
return hash(self._key)
# Please keep the duplicated `isinstance` check
# in the six comparisons hereunder
# unless you find a way to avoid adding overhead function calls.
def __lt__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key < other._key
def __le__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key <= other._key
def __eq__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key == other._key
def __ge__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key >= other._key
def __gt__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key > other._key
def __ne__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key != other._key
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
_VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
VERSION_PATTERN = _VERSION_PATTERN
|
_BaseVersion
|
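A brief sketch of the total ordering that _BaseVersion provides, exercised through the vendored Version class defined later in the same module; it assumes a pandas build that still ships this vendored copy of packaging.
from pandas.util.version import Version

assert Version("2.1.0") > Version("2.0.3")
assert Version("2.1.0rc1") < Version("2.1.0")   # pre-releases sort before the final release
assert Version("2.1.0") == Version("2.1")       # trailing zero release segments compare equal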
python
|
getsentry__sentry
|
src/sentry/migrations/0939_rm_eventattachment_fileid_part2.py
|
{
"start": 239,
"end": 1537
}
|
class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0938_rm_eventattachment_fileid_part1"),
]
operations = [
SafeRemoveField(
model_name="eventattachment",
name="file_id",
deletion_action=DeletionAction.DELETE,
),
]
|
Migration
|
python
|
ZoranPandovski__al-go-rithms
|
data_structures/Graphs/graphsearch/cycle_undirected_graph/python/cycle_undirected_graph.py
|
{
"start": 0,
"end": 1291
}
|
class ____:
def __init__(self):
self.neighbors = {}
def add_vertex(self, v):
if v not in self.neighbors:
self.neighbors[v] = []
def add_edge(self, u, v):
self.neighbors[u].append(v)
# if u == v, do not connect u to itself twice
if u != v:
self.neighbors[v].append(u)
def vertices(self):
return list(self.neighbors.keys())
def vertex_neighbors(self, v):
return self.neighbors[v]
def is_cyclic_graph(G):
Q = []
V = G.vertices()
# initially all vertices are unexplored
layer = { v: -1 for v in V }
for v in V:
# v has already been explored; move on
if layer[v] != -1:
continue
# take v as a starting vertex
layer[v] = 0
Q.append(v)
# as long as Q is not empty
while len(Q) > 0:
# get the next vertex u of Q that must be looked at
u = Q.pop(0)
C = G.vertex_neighbors(u)
for z in C:
# if z is being found for the first time
if layer[z] == -1:
layer[z] = layer[u] + 1
Q.append(z)
elif layer[z] >= layer[u]:
return True
return False
|
graph
|
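A quick sketch exercising the BFS cycle check above; it assumes the graph class and is_cyclic_graph function from that snippet are in scope.
g = graph()
for v in ("a", "b", "c"):
    g.add_vertex(v)
g.add_edge("a", "b")
g.add_edge("b", "c")
print(is_cyclic_graph(g))   # False: a simple path has no cycle
g.add_edge("c", "a")
print(is_cyclic_graph(g))   # True: a-b-c-a closes a cycle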
python
|
chroma-core__chroma
|
chromadb/execution/expression/operator.py
|
{
"start": 9786,
"end": 9965
}
|
class ____(Where):
"""Not equal comparison"""
key: str
value: Any
def to_dict(self) -> Dict[str, Any]:
return {self.key: {"$ne": self.value}}
@dataclass
|
Ne
|
python
|
getsentry__sentry
|
src/sentry/analytics/events/inapp_request.py
|
{
"start": 278,
"end": 411
}
|
class ____(InAppRequestSentEvent, abc.ABC):
invited_member_id: int
@analytics.eventclass("invite_request.sent")
|
InviteOrJoinRequest
|
python
|
huggingface__transformers
|
tests/models/udop/test_modeling_udop.py
|
{
"start": 15162,
"end": 19570
}
|
class ____:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
seq_length=7,
# For common tests
is_training=False,
use_attention_mask=True,
hidden_size=32,
num_hidden_layers=2,
decoder_layers=2,
num_attention_heads=4,
d_ff=37,
relative_attention_num_buckets=32,
dropout_rate=0.1,
initializer_factor=0.002,
eos_token_id=1,
pad_token_id=0,
scope=None,
range_bbox=1000,
):
self.parent = parent
self.batch_size = batch_size
# For common tests
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.decoder_layers = decoder_layers
self.num_attention_heads = num_attention_heads
self.d_ff = d_ff
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.initializer_factor = initializer_factor
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.scope = None
self.range_bbox = range_bbox
def get_config(self):
return UdopConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
d_ff=self.d_ff,
d_kv=self.hidden_size // self.num_attention_heads,
num_layers=self.num_hidden_layers,
num_decoder_layers=self.decoder_layers,
num_heads=self.num_attention_heads,
relative_attention_num_buckets=self.relative_attention_num_buckets,
dropout_rate=self.dropout_rate,
initializer_factor=self.initializer_factor,
eos_token_id=self.eos_token_id,
bos_token_id=self.pad_token_id,
pad_token_id=self.pad_token_id,
is_encoder_decoder=False,
)
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).float()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
config = self.get_config()
return (
config,
input_ids,
bbox,
attention_mask,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
attention_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"attention_mask": attention_mask,
}
return config, inputs_dict
def create_and_check_model(
self,
config,
input_ids,
bbox,
attention_mask,
):
model = UdopEncoderModel(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
)
encoder_output = result.last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_fp16_forward(
self,
config,
input_ids,
bbox,
attention_mask,
):
model = UdopEncoderModel(config=config).to(torch_device).half().eval()
output = model(input_ids, bbox=bbox, attention_mask=attention_mask)["last_hidden_state"]
self.parent.assertFalse(torch.isnan(output).any().item())
|
UdopEncoderOnlyModelTester
|
python
|
doocs__leetcode
|
solution/0800-0899/0891.Sum of Subsequence Widths/Solution.py
|
{
"start": 0,
"end": 279
}
|
class ____:
def sumSubseqWidths(self, nums: List[int]) -> int:
mod = 10**9 + 7
nums.sort()
ans, p = 0, 1
for i, v in enumerate(nums):
ans = (ans + (v - nums[-i - 1]) * p) % mod
p = (p << 1) % mod
return ans
|
Solution
|
python
|
OmkarPathak__pygorithm
|
tests/test_pathing.py
|
{
"start": 1612,
"end": 1820
}
|
class ____(SimplePathfindingTestCaseTimed):
def find_path(self, my_graph, v1, v2):
my_pathfinder = dijkstra.Dijkstra()
return my_pathfinder.find_path(my_graph, (0, 0), (3, 0))
|
TestDijkstraTimed
|
python
|
PrefectHQ__prefect
|
src/prefect/_internal/schemas/fields.py
|
{
"start": 91,
"end": 465
}
|
class ____(BaseModel):
id: Optional[UUID] = Field(
default=None, description="The id of the creator of the object."
)
type: Optional[str] = Field(
default=None, description="The type of the creator of the object."
)
display_value: Optional[str] = Field(
default=None, description="The display value for the creator."
)
|
CreatedBy
|
python
|
getsentry__sentry
|
tests/sentry/api/test_authentication.py
|
{
"start": 31439,
"end": 35303
}
|
class ____(TestCase):
def test_system_tokens(self) -> None:
sys_token = SystemToken()
auth_token = AuthenticatedToken.from_token(sys_token)
assert auth_token is not None
assert auth_token.entity_id is None
assert auth_token.user_id is None
assert is_system_auth(sys_token) and is_system_auth(auth_token)
assert auth_token.organization_id is None
assert auth_token.application_id is None
assert auth_token.allowed_origins == sys_token.get_allowed_origins()
assert auth_token.scopes == sys_token.get_scopes()
assert auth_token.audit_log_data == sys_token.get_audit_log_data()
def test_api_tokens(self) -> None:
app = self.create_sentry_app(user=self.user, organization_id=self.organization.id)
app_install = self.create_sentry_app_installation(
organization=self.organization, user=self.user, slug=app.slug
)
with assume_test_silo_mode(SiloMode.CONTROL):
at = app_install.api_token
with assume_test_silo_mode(SiloMode.REGION):
atr = ApiTokenReplica.objects.get(apitoken_id=at.id)
assert at.organization_id
for token in [at, atr]:
auth_token = AuthenticatedToken.from_token(token)
assert auth_token is not None
assert auth_token.entity_id == at.id
assert auth_token.user_id == app.proxy_user_id
assert is_api_token_auth(token) and is_api_token_auth(auth_token)
assert auth_token.organization_id == self.organization.id
assert auth_token.application_id == app.application_id
assert auth_token.allowed_origins == token.get_allowed_origins()
assert auth_token.scopes == token.get_scopes()
assert auth_token.audit_log_data == token.get_audit_log_data()
def test_api_keys(self) -> None:
ak = self.create_api_key(organization=self.organization, scope_list=["projects:read"])
with assume_test_silo_mode(SiloMode.REGION):
akr = ApiKeyReplica.objects.get(apikey_id=ak.id)
for token in [ak, akr]:
auth_token = AuthenticatedToken.from_token(token)
assert auth_token is not None
assert auth_token.entity_id == ak.id
assert auth_token.user_id is None
assert is_api_key_auth(token) and is_api_key_auth(auth_token)
assert auth_token.organization_id == self.organization.id
assert auth_token.application_id is None
assert auth_token.allowed_origins == token.get_allowed_origins()
assert auth_token.scopes == token.get_scopes()
assert auth_token.audit_log_data == token.get_audit_log_data()
def test_org_auth_tokens(self) -> None:
oat = OrgAuthToken.objects.create(
organization_id=self.organization.id,
name="token 1",
token_hashed="ABCDEF",
token_last_characters="xyz1",
scope_list=["org:ci"],
date_last_used=None,
)
with assume_test_silo_mode(SiloMode.REGION):
oatr = OrgAuthTokenReplica.objects.get(orgauthtoken_id=oat.id)
for token in (oat, oatr):
auth_token = AuthenticatedToken.from_token(token)
assert auth_token is not None
assert auth_token.entity_id == oat.id
assert auth_token.user_id is None
assert is_org_auth_token_auth(token) and is_org_auth_token_auth(auth_token)
assert auth_token.organization_id == self.organization.id
assert auth_token.application_id is None
assert auth_token.allowed_origins == token.get_allowed_origins()
assert auth_token.scopes == token.get_scopes()
assert auth_token.audit_log_data == token.get_audit_log_data()
|
TestAuthTokens
|
python
|
pytorch__pytorch
|
test/distributed/tensor/test_dtensor_ops.py
|
{
"start": 17510,
"end": 26872
}
|
class ____(TestCase):
__test__ = False
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.__test__ = True
@property
def world_size(self) -> int:
return OP_DB_WORLD_SIZE
def run_opinfo_test(
self, dtype, op, requires_grad=True, sample_inputs_filter=lambda s: True
):
self.mesh = init_device_mesh(DEVICE_TYPE, (self.world_size,))
# test each op with dist tensor inputs and normal inputs
def test():
samples = op.sample_inputs(DEVICE_TYPE, dtype, requires_grad=requires_grad)
for sample_input in samples:
if not sample_inputs_filter(sample_input):
continue
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
self.run_dtensor_crossref(op.op, args, kwargs)
# we need to figure out a way to test the out variant, out variant testing
# is tricky, as we need to pre allocate the dtensor out, some of them rely
# on sharding placements to be pre-known (i.e. mm.out)
# if isinstance(expected, torch.Tensor) and op.supports_out:
# func(*args, **kwargs, out=expected)
self.check_dtensor_func(test, op)
def assert_ref_dtensor_equal(self, dtensor_rs, rs):
flat_dtensor_rs = pytree.tree_leaves(dtensor_rs)
flat_rs = pytree.tree_leaves(rs)
self.assertEqual(len(flat_dtensor_rs), len(flat_rs))
for dtensor_r, r in zip(flat_dtensor_rs, flat_rs):
if not isinstance(r, torch.Tensor):
continue
self.assertIsInstance(dtensor_r, torch.Tensor)
self.assertEqualOnRank(
dtensor_r.shape,
r.shape,
f"Shape mismatch! original shape:{r.shape}, dtensor shape: {dtensor_r.shape}",
)
self.assertEqualOnRank(
dtensor_r.requires_grad,
r.requires_grad,
"op result requires_grad mismatch!"
f"original requires_grad: {r.requires_grad}, "
f"dtensor requires_grad: {dtensor_r.requires_grad}",
)
self.assertEqualOnRank(dtensor_r, r)
def assertEqualOnRank(self, x, y, msg=None, *, rank=0) -> None:
raise NotImplementedError
def run_dtensor_crossref(self, func, args, kwargs):
to_dtensor = DTensorConverter(self.mesh, args, kwargs)
def concat_res_if_necessary(func, res: object) -> object:
# concat the result on corresponding dim for ops like
# split, so that we can call backward on a single tensor
if (resolve_name(func) is not None) and ("split" in resolve_name(func)):
dim = args[2] if len(args) == 3 else 0
return torch.cat(res, dim=dim)
else:
return res
# TODO: also handle cases where func raise an exception
op_args, op_kwargs = reconcile_args(args, kwargs)
rs = func(*op_args, **op_kwargs)
rs = concat_res_if_necessary(func, rs)
def to_replicate(e: object) -> object:
return e.full_tensor() if isinstance(e, DTensor) else e
# Suppress warnings, this doesn't matter for test_meta.py
# but it does matter if you want to use this decorator
# for cross-ref testing, as some tests may be looking at
# errors
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# for every comb of sharding choices, we test if it works
for dtensor_args, dtensor_kwargs in to_dtensor:
# Only attempt if we managed to convert all tensors to DTensor
# (if any of them failed, we're in a mixed tensor situation and
# this is not allowed in DTensor)
try:
if to_dtensor.successful():
# Handle special cases first if there's any
# Suppress warnings, this doesn't matter for test_meta.py
# but it does matter if you want to use this decorator
# for cross-ref testing, as some tests may be looking at
# errors
dtensor_rs = func(*dtensor_args, **dtensor_kwargs)
# we need to skip tests containing tensors of zero elements for now.
# see issue: https://github.com/pytorch/PiPPy/issues/470
# TODO remove this once issue above fixed.
flat_args = pytree.tree_leaves(dtensor_rs)
if any(
isinstance(e, torch.Tensor) and e.numel() == 0
for e in flat_args
):
continue
# redistribute/all_gather the results to compare with normal output
dtensor_rs = tree_map(to_replicate, dtensor_rs)
dtensor_rs = concat_res_if_necessary(func, dtensor_rs)
try:
if resolve_name(func) not in skip_bw:
if isinstance(dtensor_rs, DTensor):
dtensor_rs.to_local().sum().backward()
elif isinstance(dtensor_rs, tuple):
dtensor_rs[0].to_local().sum().backward()
except Exception as e:
# TODO(anj): Remove this guard exception after gaining more confidence.
if torch.distributed.get_rank() == 0:
print(
f"failed to run BW: {resolve_name(func)}, {func}, {str(e)})"
)
self.assert_ref_dtensor_equal(dtensor_rs, rs)
else:
raise RuntimeError(
f"Failed to convert args to DTensor; "
f"originally (*{args}, **{kwargs})"
)
except Exception as e:
raise RuntimeError(
f"{str(e)}\n\nFailed to run: {resolve_name(func)}, with (*{dtensor_args}, **{dtensor_kwargs})"
) from e
return rs
def check_dtensor_func(self, test_func, opinfo, dry_run=False):
try:
test_func()
except Exception:
if not dry_run:
raise
if dist.get_rank() == 0:
if opinfo.variant_test_name:
print(f"xfail('{opinfo.name}', '{opinfo.variant_test_name}'),")
else:
print(f"xfail('{opinfo.name}'),")
def run_one_hot(self):
ops = [op for op in op_db if op.name == "nn.functional.one_hot"]
assert len(ops) == 1
op = ops[0]
# num_classes = -1 appears to have a bug with dtensor.max().item()
self.run_opinfo_test(
torch.int64,
op,
requires_grad=False,
sample_inputs_filter=lambda s: s.kwargs["num_classes"] != -1,
)
def run_mean(self):
self.mesh = init_device_mesh(DEVICE_TYPE, (self.world_size,))
shape = [2 * self.world_size + 1, 2 * self.world_size]
tensor = (
torch.arange(shape[0] * shape[1], dtype=torch.float32)
.reshape(shape)
.to(DEVICE_TYPE)
)
for is_evenly_shardable in [True, False]:
if is_evenly_shardable:
placement = [Shard(1)]
reduce_dim = 1
else:
placement = [Shard(0)]
reduce_dim = 0
dtensor = distribute_tensor(tensor, self.mesh, placement)
with DebugMode(record_torchfunction=False) as debug_mode:
mean = dtensor.mean(dim=reduce_dim)
full_tensor = mean.full_tensor()
self.assertEqual(full_tensor, tensor.mean(dim=reduce_dim))
if is_evenly_shardable:
self.assertTrue("P(avg)->R" in debug_mode.debug_string())
else:
self.assertTrue("S(0)->R" in debug_mode.debug_string())
def test_embedding_error_msg(self):
self.mesh_2d = init_device_mesh(
DEVICE_TYPE, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
self.mesh_1d = self.mesh_2d["tp"]
weight_global = torch.randn(2048, 256, device=DEVICE_TYPE)
weight_dtensor = distribute_tensor(weight_global, self.mesh_1d, [Shard(0)])
input_global = torch.randint(0, 2048, (16, 2048), device=DEVICE_TYPE)
input_dtensor = distribute_tensor(
input_global, self.mesh_2d, [Shard(0), Replicate()]
)
expected_error_msg = (
"Sharding propagation failed for aten.embedding.default"
"(Spec(f32[2048, 256](S(0))), Spec(i64[16, 2048](S(0)R))) "
"on DeviceMesh((dp=2, tp=2), "
)
with self.assertRaisesRegex(RuntimeError, re.escape(expected_error_msg)):
_ = torch.ops.aten.embedding.default(weight_dtensor, input_dtensor)
|
TestDTensorOps
|
python
|
django__django
|
tests/auth_tests/models/with_last_login_attr.py
|
{
"start": 61,
"end": 139
}
|
class ____(AbstractBaseUser):
last_login = None
|
UserWithDisabledLastLoginField
|
python
|
jina-ai__jina
|
jina/jaml/parsers/deployment/legacy.py
|
{
"start": 323,
"end": 1922
}
|
class ____(BaseLegacyParser):
"""Legacy parser for gateway."""
def parse(
self,
cls: Type['Deployment'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'Deployment':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: deployment yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Deployment YAML parser given the syntax version number
"""
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
obj = cls(
**data.get('with', {}),
needs=data.get('needs'),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
obj.is_updated = False
return obj
def dump(self, data: 'Deployment') -> Dict:
"""
:param data: versioned deployment object
:return: the dictionary given a versioned deployment object
"""
r = {}
r['with'] = {}
parser = set_deployment_parser()
non_default_kw = ArgNamespace.get_non_defaults_args(data.args, parser)
for t in _get_taboo(parser):
if t in non_default_kw:
non_default_kw.pop(t)
if non_default_kw:
r['with'].update(non_default_kw)
if data._gateway_kwargs:
r['with'].update(data._gateway_kwargs)
return r
|
DeploymentLegacyParser
|
python
|
python__mypy
|
mypy/nodes.py
|
{
"start": 74463,
"end": 74999
}
|
class ____(Expression):
"""Unary operation"""
__slots__ = ("op", "expr", "method_type")
__match_args__ = ("op", "expr")
op: str # TODO: Enum?
expr: Expression
# Inferred operator method type
method_type: mypy.types.Type | None
def __init__(self, op: str, expr: Expression) -> None:
super().__init__()
self.op = op
self.expr = expr
self.method_type = None
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_unary_expr(self)
|
UnaryExpr
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/tasks.py
|
{
"start": 235171,
"end": 237821
}
|
class ____(Response):
"""
Response of tasks.enqueue endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
:param queued: Number of tasks queued (0 or 1)
:type queued: int
"""
_service = "tasks"
_action = "enqueue"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"queued": {
"description": "Number of tasks queued (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self, updated: Optional[int] = None, fields: Optional[dict] = None, queued: Optional[int] = None, **kwargs: Any
) -> None:
super(EnqueueResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
self.queued = queued
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
@schema_property("queued")
def queued(self) -> Optional[int]:
return self._property_queued
@queued.setter
def queued(self, value: Optional[int]) -> None:
if value is None:
self._property_queued = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "queued", six.integer_types)
self._property_queued = value
|
EnqueueResponse
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 645789,
"end": 646450
}
|
class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("TeamDiscussionCommentEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("TeamDiscussionComment"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
|
TeamDiscussionCommentConnection
|
python
|
ansible__ansible
|
lib/ansible/module_utils/errors.py
|
{
"start": 2734,
"end": 2820
}
|
class ____(AnsibleValidationError):
"""Missing a required parameter"""
|
RequiredError
|
python
|
django__django
|
django/template/context.py
|
{
"start": 3763,
"end": 5124
}
|
class ____(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True, use_l10n=None, use_tz=None):
self.autoescape = autoescape
self.use_l10n = use_l10n
self.use_tz = use_tz
self.template_name = "unknown"
self.render_context = RenderContext()
# Set to the original template -- as opposed to extended or included
# templates -- during rendering, see bind_template.
self.template = None
super().__init__(dict_)
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
try:
yield
finally:
self.template = None
def __copy__(self):
duplicate = super().__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Push other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, "__getitem__"):
raise TypeError("other_dict must be a mapping (dictionary-like) object.")
if isinstance(other_dict, BaseContext):
other_dict = other_dict.dicts[1:].pop()
return ContextDict(self, other_dict)
|
Context
|
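A minimal sketch of the stack behaviour of the Context above, assuming Django is installed; these calls should not require configured settings.
from django.template import Context

ctx = Context({"user": "ada"})
with ctx.push(greeting="hello"):      # push() stacks a new dict and acts as a context manager
    assert ctx["greeting"] == "hello"
assert "greeting" not in ctx          # the pushed dict is popped on exit
ctx.update({"role": "admin"})         # update() also pushes a ContextDict onto the stack
assert ctx["role"] == "admin"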
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/loops/test_flow_warnings.py
|
{
"start": 701,
"end": 1319
}
|
class ____(BoringModel):
def training_step(self, batch, batch_idx):
return self.step(batch[0])
def test_no_depre_without_epoch_end(tmp_path):
"""Tests that only training_step can be used."""
model = TestModel()
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
enable_model_summary=False,
)
with warnings.catch_warnings(record=True) as w:
trainer.fit(model)
for msg in w:
assert "should not return anything " not in str(msg)
|
TestModel
|
python
|
scikit-learn__scikit-learn
|
sklearn/base.py
|
{
"start": 42284,
"end": 46941
}
|
class ____:
"""Mark estimators that are non-determinstic on 32bit or PowerPC"""
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.non_deterministic = _IS_32BIT or platform.machine().startswith(
("ppc", "powerpc")
)
return tags
def is_classifier(estimator):
"""Return True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
Examples
--------
>>> from sklearn.base import is_classifier
>>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
>>> kmeans = KMeans()
>>> is_classifier(classifier)
True
>>> is_classifier(regressor)
False
>>> is_classifier(kmeans)
False
"""
return get_tags(estimator).estimator_type == "classifier"
def is_regressor(estimator):
"""Return True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
Examples
--------
>>> from sklearn.base import is_regressor
>>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
>>> kmeans = KMeans()
>>> is_regressor(classifier)
False
>>> is_regressor(regressor)
True
>>> is_regressor(kmeans)
False
"""
return get_tags(estimator).estimator_type == "regressor"
def is_clusterer(estimator):
"""Return True if the given estimator is (probably) a clusterer.
.. versionadded:: 1.6
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is a clusterer and False otherwise.
Examples
--------
>>> from sklearn.base import is_clusterer
>>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
>>> kmeans = KMeans()
>>> is_clusterer(classifier)
False
>>> is_clusterer(regressor)
False
>>> is_clusterer(kmeans)
True
"""
return get_tags(estimator).estimator_type == "clusterer"
def is_outlier_detector(estimator):
"""Return True if the given estimator is (probably) an outlier detector.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is an outlier detector and False otherwise.
"""
return get_tags(estimator).estimator_type == "outlier_detector"
def _fit_context(*, prefer_skip_nested_validation):
"""Decorator to run the fit methods of estimators within context managers.
Parameters
----------
prefer_skip_nested_validation : bool
If True, the validation of parameters of inner estimators or functions
called during fit will be skipped.
This is useful to avoid validating many times the parameters passed by the
user from the public facing API. It's also useful to avoid validating
parameters that we pass internally to inner functions that are guaranteed to
be valid by the test suite.
It should be set to True for most estimators, except for those that receive
non-validated objects as parameters, such as meta-estimators that are given
estimator objects.
Returns
-------
decorated_fit : method
The decorated fit method.
"""
def decorator(fit_method):
@functools.wraps(fit_method)
def wrapper(estimator, *args, **kwargs):
global_skip_validation = get_config()["skip_parameter_validation"]
# we don't want to validate again for each call to partial_fit
partial_fit_and_fitted = (
fit_method.__name__ == "partial_fit" and _is_fitted(estimator)
)
if not global_skip_validation and not partial_fit_and_fitted:
estimator._validate_params()
with config_context(
skip_parameter_validation=(
prefer_skip_nested_validation or global_skip_validation
)
):
return fit_method(estimator, *args, **kwargs)
return wrapper
return decorator
|
_UnstableArchMixin
|
python
|
encode__django-rest-framework
|
rest_framework/generics.py
|
{
"start": 720,
"end": 6678
}
|
class ____(views.APIView):
"""
Base class for all other generic views.
"""
# You'll need to either set these attributes,
# or override `get_queryset()`/`get_serializer_class()`.
# If you are overriding a view method, it is important that you call
# `get_queryset()` instead of accessing the `queryset` property directly,
# as `queryset` will get evaluated only once, and those results are cached
# for all subsequent requests.
queryset = None
serializer_class = None
# If you want to use object lookups other than pk, set 'lookup_field'.
# For more complex lookup requirements override `get_object()`.
lookup_field = 'pk'
lookup_url_kwarg = None
# The filter backend classes to use for queryset filtering
filter_backends = api_settings.DEFAULT_FILTER_BACKENDS
# The style to use for queryset pagination.
pagination_class = api_settings.DEFAULT_PAGINATION_CLASS
# Allow generic typing checking for generic views.
def __class_getitem__(cls, *args, **kwargs):
return cls
def get_queryset(self):
"""
Get the list of items for this view.
This must be an iterable, and may be a queryset.
Defaults to using `self.queryset`.
This method should always be used rather than accessing `self.queryset`
directly, as `self.queryset` gets evaluated only once, and those results
are cached for all subsequent requests.
You may want to override this if you need to provide different
querysets depending on the incoming request.
(Eg. return a list of items that is specific to the user)
"""
assert self.queryset is not None, (
"'%s' should either include a `queryset` attribute, "
"or override the `get_queryset()` method."
% self.__class__.__name__
)
queryset = self.queryset
if isinstance(queryset, QuerySet):
# Ensure queryset is re-evaluated on each request.
queryset = queryset.all()
return queryset
def get_object(self):
"""
Returns the object the view is displaying.
You may want to override this if you need to provide non-standard
queryset lookups. Eg if objects are referenced using multiple
keyword arguments in the url conf.
"""
queryset = self.filter_queryset(self.get_queryset())
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
assert lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, lookup_url_kwarg)
)
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
obj = get_object_or_404(queryset, **filter_kwargs)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs.setdefault('context', self.get_serializer_context())
return serializer_class(*args, **kwargs)
def get_serializer_class(self):
"""
Return the class to use for the serializer.
Defaults to using `self.serializer_class`.
You may want to override this if you need to provide different
serializations depending on the incoming request.
(Eg. admins get full serialization, others get basic serialization)
"""
assert self.serializer_class is not None, (
"'%s' should either include a `serializer_class` attribute, "
"or override the `get_serializer_class()` method."
% self.__class__.__name__
)
return self.serializer_class
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
return {
'request': self.request,
'format': self.format_kwarg,
'view': self
}
def filter_queryset(self, queryset):
"""
Given a queryset, filter it with whichever filter backend is in use.
You are unlikely to want to override this method, although you may need
to call it either from a list view, or from a custom `get_object`
method if you want to apply the configured filtering backend to the
default queryset.
"""
for backend in list(self.filter_backends):
queryset = backend().filter_queryset(self.request, queryset, self)
return queryset
@property
def paginator(self):
"""
The paginator instance associated with the view, or `None`.
"""
if not hasattr(self, '_paginator'):
if self.pagination_class is None:
self._paginator = None
else:
self._paginator = self.pagination_class()
return self._paginator
def paginate_queryset(self, queryset):
"""
Return a single page of results, or `None` if pagination is disabled.
"""
if self.paginator is None:
return None
return self.paginator.paginate_queryset(queryset, self.request, view=self)
def get_paginated_response(self, data):
"""
Return a paginated style `Response` object for the given output data.
"""
assert self.paginator is not None
return self.paginator.get_paginated_response(data)
# Concrete view classes that provide method handlers
# by composing the mixin classes with the base view.
|
GenericAPIView
|
python
|
doocs__leetcode
|
solution/1100-1199/1140.Stone Game II/Solution2.py
|
{
"start": 0,
"end": 403
}
|
class ____:
def stoneGameII(self, piles: List[int]) -> int:
@cache
def dfs(i: int, m: int = 1) -> int:
if i >= len(piles):
return 0
t = inf
for x in range(1, m << 1 | 1):
t = min(t, dfs(i + x, max(m, x)))
return s[-1] - s[i] - t
s = list(accumulate(piles, initial=0))
return dfs(0)
|
Solution
|
python
|
realpython__materials
|
python-self-type/accounts_typevar.py
|
{
"start": 806,
"end": 1843
}
|
class ____(BankAccount):
interest_rate: float
@classmethod
def from_application(
cls: type[TBankAccount], deposit: float = 0, interest_rate: float = 1
) -> TBankAccount:
# Generate a random seven-digit bank account number
account_number = random.randint(1000000, 9999999)
return cls(account_number, deposit, interest_rate)
def calculate_interest(self) -> float:
return self.balance * self.interest_rate / 100
def add_interest(self: TBankAccount) -> TBankAccount:
self.deposit(self.calculate_interest())
return self
account = BankAccount(account_number=1534899324, balance=50)
(
account.display_balance()
.deposit(50)
.display_balance()
.withdraw(30)
.display_balance()
)
savings = SavingsAccount.from_application(deposit=100, interest_rate=5)
(
savings.display_balance()
.add_interest()
.display_balance()
.deposit(50)
.display_balance()
.withdraw(30)
.add_interest()
.display_balance()
)
|
SavingsAccount
|
python
|
fastai__fastai
|
fastai/callback/comet.py
|
{
"start": 434,
"end": 3392
}
|
class ____(Callback):
"Log losses, metrics, model weights, model architecture summary to neptune"
order = Recorder.order + 1
def __init__(self, project_name, log_model_weights=True):
self.log_model_weights = log_model_weights
self.keep_experiment_running = keep_experiment_running
self.project_name = project_name
self.experiment = None
def before_fit(self):
try:
self.experiment = comet_ml.Experiment(project_name=self.project_name)
except ValueError:
print("No active experiment")
try:
self.experiment.log_parameter("n_epoch", str(self.learn.n_epoch))
self.experiment.log_parameter("model_class", str(type(self.learn.model)))
        except Exception:
            print("Did not log all properties.")
try:
with tempfile.NamedTemporaryFile(mode="w") as f:
with open(f.name, "w") as g:
g.write(repr(self.learn.model))
self.experiment.log_asset(f.name, "model_summary.txt")
        except Exception:
            print("Did not log model summary. Check if your model is a PyTorch model.")
        if self.log_model_weights and not hasattr(self.learn, "save_model"):
            print("Unable to log model to Comet.\n")
def after_batch(self):
# log loss and opt.hypers
if self.learn.training:
self.experiment.log_metric("batch__smooth_loss", self.learn.smooth_loss)
self.experiment.log_metric("batch__loss", self.learn.loss)
self.experiment.log_metric("batch__train_iter", self.learn.train_iter)
for i, h in enumerate(self.learn.opt.hypers):
for k, v in h.items():
self.experiment.log_metric(f"batch__opt.hypers.{k}", v)
def after_epoch(self):
# log metrics
for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
if n not in ["epoch", "time"]:
self.experiment.log_metric(f"epoch__{n}", v)
if n == "time":
self.experiment.log_text(f"epoch__{n}", str(v))
# log model weights
if self.log_model_weights and hasattr(self.learn, "save_model"):
if self.learn.save_model.every_epoch:
_file = join_path_file(
f"{self.learn.save_model.fname}_{self.learn.save_model.epoch}",
self.learn.path / self.learn.model_dir,
ext=".pth",
)
else:
_file = join_path_file(
self.learn.save_model.fname,
self.learn.path / self.learn.model_dir,
ext=".pth",
)
self.experiment.log_asset(_file)
def after_fit(self):
try:
self.experiment.end()
        except Exception:
            print("No Comet experiment to stop.")
|
CometCallback
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/reaching_definitions_test.py
|
{
"start": 1278,
"end": 3059
}
|
class ____(test.TestCase):
def _parse_and_analyze(self, test_fn):
# TODO(mdan): Use a custom FunctionTransformer here.
node, source = parser.parse_entity(test_fn, future_features=())
entity_info = transformer.EntityInfo(
name=test_fn.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
node = qual_names.resolve(node)
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
graphs = cfg.build(node)
node = reaching_definitions.resolve(node, ctx, graphs,
reaching_definitions.Definition)
return node
def assertHasDefs(self, node, num):
defs = anno.getanno(node, anno.Static.DEFINITIONS)
self.assertEqual(len(defs), num)
for r in defs:
self.assertIsInstance(r, reaching_definitions.Definition)
def assertHasDefinedIn(self, node, expected):
defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
defined_in_str = set(str(v) for v in defined_in)
if not expected:
expected = ()
if not isinstance(expected, tuple):
expected = (expected,)
self.assertSetEqual(defined_in_str, set(expected))
def assertSameDef(self, first, second):
self.assertHasDefs(first, 1)
self.assertHasDefs(second, 1)
self.assertIs(
anno.getanno(first, anno.Static.DEFINITIONS)[0],
anno.getanno(second, anno.Static.DEFINITIONS)[0])
def assertNotSameDef(self, first, second):
self.assertHasDefs(first, 1)
self.assertHasDefs(second, 1)
self.assertIsNot(
anno.getanno(first, anno.Static.DEFINITIONS)[0],
anno.getanno(second, anno.Static.DEFINITIONS)[0])
|
ReachingDefinitionsAnalyzerTestBase
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/layers/pooling.py
|
{
"start": 19650,
"end": 23904
}
|
class ____(Pooling2D):
"""Average pooling operation for spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using `"valid"` padding option has a shape
(number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
For example, for `strides=(1, 1)` and `padding="valid"`:
>>> x = tf.constant([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = tf.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding='valid')
>>> avg_pool_2d(x)
<tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=
array([[[[3.],
[4.]],
[[6.],
[7.]]]], dtype=float32)>
    For example, for `strides=(2, 2)` and `padding="valid"`:
>>> x = tf.constant([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = tf.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding='valid')
>>> avg_pool_2d(x)
<tf.Tensor: shape=(1, 1, 2, 1), dtype=float32, numpy=
array([[[[3.5],
[5.5]]]], dtype=float32)>
For example, for `strides=(1, 1)` and `padding="same"`:
>>> x = tf.constant([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = tf.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding='same')
>>> avg_pool_2d(x)
<tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy=
array([[[[3.],
[4.],
[4.5]],
[[6.],
[7.],
[7.5]],
[[7.5],
[8.5],
[9.]]]], dtype=float32)>
Args:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
`(2, 2)` will halve the input in both spatial dimension.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(AveragePooling2D, self).__init__(
nn.avg_pool,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
|
AveragePooling2D
|
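The docstring above quotes closed-form output-shape formulas for "valid" and "same" padding. The TensorFlow-free check below simply evaluates those formulas for a 3x4 input with a 2x2 pool and stride 2; the "valid" numbers match the (1, 1, 2, 1) output shape in the second doctest, while the "same" line is the formula only (that case is not shown in the doctest).

import math

def valid_out(n: int, pool: int, stride: int) -> int:
    # output_shape = floor((input_shape - pool_size) / strides) + 1
    return math.floor((n - pool) / stride) + 1

def same_out(n: int, stride: int) -> int:
    # output_shape = floor((input_shape - 1) / strides) + 1
    return math.floor((n - 1) / stride) + 1

rows, cols, pool, stride = 3, 4, 2, 2
print(valid_out(rows, pool, stride), valid_out(cols, pool, stride))  # 1 2 -> shape (1, 1, 2, 1)
print(same_out(rows, stride), same_out(cols, stride))                # 2 2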
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/input.py
|
{
"start": 13618,
"end": 16162
}
|
class ____(NamedTuple):
"""Defines an input mapping for a graph.
Args:
graph_input_name (str): Name of the input in the graph being mapped from.
        mapped_node_name (str): Name of the node (op/graph) that the input is being mapped to.
        mapped_node_input_name (str): Name of the input in the node (op/graph) that is being mapped to.
        fan_in_index (Optional[int]): The index into a fanned-in input, otherwise None.
graph_input_description (Optional[str]): A description of the input in the graph being mapped from.
dagster_type (Optional[DagsterType]): The dagster type of the graph's input
being mapped from.
Examples:
.. code-block:: python
from dagster import InputMapping, GraphDefinition, op, graph
@op
def needs_input(x):
return x + 1
# The following two graph definitions are equivalent
GraphDefinition(
name="the_graph",
node_defs=[needs_input],
input_mappings=[
InputMapping(
graph_input_name="maps_x", mapped_node_name="needs_input",
mapped_node_input_name="x"
)
]
)
@graph
def the_graph(maps_x):
needs_input(maps_x)
"""
graph_input_name: str
mapped_node_name: str
mapped_node_input_name: str
fan_in_index: Optional[int] = None
graph_input_description: Optional[str] = None
dagster_type: Optional[DagsterType] = None
@property
def maps_to(self) -> Union[InputPointer, FanInInputPointer]:
if self.fan_in_index is not None:
return FanInInputPointer(
self.mapped_node_name, self.mapped_node_input_name, self.fan_in_index
)
return InputPointer(self.mapped_node_name, self.mapped_node_input_name)
@property
def maps_to_fan_in(self) -> bool:
return isinstance(self.maps_to, FanInInputPointer)
def describe(self) -> str:
idx = self.maps_to.fan_in_index if isinstance(self.maps_to, FanInInputPointer) else ""
return f"{self.graph_input_name} -> {self.maps_to.node_name}:{self.maps_to.input_name}{idx}"
def get_definition(self) -> "InputDefinition":
return InputDefinition(
name=self.graph_input_name,
description=self.graph_input_description,
dagster_type=self.dagster_type,
)
@public
|
InputMapping
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/sparse_ops/sparse_reshape_op_test.py
|
{
"start": 15279,
"end": 18091
}
|
class ____(test.TestCase, parameterized.TestCase):
"""Tests for reshaping 0-sized SparseTensors, compared w/ dense tensors."""
def _MakeAndReshapeTensor(self, tensor_class, original_shape, target_shape):
if tensor_class == "sparse":
ind = np.zeros([0, len(original_shape)]).astype(np.int64)
val = np.array([]).astype(np.float64)
shape = np.array(original_shape).astype(np.int64)
sp_input = sparse_tensor.SparseTensorValue(ind, val, shape)
sp_output = self.evaluate(
sparse_ops.sparse_reshape(sp_input, target_shape))
return sp_output.dense_shape
else:
dense_input = array_ops.zeros(original_shape)
dense_output = self.evaluate(array_ops.reshape(dense_input, target_shape))
return dense_output.shape
@parameterized.named_parameters([
("Dense", "dense"),
("Sparse", "sparse"),
])
def testImpliedReshapeEmpty1DTensor(self, tensor_class):
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [0], [-1, 1]), [0, 1])
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [0], [-1, 1, 2]), [0, 1, 2])
@parameterized.named_parameters([
("Dense", "dense"),
("Sparse", "sparse"),
])
def testImpliedReshapeEmpty2DTensor(self, tensor_class):
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [1, 0], [-1, 1]), [0, 1])
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [1, 0], [-1, 2, 3]), [0, 2, 3])
@parameterized.named_parameters([
("Dense", "dense"),
("Sparse", "sparse"),
])
def testImpliedReshapeEmpty3DTensor(self, tensor_class):
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [1, 0, 0], [-1, 2, 3]),
[0, 2, 3])
@parameterized.named_parameters([
("Dense", "dense"),
("Sparse", "sparse"),
])
def testImpliedReshapeEmpty4DTensor(self, tensor_class):
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [2, 4, 0, 6], [-1, 4, 6, 2]),
[0, 4, 6, 2])
def testImpliedDimTogetherWithZeroDimCausesError(self):
# NOTE: When implied dimensions and zero dimensions coexist in the target
# shape, the behavior currently differs between sparse and regular tensors.
with self.assertRaises(errors.InvalidArgumentError):
self._MakeAndReshapeTensor("sparse", [0], [-1, 0])
with self.assertRaises(errors.InvalidArgumentError):
self._MakeAndReshapeTensor("sparse", [1, 0], [-1, 0])
with self.assertRaises(errors.InvalidArgumentError):
self._MakeAndReshapeTensor("sparse", [1, 2, 0], [2, -1, 0])
with self.assertRaises(errors.InvalidArgumentError):
self._MakeAndReshapeTensor("sparse", [1, 2, 3, 0], [2, 0, -1, 3])
if __name__ == "__main__":
test.main()
|
EmptySparseTensorReshapeTest
|
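The dense branch of the test above comes down to NumPy's handling of an implied `-1` dimension for size-0 arrays: as long as the known dimensions are non-zero, the implied dimension resolves to 0. A quick NumPy-only illustration:

import numpy as np

empty = np.zeros([1, 0])                                   # 0 elements
print(empty.reshape(-1, 2, 3).shape)                       # (0, 2, 3): the -1 resolves to 0
print(np.zeros([2, 4, 0, 6]).reshape(-1, 4, 6, 2).shape)   # (0, 4, 6, 2)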
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pydoclint/DOC201_numpy.py
|
{
"start": 346,
"end": 1001
}
|
class ____:
# OK
def foo(self) -> str:
"""
Do something
Parameters
----------
num : int
A number
Returns
-------
str
A string
"""
return 'test'
# DOC201
def bar(self) -> str:
"""
Do something
Parameters
----------
num : int
A number
"""
return 'test'
# OK
@property
def baz(self) -> str:
"""
Do something
Parameters
----------
num : int
A number
"""
return 'test'
import abc
|
Bar
|
python
|
pypa__warehouse
|
tests/unit/accounts/test_forms.py
|
{
"start": 33186,
"end": 36571
}
|
class ____:
def test_validate(self):
form = forms.ResetPasswordForm(
formdata=MultiDict(
{
"new_password": "MyStr0ng!shPassword",
"password_confirm": "MyStr0ng!shPassword",
"username": "username",
"full_name": "full_name",
"email": "email",
}
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert form.validate(), str(form.errors)
def test_password_confirm_required_error(self):
form = forms.ResetPasswordForm(
formdata=MultiDict({"password_confirm": ""}),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert form.password_confirm.errors.pop() == "This field is required."
def test_passwords_mismatch_error(self, pyramid_config):
form = forms.ResetPasswordForm(
formdata=MultiDict(
{
"new_password": "password",
"password_confirm": "mismatch",
"username": "username",
"full_name": "full_name",
"email": "email",
}
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
str(form.password_confirm.errors.pop())
== "Your passwords don't match. Try again."
)
@pytest.mark.parametrize(
("password", "expected"),
[("foobar", False), ("somethingalittlebetter9", True), ("1aDeCent!1", True)],
)
def test_password_strength(self, password, expected):
form = forms.ResetPasswordForm(
formdata=MultiDict(
{
"new_password": password,
"password_confirm": password,
"username": "username",
"full_name": "full_name",
"email": "email",
}
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert form.validate() == expected
def test_password_breached(self):
form = forms.ResetPasswordForm(
formdata=MultiDict(
{
"new_password": "MyStr0ng!shPassword",
"password_confirm": "MyStr0ng!shPassword",
"username": "username",
"full_name": "full_name",
"email": "email",
}
),
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda _: None)
),
breach_service=pretend.stub(
check_password=lambda pw, tags=None: True,
failure_message=(
"This password has appeared in a breach or has otherwise been "
"compromised and cannot be used."
),
),
)
assert not form.validate()
assert form.new_password.errors.pop() == (
"This password has appeared in a breach or has otherwise been "
"compromised and cannot be used."
)
|
TestResetPasswordForm
|
python
|
pyca__cryptography
|
tests/x509/test_x509_ext.py
|
{
"start": 87161,
"end": 87885
}
|
class ____:
def test_uri(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "ian_uri.pem"),
x509.load_pem_x509_certificate,
)
ext = cert.extensions.get_extension_for_class(
x509.IssuerAlternativeName
)
assert list(ext.value) == [
x509.UniformResourceIdentifier("http://path.to.root/root.crt"),
]
def test_malformed(self):
cert = _load_cert(
os.path.join("x509", "custom", "malformed-ian.pem"),
x509.load_pem_x509_certificate,
)
with pytest.raises(ValueError, match="issuer_alternative_name"):
cert.extensions
|
TestRSAIssuerAlternativeNameExtension
|
python
|
django__django
|
tests/queries/tests.py
|
{
"start": 79181,
"end": 80446
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
def test_ticket7872(self):
# Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there
# is no Join object related to the LeafA we create.
l1 = LeafA.objects.create(data="first")
self.assertSequenceEqual(LeafA.objects.all(), [l1])
self.assertSequenceEqual(
LeafA.objects.filter(Q(data="first") | Q(join__b__data="second")),
[l1],
)
def test_ticket8283(self):
# Checking that applying filters after a disjunction works correctly.
self.assertSequenceEqual(
(
ExtraInfo.objects.filter(note=self.n1)
| ExtraInfo.objects.filter(info="e2")
).filter(note=self.n1),
[self.e1],
)
self.assertSequenceEqual(
(
ExtraInfo.objects.filter(info="e2")
| ExtraInfo.objects.filter(note=self.n1)
).filter(note=self.n1),
[self.e1],
)
|
DisjunctiveFilterTests
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/monitored_session.py
|
{
"start": 25878,
"end": 27948
}
|
class ____(SessionCreator):
"""Creates a tf.compat.v1.Session for a chief."""
def __init__(self,
scaffold=None,
master='',
config=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None):
"""Initializes a chief session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
"""
self._checkpoint_dir = checkpoint_dir
self._checkpoint_filename_with_path = checkpoint_filename_with_path
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
def _get_session_manager(self):
"""Gets or creates a SessionManager."""
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
local_init_feed_dict=self._scaffold.local_init_feed_dict,
ready_op=self._scaffold.ready_op,
ready_for_local_init_op=self._scaffold.ready_for_local_init_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().prepare_session(
self._master,
saver=self._scaffold.saver,
checkpoint_dir=self._checkpoint_dir,
checkpoint_filename_with_path=self._checkpoint_filename_with_path,
config=self._config,
init_op=self._scaffold.init_op,
init_feed_dict=self._scaffold.init_feed_dict,
init_fn=self._scaffold.init_fn)
@tf_export(v1=['train.WorkerSessionCreator'])
|
ChiefSessionCreator
|
python
|
doocs__leetcode
|
solution/0500-0599/0545.Boundary of Binary Tree/Solution.py
|
{
"start": 192,
"end": 1436
}
|
class ____:
def boundaryOfBinaryTree(self, root: Optional[TreeNode]) -> List[int]:
def dfs(nums: List[int], root: Optional[TreeNode], i: int):
if root is None:
return
if i == 0:
if root.left != root.right:
nums.append(root.val)
if root.left:
dfs(nums, root.left, i)
else:
dfs(nums, root.right, i)
elif i == 1:
if root.left == root.right:
nums.append(root.val)
else:
dfs(nums, root.left, i)
dfs(nums, root.right, i)
else:
if root.left != root.right:
nums.append(root.val)
if root.right:
dfs(nums, root.right, i)
else:
dfs(nums, root.left, i)
ans = [root.val]
if root.left == root.right:
return ans
left, leaves, right = [], [], []
dfs(left, root.left, 0)
dfs(leaves, root, 1)
dfs(right, root.right, 2)
ans += left + leaves + right[::-1]
return ans
|
Solution
|
python
|
jazzband__pip-tools
|
piptools/repositories/pypi.py
|
{
"start": 1649,
"end": 1731
}
|
class ____(_t.NamedTuple):
stream: _t.BinaryIO
size: float | None
|
FileStream
|
python
|
gawel__pyquery
|
tests/test_pyquery.py
|
{
"start": 31485,
"end": 32070
}
|
class ____(TestCase):
def setUp(self):
def app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
time.sleep(2)
return [b'foobar\n']
self.s = http.StopableWSGIServer.create(app)
self.s.wait()
self.application_url = self.s.application_url.rstrip('/')
def test_get(self):
pq(url=self.application_url)
with self.assertRaises(Exception):
pq(url=self.application_url, timeout=1)
def tearDown(self):
self.s.shutdown()
|
TestWebScrappingTimeouts
|
python
|
mkdocs__mkdocs
|
mkdocs/contrib/search/search_index.py
|
{
"start": 446,
"end": 5251
}
|
class ____:
"""
Search index is a collection of pages and sections (heading
tags and their following content are sections).
"""
def __init__(self, **config) -> None:
self._entries: list[dict] = []
self.config = config
def _find_toc_by_id(self, toc, id_: str | None) -> AnchorLink | None:
"""
Given a table of contents and HTML ID, iterate through
and return the matched item in the TOC.
"""
for toc_item in toc:
if toc_item.id == id_:
return toc_item
toc_item_r = self._find_toc_by_id(toc_item.children, id_)
if toc_item_r is not None:
return toc_item_r
return None
def _add_entry(self, title: str | None, text: str, loc: str) -> None:
"""A simple wrapper to add an entry, dropping bad characters."""
text = text.replace('\u00a0', ' ')
text = re.sub(r'[ \t\n\r\f\v]+', ' ', text.strip())
self._entries.append({'title': title, 'text': text, 'location': loc})
def add_entry_from_context(self, page: Page) -> None:
"""
        Create a set of entries in the index for a page: one for
        the page itself and then one for each of its heading
        tags.
"""
# Create the content parser and feed in the HTML for the
# full page. This handles all the parsing and prepares
# us to iterate through it.
parser = ContentParser()
assert page.content is not None
parser.feed(page.content)
parser.close()
# Get the absolute URL for the page, this is then
# prepended to the urls of the sections
url = page.url
# Create an entry for the full page.
text = parser.stripped_html.rstrip('\n') if self.config['indexing'] == 'full' else ''
self._add_entry(title=page.title, text=text, loc=url)
if self.config['indexing'] in ['full', 'sections']:
for section in parser.data:
self.create_entry_for_section(section, page.toc, url)
def create_entry_for_section(
self, section: ContentSection, toc: TableOfContents, abs_url: str
) -> None:
"""
Given a section on the page, the table of contents and
the absolute url for the page create an entry in the
index.
"""
toc_item = self._find_toc_by_id(toc, section.id)
text = ' '.join(section.text) if self.config['indexing'] == 'full' else ''
if toc_item is not None:
self._add_entry(title=toc_item.title, text=text, loc=abs_url + toc_item.url)
def generate_search_index(self) -> str:
"""Python to json conversion."""
page_dicts = {'docs': self._entries, 'config': self.config}
data = json.dumps(page_dicts, sort_keys=True, separators=(',', ':'), default=str)
if self.config['prebuild_index'] in (True, 'node'):
try:
script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'prebuild-index.js'
)
p = subprocess.Popen(
['node', script_path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='utf-8',
)
idx, err = p.communicate(data)
if not err:
page_dicts['index'] = json.loads(idx)
data = json.dumps(page_dicts, sort_keys=True, separators=(',', ':'))
log.debug('Pre-built search index created successfully.')
else:
log.warning(f'Failed to pre-build search index. Error: {err}')
except (OSError, ValueError) as e:
log.warning(f'Failed to pre-build search index. Error: {e}')
elif self.config['prebuild_index'] == 'python':
if haslunrpy:
lunr_idx = lunr(
ref='location',
fields=('title', 'text'),
documents=self._entries,
languages=self.config['lang'],
)
page_dicts['index'] = lunr_idx.serialize()
data = json.dumps(page_dicts, sort_keys=True, separators=(',', ':'))
else:
log.warning(
"Failed to pre-build search index. The 'python' method was specified; "
"however, the 'lunr.py' library does not appear to be installed. Try "
"installing it with 'pip install lunr'. If you are using any language "
"other than English you will also need to install 'lunr[languages]'."
)
return data
|
SearchIndex
|
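`_add_entry` in the class above normalises text before indexing it: non-breaking spaces are replaced and runs of whitespace are collapsed. A small standalone check of that normalisation, using plain `re` only (no MkDocs required):

import re

text = "Hello\u00a0 world\n\nthis   is\tindexed"
text = text.replace("\u00a0", " ")                    # drop non-breaking spaces
text = re.sub(r"[ \t\n\r\f\v]+", " ", text.strip())   # collapse whitespace runs
print(repr(text))  # 'Hello world this is indexed'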
python
|
astropy__astropy
|
astropy/io/fits/tests/test_convenience.py
|
{
"start": 532,
"end": 18600
}
|
class ____(FitsTestCase):
def test_resource_warning(self):
warnings.simplefilter("always", ResourceWarning)
_ = fits.getdata(self.data("test0.fits"))
_ = fits.getheader(self.data("test0.fits"))
def test_fileobj_not_closed(self):
"""
Tests that file-like objects are not closed after being passed
to convenience functions.
Regression test for https://github.com/astropy/astropy/issues/5063
"""
f = open(self.data("test0.fits"), "rb")
_ = fits.getdata(f)
assert not f.closed
f.seek(0)
_ = fits.getheader(f)
assert not f.closed
f.close() # Close it now
def test_table_to_hdu(self):
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = "m/s"
table["b"].unit = "not-a-unit"
with pytest.warns(
u.UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.table_to_hdu(table, name="MYTABLE")
assert len(w) == 1
assert hdu.header["EXTNAME"] == "MYTABLE"
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2")
assert isinstance(hdu, fits.BinTableHDU)
filename = self.temp("test_table_to_hdu.fits")
hdu.writeto(filename)
assert fits.getval(filename, "EXTNAME", ext=1) == "MYTABLE"
def test_masked_table_to_hdu(self):
i = np.ma.MaskedArray([1, 2, 3], mask=[True, False, False])
s = np.ma.MaskedArray(["a", "b", "c"], mask=[False, True, True])
c = np.ma.MaskedArray([2.3 + 1j, 4.5 + 0j, 6.7 - 1j], mask=[True, False, True])
f = np.ma.MaskedArray([2.3, 4.5, 6.7], mask=[True, False, True])
table = Table([i, s, c, f], names=["i", "s", "c", "f"])
# Check that FITS standard is used in replacing masked values.
hdu = fits.table_to_hdu(table)
assert isinstance(hdu, fits.BinTableHDU)
assert hdu.header["TNULL1"] == i.fill_value
assert_array_equal(hdu.data["i"], i.filled())
assert_array_equal(hdu.data["s"], s.filled(""))
assert_array_equal(hdu.data["c"], c.filled(np.nan))
assert_array_equal(hdu.data["c"].real, c.real.filled(np.nan))
assert_array_equal(hdu.data["c"].imag, c.imag.filled(np.nan))
assert_array_equal(hdu.data["c"], c.filled(complex(np.nan, np.nan)))
assert_array_equal(hdu.data["f"], f.filled(np.nan))
filename = self.temp("test_table_to_hdu.fits")
hdu.writeto(filename, overwrite=True)
def test_masked_integer_arrays(self):
# Regression test for #18817
testfile = self.temp("test_masked_integer_arrays.fits")
t_w = Table(
rows=[
[[np.ma.masked, np.ma.masked]],
[[1, 2]],
[[1, np.ma.masked]],
],
names=["a"],
)
t_w.write(testfile, overwrite=True)
t_r = Table.read(testfile)
assert repr(t_w) == repr(t_r)
def test_table_non_stringifyable_unit_to_hdu(self):
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = u.core.IrreducibleUnit("test")
with pytest.warns(
AstropyUserWarning, match="The unit 'test' could not be saved"
) as w:
fits.table_to_hdu(table)
assert len(w) == 1
def test_table_to_hdu_convert_comment_convention(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table.meta["comments"] = ["This", "is", "a", "comment"]
hdu = fits.table_to_hdu(table)
assert hdu.header.get("comment") == ["This", "is", "a", "comment"]
with pytest.raises(ValueError):
hdu.header.index("comments")
def test_table_to_hdu_filter_reserved(self):
"""
Regression test for https://github.com/astropy/astropy/issues/9387
"""
diag = "be ignored since it conflicts with a FITS reserved keyword"
ins_cards = {
"EXPTIME": 32.1,
"XTENSION": "NEWTABLE",
"NAXIS": 1,
"NAXIS1": 3,
"NAXIS2": 9,
"PCOUNT": 42,
"OBSERVER": "Adams",
}
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i4", "U1", "f8"],
)
table.meta.update(ins_cards)
with pytest.warns(
AstropyUserWarning, match=rf"Meta-data keyword \w+ will {diag}"
) as w:
hdu = fits.table_to_hdu(table)
# This relies on the warnings being raised in the order of the
# meta dict (note that the first and last card are legitimate keys)
assert len(w) == len(ins_cards) - 2
for i, key in enumerate(list(ins_cards)[1:-1]):
assert f"Meta-data keyword {key}" in str(w[i].message)
assert hdu.header.get("XTENSION") == "BINTABLE"
assert hdu.header.get("NAXIS") == 2
assert hdu.header.get("NAXIS1") == 13
assert hdu.header.get("NAXIS2") == 3
assert hdu.header.get("PCOUNT") == 0
np.testing.assert_almost_equal(hdu.header.get("EXPTIME"), 3.21e1)
@pytest.mark.parametrize("card", REMOVE_KEYWORDS)
def test_table_to_hdu_warn_reserved(self, card):
"""
Test warning for each keyword in ..connect.REMOVE_KEYWORDS, 1 by 1
"""
diag = "be ignored since it conflicts with a FITS reserved keyword"
res_cards = {
"XTENSION": "BINTABLE",
"BITPIX": 8,
"NAXIS": 2,
"NAXIS1": 12,
"NAXIS2": 3,
"PCOUNT": 0,
"GCOUNT": 1,
"TFIELDS": 2,
"THEAP": None,
}
ins_cards = {
"XTENSION": "TABLE",
"BITPIX": 16,
"NAXIS": 1,
"NAXIS1": 2,
"NAXIS2": 6,
"PCOUNT": 2,
"GCOUNT": 2,
"TFIELDS": 4,
"THEAP": 36,
}
table = Table(
[[1.0, 2.0, 3.0], [2.3, 4.5, 6.7]],
names=["wavelength", "flux"],
dtype=["f8", "f4"],
)
table.meta["ORIGIN"] = "Min.Silly Walks"
table.meta[card] = ins_cards[card]
assert table.meta.get(card) != res_cards[card]
with pytest.warns(
AstropyUserWarning, match=f"Meta-data keyword {card} will {diag}"
):
hdu = fits.table_to_hdu(table)
assert hdu.header.get(card) == res_cards[card]
assert hdu.header.get("ORIGIN") == "Min.Silly Walks"
def test_table_to_hdu_filter_incompatible(self):
"""
Test removal of unsupported data types from header
"""
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i4", "U1", "f8"],
)
table.meta.update(
{
"OBSDATE": "2001-05-26",
"RAMP": np.arange(5),
"TARGETS": {"PRIMARY": 1, "SECONDAR": 3},
}
)
with pytest.warns(
AstropyUserWarning,
match=r"Attribute \S+ of type "
r".+ cannot be added to FITS Header - skipping",
):
hdu = fits.table_to_hdu(table)
assert hdu.header.get("OBSDATE") == "2001-05-26"
assert "RAMP" not in hdu.header
assert "TARGETS" not in hdu.header
def test_table_writeto_header(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5988
"""
data = np.zeros((5,), dtype=[("x", float), ("y", int)])
h_in = fits.Header()
h_in["ANSWER"] = (42.0, "LTU&E")
filename = self.temp("tabhdr42.fits")
fits.writeto(filename, data=data, header=h_in, overwrite=True)
h_out = fits.getheader(filename, ext=1)
assert h_out["ANSWER"] == 42
def test_image_extension_update_header(self, home_is_temp):
"""
Test that _makehdu correctly includes the header. For example in the
fits.update convenience function.
"""
filename = self.temp("twoextension.fits")
hdus = [fits.PrimaryHDU(np.zeros((10, 10))), fits.ImageHDU(np.zeros((10, 10)))]
# Try to update a non-existent file
with pytest.raises(FileNotFoundError, match="No such file"):
fits.update(
filename, np.zeros((10, 10)), header=fits.Header([("WHAT", 100)]), ext=1
)
fits.HDUList(hdus).writeto(filename)
fits.update(
filename, np.zeros((10, 10)), header=fits.Header([("WHAT", 100)]), ext=1
)
h_out = fits.getheader(filename, ext=1)
assert h_out["WHAT"] == 100
def test_printdiff(self):
"""
Test that FITSDiff can run the different inputs without crashing.
"""
# Testing different string input options
assert printdiff(self.data("arange.fits"), self.data("blank.fits")) is None
assert (
printdiff(self.data("arange.fits"), self.data("blank.fits"), ext=0) is None
)
assert (
printdiff(
self.data("o4sp040b0_raw.fits"),
self.data("o4sp040b0_raw.fits"),
extname="sci",
)
is None
)
        # This may seem odd, but printdiff also needs to be tested with an
        # incorrect second file
with pytest.raises(OSError):
printdiff("o4sp040b0_raw.fits", "fakefile.fits", extname="sci")
# Test HDU object inputs
with fits.open(self.data("stddata.fits"), mode="readonly") as in1:
with fits.open(self.data("checksum.fits"), mode="readonly") as in2:
assert printdiff(in1[0], in2[0]) is None
with pytest.raises(ValueError):
printdiff(in1[0], in2[0], ext=0)
assert printdiff(in1, in2) is None
with pytest.raises(NotImplementedError):
printdiff(in1, in2, 0)
def test_tabledump(self):
"""
A simple test of the dump method.
Also regression test for https://github.com/astropy/astropy/issues/6937
"""
datastr = (
'" 1" "abc" " 3.70000007152557" " 0"\n'
'" 2" "xy " " 6.69999971389771" " 1"\n'
)
cdstr = (
'c1 1J I11 "" ""'
' -2147483647 "" "" \n'
'c2 3A A3 "" ""'
' "" "" "" \n'
'c3 1E G15.7 "" ""'
' "" 3 0.4 \n'
'c4 1L L6 "" ""'
' "" "" "" \n'
)
# copy fits file to the temp directory
testfile = self.copy_file("tb.fits")
# test without datafile
fits.tabledump(testfile)
assert os.path.isfile(self.temp("tb_1.txt"))
# test with datafile
fits.tabledump(testfile, datafile=self.temp("test_tb.txt"))
assert os.path.isfile(self.temp("test_tb.txt"))
# test with datafile and cdfile
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
fits.tabledump(testfile, datafile, cdfile)
assert os.path.isfile(datafile)
with open(datafile) as data:
assert data.read() == datastr
with open(cdfile) as coldefs:
assert coldefs.read() == cdstr
@pytest.mark.parametrize("tablename", ["table.fits", "tb.fits"])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
# copy fits file to the temp directory
testfile = self.copy_file(tablename)
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
fits.tabledump(testfile, datafile, cdfile, hfile)
new_tbhdu = fits.tableload(datafile, cdfile, hfile)
with fits.open(testfile) as hdul:
_assert_attr_col(new_tbhdu, hdul[1])
def test_append_filename(self, home_is_temp):
"""
Test fits.append with a filename argument.
"""
data = np.arange(6)
testfile = self.temp("test_append_1.fits")
# Test case 1: creation of file
fits.append(testfile, data=data, checksum=True)
# Test case 2: append to existing file, with verify=True
# Also test that additional keyword can be passed to fitsopen
fits.append(testfile, data=data * 2, checksum=True, ignore_blank=True)
# Test case 3: append to existing file, with verify=False
fits.append(testfile, data=data * 3, checksum=True, verify=False)
with fits.open(testfile, checksum=True) as hdu1:
np.testing.assert_array_equal(hdu1[0].data, data)
np.testing.assert_array_equal(hdu1[1].data, data * 2)
np.testing.assert_array_equal(hdu1[2].data, data * 3)
@pytest.mark.parametrize("mode", ["wb", "wb+", "ab", "ab+"])
def test_append_filehandle(self, tmp_path, mode):
"""
Test fits.append with a file handle argument.
"""
append_file = tmp_path / "append.fits"
with append_file.open(mode) as handle:
fits.append(filename=handle, data=np.ones((4, 4)))
def test_append_with_header(self):
"""
Test fits.append with a fits Header, which triggers detection of the
HDU class. Regression test for
https://github.com/astropy/astropy/issues/8660
"""
testfile = self.temp("test_append_1.fits")
with fits.open(self.data("test0.fits")) as hdus:
for hdu in hdus:
fits.append(testfile, hdu.data, hdu.header, checksum=True)
with fits.open(testfile, checksum=True) as hdus:
assert len(hdus) == 5
def test_pathlib(self):
testfile = pathlib.Path(self.temp("test.fits"))
data = np.arange(10)
hdulist = fits.HDUList([fits.PrimaryHDU(data)])
hdulist.writeto(testfile)
with fits.open(testfile) as hdul:
np.testing.assert_array_equal(hdul[0].data, data)
def test_getdata_ext_given(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=2 * np.ones((5, 5), dtype=int))
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
for ext in [0, 1, 2]:
buf.seek(0)
data = fits.getdata(buf, ext=ext)
assert data[0, 0] == ext
def test_getdata_ext_given_nodata(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(IndexError, match="No data in HDU #2."):
fits.getdata(buf, ext=2)
def test_getdata_ext_not_given_with_data_in_primary(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=None)
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
data = fits.getdata(buf)
assert data[0, 0] == 0
def test_getdata_ext_not_given_with_data_in_ext(self):
# tests fallback mechanism
prihdu = fits.PrimaryHDU(data=None)
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
data = fits.getdata(buf)
assert data[0, 0] == 1
def test_getdata_ext_not_given_nodata_any(self):
# tests exception raised when there is no data in either
# Primary HDU or first extension HDU
prihdu = fits.PrimaryHDU(data=None)
exthdu1 = fits.ImageHDU(data=None)
exthdu2 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(
IndexError, match="No data in either Primary or first extension HDUs."
):
fits.getdata(buf)
def test_getdata_ext_not_given_nodata_noext(self):
# tests exception raised when there is no data in the
# Primary HDU and there are no extension HDUs
prihdu = fits.PrimaryHDU(data=None)
hdulist = fits.HDUList([prihdu])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(
IndexError, match="No data in Primary HDU and no extension HDU found."
):
fits.getdata(buf)
|
TestConvenience
|
python
|
fluentpython__example-code-2e
|
14-inheritance/uppermixin.py
|
{
"start": 2806,
"end": 2878
}
|
class ____(UpperCaseMixin, collections.UserDict): # <1>
pass
|
UpperDict
|
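`UpperDict` above gets all of its behaviour from the mixin plus `UserDict`. Since `UpperCaseMixin` itself is not included in this snippet, the sketch below uses a simplified stand-in for it (the original also covers `get`, `__contains__` and non-string keys) just to show how the cooperative `super()` calls compose.

import collections

class UpperCaseMixin:
    """Simplified stand-in: fold keys to uppercase, then delegate via super()."""
    def __setitem__(self, key, value):
        super().__setitem__(key.upper(), value)

    def __getitem__(self, key):
        return super().__getitem__(key.upper())

class UpperDict(UpperCaseMixin, collections.UserDict):
    pass

d = UpperDict()
d["a"] = 1
print(d["A"], dict(d))  # 1 {'A': 1}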
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/oauth2_backends.py
|
{
"start": 376,
"end": 9368
}
|
class ____:
"""
Wrapper for oauth Server providing django-specific interfaces.
Meant for things like extracting request data and converting
everything to formats more palatable for oauthlib's Server.
"""
def __init__(self, server=None):
"""
:params server: An instance of oauthlib.oauth2.Server class
"""
validator_class = oauth2_settings.OAUTH2_VALIDATOR_CLASS
validator = validator_class()
server_kwargs = oauth2_settings.server_kwargs
self.server = server or oauth2_settings.OAUTH2_SERVER_CLASS(validator, **server_kwargs)
def _get_escaped_full_path(self, request):
"""
Django considers "safe" some characters that aren't so for oauthlib.
We have to search for them and properly escape.
"""
parsed = list(urlparse(request.get_full_path()))
unsafe = set(c for c in parsed[4]).difference(urlencoded)
for c in unsafe:
parsed[4] = parsed[4].replace(c, quote(c, safe=b""))
return urlunparse(parsed)
def _get_extra_credentials(self, request):
"""
Produce extra credentials for token response. This dictionary will be
merged with the response.
See also: `oauthlib.oauth2.rfc6749.TokenEndpoint.create_token_response`
:param request: The current django.http.HttpRequest object
:return: dictionary of extra credentials or None (default)
"""
return None
def _extract_params(self, request):
"""
Extract parameters from the Django request object.
Such parameters will then be passed to OAuthLib to build its own
Request object. The body should be encoded using OAuthLib urlencoded.
"""
uri = self._get_escaped_full_path(request)
http_method = request.method
headers = self.extract_headers(request)
body = urlencode(self.extract_body(request))
return uri, http_method, body, headers
def extract_headers(self, request):
"""
Extracts headers from the Django request object
:param request: The current django.http.HttpRequest object
:return: a dictionary with OAuthLib needed headers
"""
headers = request.META.copy()
if "wsgi.input" in headers:
del headers["wsgi.input"]
if "wsgi.errors" in headers:
del headers["wsgi.errors"]
if "HTTP_AUTHORIZATION" in headers:
headers["Authorization"] = headers["HTTP_AUTHORIZATION"]
if "CONTENT_TYPE" in headers:
headers["Content-Type"] = headers["CONTENT_TYPE"]
# Add Access-Control-Allow-Origin header to the token endpoint response for authentication code grant,
# if the origin is allowed by RequestValidator.is_origin_allowed.
# https://github.com/oauthlib/oauthlib/pull/791
if "HTTP_ORIGIN" in headers:
headers["Origin"] = headers["HTTP_ORIGIN"]
if request.is_secure():
headers["X_DJANGO_OAUTH_TOOLKIT_SECURE"] = "1"
elif "X_DJANGO_OAUTH_TOOLKIT_SECURE" in headers:
del headers["X_DJANGO_OAUTH_TOOLKIT_SECURE"]
return headers
def extract_body(self, request):
"""
Extracts the POST body from the Django request object
:param request: The current django.http.HttpRequest object
:return: provided POST parameters
"""
return request.POST.items()
def validate_authorization_request(self, request):
"""
A wrapper method that calls validate_authorization_request on `server_class` instance.
:param request: The current django.http.HttpRequest object
"""
try:
uri, http_method, body, headers = self._extract_params(request)
scopes, credentials = self.server.validate_authorization_request(
uri, http_method=http_method, body=body, headers=headers
)
return scopes, credentials
except oauth2.FatalClientError as error:
raise FatalClientError(error=error)
except oauth2.OAuth2Error as error:
raise OAuthToolkitError(error=error)
def create_authorization_response(self, request, scopes, credentials, allow):
"""
A wrapper method that calls create_authorization_response on `server_class`
instance.
:param request: The current django.http.HttpRequest object
:param scopes: A list of provided scopes
:param credentials: Authorization credentials dictionary containing
`client_id`, `state`, `redirect_uri`, `response_type`
:param allow: True if the user authorize the client, otherwise False
"""
try:
if not allow:
raise oauth2.AccessDeniedError(state=credentials.get("state", None))
# add current user to credentials. this will be used by OAUTH2_VALIDATOR_CLASS
credentials["user"] = request.user
request_uri, http_method, _, request_headers = self._extract_params(request)
headers, body, status = self.server.create_authorization_response(
uri=request_uri,
http_method=http_method,
headers=request_headers,
scopes=scopes,
credentials=credentials,
)
uri = headers.get("Location", None)
return uri, headers, body, status
except oauth2.FatalClientError as error:
raise FatalClientError(error=error, redirect_uri=credentials["redirect_uri"])
except oauth2.OAuth2Error as error:
raise OAuthToolkitError(error=error, redirect_uri=credentials["redirect_uri"])
def create_device_authorization_response(self, request: HttpRequest):
uri, http_method, body, headers = self._extract_params(request)
try:
headers, body, status = self.server.create_device_authorization_response(
uri, http_method, body, headers
)
return headers, body, status
except OAuth2Error as exc:
return exc.headers, exc.json, exc.status_code
def create_token_response(self, request):
"""
A wrapper method that calls create_token_response on `server_class` instance.
:param request: The current django.http.HttpRequest object
"""
uri, http_method, body, headers = self._extract_params(request)
extra_credentials = self._get_extra_credentials(request)
try:
headers, body, status = self.server.create_token_response(
uri, http_method, body, headers, extra_credentials
)
uri = headers.get("Location", None)
return uri, headers, body, status
except OAuth2Error as exc:
return None, exc.headers, exc.json, exc.status_code
def create_revocation_response(self, request):
"""
A wrapper method that calls create_revocation_response on a
`server_class` instance.
:param request: The current django.http.HttpRequest object
"""
uri, http_method, body, headers = self._extract_params(request)
headers, body, status = self.server.create_revocation_response(uri, http_method, body, headers)
uri = headers.get("Location", None)
return uri, headers, body, status
def create_userinfo_response(self, request):
"""
A wrapper method that calls create_userinfo_response on a
`server_class` instance.
:param request: The current django.http.HttpRequest object
"""
uri, http_method, body, headers = self._extract_params(request)
try:
headers, body, status = self.server.create_userinfo_response(uri, http_method, body, headers)
uri = headers.get("Location", None)
return uri, headers, body, status
except OAuth2Error as exc:
return None, exc.headers, exc.json, exc.status_code
def verify_request(self, request, scopes):
"""
A wrapper method that calls verify_request on `server_class` instance.
:param request: The current django.http.HttpRequest object
:param scopes: A list of scopes required to verify so that request is verified
"""
uri, http_method, body, headers = self._extract_params(request)
valid, r = self.server.verify_request(uri, http_method, body, headers, scopes=scopes)
return valid, r
def authenticate_client(self, request):
"""Wrapper to call `authenticate_client` on `server_class` instance.
:param request: The current django.http.HttpRequest object
"""
uri, http_method, body, headers = self._extract_params(request)
oauth_request = OauthlibRequest(uri, http_method, body, headers)
return self.server.request_validator.authenticate_client(oauth_request)
|
OAuthLibCore
|
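`_get_escaped_full_path` above percent-encodes query characters that Django leaves alone but oauthlib rejects. The standalone sketch below shows the same idea; the `urlencoded` safe-set is re-derived here as an approximation, because the module-level constant used by the original file is not part of this snippet.

import string
from urllib.parse import urlparse, urlunparse, quote

# Approximate safe-set; the real one comes from oauthlib/django-oauth-toolkit.
urlencoded = set(string.ascii_letters + string.digits + "-._~:/?#[]@!$&'()*+,;=%")

def escape_query(url: str) -> str:
    parsed = list(urlparse(url))
    unsafe = set(parsed[4]).difference(urlencoded)   # parsed[4] is the query string
    for c in unsafe:
        parsed[4] = parsed[4].replace(c, quote(c, safe=b""))
    return urlunparse(parsed)

print(escape_query("/authorize/?next=/home page/"))  # the space becomes %20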
python
|
sympy__sympy
|
sympy/core/containers.py
|
{
"start": 606,
"end": 6060
}
|
class ____(Basic):
"""
Wrapper around the builtin tuple object.
Explanation
===========
The Tuple is a subclass of Basic, so that it works well in the
SymPy framework. The wrapped tuple is available as self.args, but
you can also access elements or slices with [:] syntax.
Parameters
==========
sympify : bool
If ``False``, ``sympify`` is not called on ``args``. This
can be used for speedups for very large tuples where the
elements are known to already be SymPy objects.
Examples
========
>>> from sympy import Tuple, symbols
>>> a, b, c, d = symbols('a b c d')
>>> Tuple(a, b, c)[1:]
(b, c)
>>> Tuple(a, b, c).subs(a, d)
(d, b, c)
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('sympify', True):
args = (sympify(arg) for arg in args)
obj = Basic.__new__(cls, *args)
return obj
def __getitem__(self, i):
if isinstance(i, slice):
indices = i.indices(len(self))
return Tuple(*(self.args[j] for j in range(*indices)))
return self.args[i]
def __len__(self):
return len(self.args)
def __contains__(self, item):
return item in self.args
def __iter__(self):
return iter(self.args)
def __add__(self, other):
if isinstance(other, Tuple):
return Tuple(*(self.args + other.args))
elif isinstance(other, tuple):
return Tuple(*(self.args + other))
else:
return NotImplemented
def __radd__(self, other):
if isinstance(other, Tuple):
return Tuple(*(other.args + self.args))
elif isinstance(other, tuple):
return Tuple(*(other + self.args))
else:
return NotImplemented
def __mul__(self, other):
try:
n = as_int(other)
except ValueError:
raise TypeError("Can't multiply sequence by non-integer of type '%s'" % type(other))
return self.func(*(self.args*n))
__rmul__ = __mul__
def __eq__(self, other):
if isinstance(other, Basic):
return super().__eq__(other)
return self.args == other
def __ne__(self, other):
if isinstance(other, Basic):
return super().__ne__(other)
return self.args != other
def __hash__(self):
return hash(self.args)
def _to_mpmath(self, prec):
return tuple(a._to_mpmath(prec) for a in self.args)
def __lt__(self, other):
return _sympify(self.args < other.args)
def __le__(self, other):
return _sympify(self.args <= other.args)
# XXX: Basic defines count() as something different, so we can't
    # redefine it here. Originally this led to a cse() test failure.
def tuple_count(self, value) -> int:
"""Return number of occurrences of value."""
return self.args.count(value)
def index(self, value, start=None, stop=None):
"""Searches and returns the first index of the value."""
# XXX: One would expect:
#
# return self.args.index(value, start, stop)
#
# here. Any trouble with that? Yes:
#
# >>> (1,).index(1, None, None)
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# TypeError: slice indices must be integers or None or have an __index__ method
#
# See: http://bugs.python.org/issue13340
if start is None and stop is None:
return self.args.index(value)
elif stop is None:
return self.args.index(value, start)
else:
return self.args.index(value, start, stop)
@property
def kind(self):
"""
The kind of a Tuple instance.
The kind of a Tuple is always of :class:`TupleKind` but
parametrised by the number of elements and the kind of each element.
Examples
========
>>> from sympy import Tuple, Matrix
>>> Tuple(1, 2).kind
TupleKind(NumberKind, NumberKind)
>>> Tuple(Matrix([1, 2]), 1).kind
TupleKind(MatrixKind(NumberKind), NumberKind)
>>> Tuple(1, 2).kind.element_kind
(NumberKind, NumberKind)
See Also
========
sympy.matrices.kind.MatrixKind
sympy.core.kind.NumberKind
"""
return TupleKind(*(i.kind for i in self.args))
_sympy_converter[tuple] = lambda tup: Tuple(*tup)
def tuple_wrapper(method):
"""
Decorator that converts any tuple in the function arguments into a Tuple.
Explanation
===========
The motivation for this is to provide simple user interfaces. The user can
call a function with regular tuples in the argument, and the wrapper will
convert them to Tuples before handing them to the function.
    Examples
    ========
>>> from sympy.core.containers import tuple_wrapper
>>> def f(*args):
... return args
>>> g = tuple_wrapper(f)
The decorated function g sees only the Tuple argument:
>>> g(0, (1, 2), 3)
(0, (1, 2), 3)
"""
def wrap_tuples(*args, **kw_args):
newargs = []
for arg in args:
if isinstance(arg, tuple):
newargs.append(Tuple(*arg))
else:
newargs.append(arg)
return method(*newargs, **kw_args)
return wrap_tuples
|
Tuple
|
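Assuming SymPy is installed, the snippet below exercises the slicing, concatenation and `tuple_count` behaviour documented in the `Tuple` wrapper above; nothing here is new API, only the class's own public surface.

from sympy import Tuple, symbols

a, b, c = symbols("a b c")
t = Tuple(a, b, c)
print(t[1:])                       # (b, c): slicing returns another Tuple
print(t + (a,))                    # (a, b, c, a): plain tuples are absorbed
print((t + (a,)).tuple_count(a))   # 2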
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/declarative_automation/operands/operands.py
|
{
"start": 7158,
"end": 7493
}
|
class ____(SubsetAutomationCondition):
@property
def name(self) -> str:
return "newly_requested"
def compute_subset(self, context: AutomationContext) -> EntitySubset:
return context.get_previous_requested_subset(context.key) or context.get_empty_subset()
@whitelist_for_serdes
@record
|
NewlyRequestedCondition
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/agents/react/base.py
|
{
"start": 1240,
"end": 2839
}
|
class ____(Agent):
"""Agent for the ReAct chain."""
output_parser: AgentOutputParser = Field(default_factory=ReActOutputParser)
@classmethod
@override
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return ReActOutputParser()
@property
def _agent_type(self) -> str:
"""Return Identifier of an agent type."""
return AgentType.REACT_DOCSTORE
@classmethod
@override
def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
"""Return default prompt."""
return WIKI_PROMPT
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
validate_tools_single_input(cls.__name__, tools)
super()._validate_tools(tools)
if len(tools) != len(_LOOKUP_AND_SEARCH_TOOLS):
msg = f"Exactly two tools must be specified, but got {tools}"
raise ValueError(msg)
tool_names = {tool.name for tool in tools}
if tool_names != _LOOKUP_AND_SEARCH_TOOLS:
msg = f"Tool names should be Lookup and Search, got {tool_names}"
raise ValueError(msg)
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def _stop(self) -> list[str]:
return ["\nObservation:"]
@property
def llm_prefix(self) -> str:
"""Prefix to append the LLM call with."""
return "Thought:"
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
|
ReActDocstoreAgent
|
python
|
PrefectHQ__prefect
|
tests/custom_types/test_self_validating_types.py
|
{
"start": 273,
"end": 1053
}
|
class ____:
@pytest.mark.parametrize(
"integer_type,valid_value",
[
(PositiveInteger, 1),
(NonNegativeInteger, 0),
],
)
def test_valid_integer(self, integer_type, valid_value):
class Model(BaseModel):
value: integer_type
m = Model(value=valid_value)
assert m.value == valid_value
@pytest.mark.parametrize(
"integer_type,invalid_value",
[
(PositiveInteger, 0),
(NonNegativeInteger, -1),
],
)
def test_invalid_integer(self, integer_type, invalid_value):
class Model(BaseModel):
value: integer_type
with pytest.raises(ValidationError):
Model(value=invalid_value)
|
TestConstrainedIntegers
|
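The Prefect-specific `PositiveInteger`/`NonNegativeInteger` types are assumed here to behave like Pydantic's built-in constrained integers, so a hedged, Prefect-free sketch of the same validation idea looks like this:

from pydantic import BaseModel, NonNegativeInt, PositiveInt, ValidationError

class Model(BaseModel):
    a: PositiveInt      # must be > 0
    b: NonNegativeInt   # must be >= 0

print(Model(a=1, b=0))
try:
    Model(a=0, b=0)     # a=0 violates the > 0 constraint
except ValidationError as exc:
    print("rejected:", exc.errors()[0]["loc"])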
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-elegance-of-a-k-length-subsequence.py
|
{
"start": 1220,
"end": 3321
}
|
class ____(object):
def findMaximumElegance(self, items, k):
"""
:type items: List[List[int]]
:type k: int
:rtype: int
"""
def nth_element(nums, n, left=0, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
right = len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
def nlargest(k, nums):
nth_element(nums, k-1, compare=lambda a, b: a > b)
return sorted(nums[:k], reverse=True)
curr = 0
lookup = set()
stk = []
for p, c in nlargest(k, items):
if c in lookup:
stk.append(p)
curr += p
lookup.add(c)
lookup2 = collections.defaultdict(int)
for p, c in items:
if c in lookup:
continue
lookup2[c] = max(lookup2[c], p)
l = len(lookup)
result = curr+l**2
for p in nlargest(min(len(stk), len(lookup2)), lookup2.values()):
curr += p-stk.pop()
l += 1
result = max(result, curr+l**2)
return result
# Time: O(nlogn)
# Space: O(k)
# sort, greedy
|
Solution2
|
python
|
doocs__leetcode
|
solution/2500-2599/2593.Find Score of an Array After Marking All Elements/Solution2.py
|
{
"start": 0,
"end": 340
}
|
class ____:
def findScore(self, nums: List[int]) -> int:
n = len(nums)
vis = [False] * (n + 2)
idx = sorted(range(n), key=lambda i: (nums[i], i))
ans = 0
for i in idx:
if not vis[i + 1]:
ans += nums[i]
vis[i] = vis[i + 2] = True
return ans
|
Solution
|
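To make the greedy-by-sorted-index solution above runnable on its own, the sketch below restates it under the assumed name `Solution` (the original class name is masked) together with the needed import, and checks it against a hand-traced example input.

from typing import List

class Solution:  # assumed name; restated only so the example runs standalone
    def findScore(self, nums: List[int]) -> int:
        n = len(nums)
        vis = [False] * (n + 2)                     # padded so i and i + 2 never go out of range
        idx = sorted(range(n), key=lambda i: (nums[i], i))
        ans = 0
        for i in idx:
            if not vis[i + 1]:                      # element i (offset by 1) is still unmarked
                ans += nums[i]
                vis[i] = vis[i + 2] = True          # mark its left and right neighbours
        return ans

print(Solution().findScore([2, 1, 3, 4, 5, 2]))  # 7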
python
|
pola-rs__polars
|
py-polars/src/polars/datatypes/classes.py
|
{
"start": 10634,
"end": 11262
}
|
class ____(FloatType):
"""16-bit floating point type.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
.. warning::
        Regular computing platforms do not natively support `Float16` operations,
        so compute operations on `Float16` will be significantly slower than
        operations on :class:`Float32` or :class:`Float64`.
As such, it is recommended to cast to `Float32` before doing any compute
operations, and cast back to `Float16` afterward if needed.
"""
|
Float16
|
python
|
wandb__wandb
|
wandb/sdk/data_types/_dtypes.py
|
{
"start": 11429,
"end": 11519
}
|
class ____(Type):
name = "string"
types: t.ClassVar[t.List[type]] = [str]
|
StringType
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/other/test_flower.py
|
{
"start": 18191,
"end": 22514
}
|
class ____:
"""Tests flower service."""
@pytest.mark.parametrize(
("executor", "flower_enabled", "created"),
[
("CeleryExecutor", False, False),
("CeleryKubernetesExecutor", False, False),
("KubernetesExecutor", False, False),
("CeleryExecutor", True, True),
("CeleryKubernetesExecutor", True, True),
("KubernetesExecutor", True, False),
],
)
def test_create_flower(self, executor, flower_enabled, created):
docs = render_chart(
values={"executor": executor, "flower": {"enabled": flower_enabled}},
show_only=["templates/flower/flower-service.yaml"],
)
assert bool(docs) is created
if created:
assert jmespath.search("metadata.name", docs[0]) == "release-name-flower"
def test_default_service(self):
docs = render_chart(
values={"flower": {"enabled": True}},
show_only=["templates/flower/flower-service.yaml"],
)
assert jmespath.search("metadata.name", docs[0]) == "release-name-flower"
assert jmespath.search("metadata.annotations", docs[0]) is None
assert jmespath.search("spec.selector", docs[0]) == {
"tier": "airflow",
"component": "flower",
"release": "release-name",
}
assert jmespath.search("spec.type", docs[0]) == "ClusterIP"
assert {"name": "flower-ui", "port": 5555} in jmespath.search("spec.ports", docs[0])
def test_overrides(self):
docs = render_chart(
values={
"ports": {"flowerUI": 9000},
"flower": {
"enabled": True,
"service": {
"type": "LoadBalancer",
"loadBalancerIP": "127.0.0.1",
"annotations": {"foo": "bar"},
"loadBalancerSourceRanges": ["10.123.0.0/16"],
},
},
},
show_only=["templates/flower/flower-service.yaml"],
)
assert jmespath.search("metadata.annotations", docs[0]) == {"foo": "bar"}
assert jmespath.search("spec.type", docs[0]) == "LoadBalancer"
assert {"name": "flower-ui", "port": 9000} in jmespath.search("spec.ports", docs[0])
assert jmespath.search("spec.loadBalancerIP", docs[0]) == "127.0.0.1"
assert jmespath.search("spec.loadBalancerSourceRanges", docs[0]) == ["10.123.0.0/16"]
@pytest.mark.parametrize(
("ports", "expected_ports"),
[
([{"port": 8888}], [{"port": 8888}]), # name is optional with a single port
(
[{"name": "{{ .Release.Name }}", "protocol": "UDP", "port": "{{ .Values.ports.flowerUI }}"}],
[{"name": "release-name", "protocol": "UDP", "port": 5555}],
),
([{"name": "only_sidecar", "port": "{{ int 9000 }}"}], [{"name": "only_sidecar", "port": 9000}]),
(
[
{"name": "flower-ui", "port": "{{ .Values.ports.flowerUI }}"},
{"name": "sidecar", "port": 80, "targetPort": "sidecar"},
],
[
{"name": "flower-ui", "port": 5555},
{"name": "sidecar", "port": 80, "targetPort": "sidecar"},
],
),
],
)
def test_ports_overrides(self, ports, expected_ports):
docs = render_chart(
values={
"flower": {"enabled": True, "service": {"ports": ports}},
},
show_only=["templates/flower/flower-service.yaml"],
)
assert expected_ports == jmespath.search("spec.ports", docs[0])
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"flower": {
"enabled": True,
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/flower/flower-service.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
|
TestFlowerService
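The assertions above query rendered manifests with jmespath; a minimal standalone illustration of that query style (the document dict here is made up):
import jmespath

doc = {"metadata": {"name": "release-name-flower", "labels": {"tier": "airflow"}}}
assert jmespath.search("metadata.name", doc) == "release-name-flower"
assert jmespath.search("metadata.labels.tier", doc) == "airflow"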
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-aws/dagster_aws_tests/pipes_tests/fake_glue.py
|
{
"start": 344,
"end": 7242
}
|
class ____:
def __init__(
self,
aws_endpoint_url: str, # usually received from moto
s3_client: boto3.client, # pyright: ignore (reportGeneralTypeIssues)
glue_client: boto3.client, # pyright: ignore (reportGeneralTypeIssues)
pipes_messages_backend: Literal["s3", "cloudwatch"],
cloudwatch_client: Optional[boto3.client] = None, # pyright: ignore (reportGeneralTypeIssues)
):
"""This class wraps moto3 clients for S3 and Glue, and provides a way to "run" Glue jobs locally.
This is necessary because moto3 does not actually run anything when you start a Glue job, so we won't be able
to receive any Dagster messages from it.
If pipes_messages_backend is configured to be CloudWatch, it also uploads stderr and stdout logs to CloudWatch
as if this has been done by Glue.
Once the job is submitted, it is being executed in a separate process to mimic Glue behavior.
Once the job status is requested, the process is checked for its status and the result is returned.
"""
self.aws_endpoint_url = aws_endpoint_url
self.s3_client = s3_client
self.glue_client = glue_client
self.pipes_messages_backend = pipes_messages_backend
self.cloudwatch_client = cloudwatch_client
self.process = None # jobs will be executed in a separate process
self._job_runs: dict[str, SimulatedJobRun] = {} # mapping of JobRunId to SimulatedJobRun
@property
def meta(self):
return self.glue_client.meta
def get_job_run(self, JobName: str, RunId: str):
# get original response
response = self.glue_client.get_job_run(JobName=JobName, RunId=RunId)
# check if status override is set
simulated_job_run = self._job_runs[RunId]
if simulated_job_run.stopped:
response["JobRun"]["JobRunState"] = "STOPPED"
return response
# check if popen has completed
if simulated_job_run.popen.poll() is not None:
simulated_job_run.popen.wait()
# check status code
if simulated_job_run.popen.returncode == 0:
response["JobRun"]["JobRunState"] = "SUCCEEDED"
else:
response["JobRun"]["JobRunState"] = "FAILED"
_, stderr = simulated_job_run.popen.communicate()
response["JobRun"]["ErrorMessage"] = stderr.decode()
# upload logs to cloudwatch
if self.pipes_messages_backend == "cloudwatch":
self._upload_logs_to_cloudwatch(RunId)
else:
response["JobRun"]["JobRunState"] = "RUNNING"
return response
def start_job_run(self, JobName: str, Arguments: Optional[dict[str, str]], **kwargs):
params = {
"JobName": JobName,
}
if Arguments:
params["Arguments"] = Arguments # type: ignore
script_s3_path = self.glue_client.get_job(JobName=JobName)["Job"]["Command"][
"ScriptLocation"
]
bucket = script_s3_path.split("/")[2]
key = "/".join(script_s3_path.split("/")[3:])
# mock the job run with moto
response = self.glue_client.start_job_run(**params)
job_run_id = response["JobRunId"]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f = tempfile.NamedTemporaryFile(
delete=False
) # we will close this file later during garbage collection
# load the S3 script to a local file
self.s3_client.download_file(bucket, key, f.name)
# execute the script in a separate process
args = []
for key, val in (Arguments or {}).items():
args.append(key)
args.append(val)
popen = Popen(
[sys.executable, f.name, *args],
env={
"AWS_ENDPOINT_URL": self.aws_endpoint_url,
"TESTING_PIPES_MESSAGES_BACKEND": self.pipes_messages_backend,
},
stdout=PIPE,
stderr=PIPE,
)
# record execution metadata for later use
self._job_runs[job_run_id] = SimulatedJobRun(
popen=popen,
job_run_id=job_run_id,
log_group=self.glue_client.get_job_run(JobName=JobName, RunId=job_run_id)["JobRun"][
"LogGroupName"
],
local_script=f,
)
return response
def batch_stop_job_run(self, JobName: str, JobRunIds: list[str]):
for job_run_id in JobRunIds:
if simulated_job_run := self._job_runs.get(job_run_id):
simulated_job_run.popen.terminate()
simulated_job_run.stopped = True
self._upload_logs_to_cloudwatch(job_run_id)
def _upload_logs_to_cloudwatch(self, job_run_id: str):
log_group = self._job_runs[job_run_id].log_group
stdout, stderr = self._job_runs[job_run_id].popen.communicate()
if self.pipes_messages_backend == "cloudwatch":
assert self.cloudwatch_client is not None, (
"cloudwatch_client has to be provided with cloudwatch messages backend"
)
try:
self.cloudwatch_client.create_log_group(
logGroupName=f"{log_group}/output",
)
except self.cloudwatch_client.exceptions.ResourceAlreadyExistsException:
pass
try:
self.cloudwatch_client.create_log_stream(
logGroupName=f"{log_group}/output",
logStreamName=job_run_id,
)
except self.cloudwatch_client.exceptions.ResourceAlreadyExistsException:
pass
for out in [stderr, stdout]: # Glue routes both stderr and stdout to /output
for line in out.decode().split(
"\n"
): # uploading log lines one by one is good enough for tests
if line:
self.cloudwatch_client.put_log_events(
logGroupName=f"{log_group}/output",
logStreamName=job_run_id,
logEvents=[
{"timestamp": int(time.time() * 1000), "message": str(line)}
],
)
time.sleep(
0.01
) # make sure the logs will be properly filtered by ms timestamp when accessed next time
def __del__(self):
# cleanup local script paths
for job_run in self._job_runs.values():
job_run.local_script.close()
|
LocalGlueMockClient
|
python
|
pola-rs__polars
|
py-polars/tests/unit/io/test_iceberg.py
|
{
"start": 2170,
"end": 4721
}
|
class ____:
"""Test coverage for `iceberg` scan ops."""
def test_scan_iceberg_plain(self, iceberg_path: str) -> None:
q = pl.scan_iceberg(iceberg_path)
assert len(q.collect()) == 3
assert q.collect_schema() == {
"id": pl.Int32,
"str": pl.String,
"ts": pl.Datetime(time_unit="us", time_zone=None),
}
def test_scan_iceberg_snapshot_id(self, iceberg_path: str) -> None:
q = pl.scan_iceberg(iceberg_path, snapshot_id=7051579356916758811)
assert len(q.collect()) == 3
assert q.collect_schema() == {
"id": pl.Int32,
"str": pl.String,
"ts": pl.Datetime(time_unit="us", time_zone=None),
}
def test_scan_iceberg_snapshot_id_not_found(self, iceberg_path: str) -> None:
with pytest.raises(ValueError, match="snapshot ID not found"):
pl.scan_iceberg(iceberg_path, snapshot_id=1234567890).collect()
def test_scan_iceberg_filter_on_partition(self, iceberg_path: str) -> None:
ts1 = datetime(2023, 3, 1, 18, 15)
ts2 = datetime(2023, 3, 1, 19, 25)
ts3 = datetime(2023, 3, 2, 22, 0)
lf = pl.scan_iceberg(iceberg_path)
res = lf.filter(pl.col("ts") >= ts2)
assert len(res.collect()) == 2
res = lf.filter(pl.col("ts") > ts2).select(pl.col("id"))
assert res.collect().rows() == [(3,)]
res = lf.filter(pl.col("ts") <= ts2).select("id", "ts")
assert res.collect().rows(named=True) == [
{"id": 1, "ts": ts1},
{"id": 2, "ts": ts2},
]
res = lf.filter(pl.col("ts") > ts3)
assert len(res.collect()) == 0
for constraint in (
(pl.col("ts") == ts1) | (pl.col("ts") == ts3),
pl.col("ts").is_in([ts1, ts3]),
):
res = lf.filter(constraint).select("id")
assert res.collect().rows() == [(1,), (3,)]
def test_scan_iceberg_filter_on_column(self, iceberg_path: str) -> None:
lf = pl.scan_iceberg(iceberg_path)
res = lf.filter(pl.col("id") < 2)
assert res.collect().rows() == [(1, "1", datetime(2023, 3, 1, 18, 15))]
res = lf.filter(pl.col("id") == 2)
assert res.collect().rows() == [(2, "2", datetime(2023, 3, 1, 19, 25))]
res = lf.filter(pl.col("id").is_in([1, 3]))
assert res.collect().rows() == [
(1, "1", datetime(2023, 3, 1, 18, 15)),
(3, "3", datetime(2023, 3, 2, 22, 0)),
]
@pytest.mark.ci_only
|
TestIcebergScanIO
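Outside the test harness the same scan/filter/collect flow looks like the sketch below; the table path is hypothetical, while the predicate pushdown is exactly what the tests above exercise:
from datetime import datetime

import polars as pl

lf = pl.scan_iceberg("/tmp/iceberg/my_table")  # hypothetical table location
df = (
    lf.filter(pl.col("ts") >= datetime(2023, 3, 1, 19, 25))
    .select("id", "ts")
    .collect()
)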
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 222780,
"end": 223059
}
|
class ____(VegaLiteSchema):
"""ConditionalAxisPropertynumbernull schema wrapper."""
_schema = {"$ref": "#/definitions/ConditionalAxisProperty<(number|null)>"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
|
ConditionalAxisPropertynumbernull
|
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/group_events.py
|
{
"start": 1838,
"end": 1938
}
|
class ____(Exception):
pass
@extend_schema(tags=["Events"])
@region_silo_endpoint
|
GroupEventsError
|
python
|
redis__redis-py
|
tests/test_maint_notifications.py
|
{
"start": 16067,
"end": 18533
}
|
class ____:
"""Test the MaintNotificationsConfig class."""
def test_init_defaults(self):
"""Test MaintNotificationsConfig initialization with defaults."""
config = MaintNotificationsConfig()
assert config.enabled == "auto"
assert config.proactive_reconnect is True
assert config.relaxed_timeout == 10
def test_init_custom_values(self):
"""Test MaintNotificationsConfig initialization with custom values."""
config = MaintNotificationsConfig(
enabled=True, proactive_reconnect=False, relaxed_timeout=30
)
assert config.enabled is True
assert config.proactive_reconnect is False
assert config.relaxed_timeout == 30
def test_repr(self):
"""Test MaintNotificationsConfig string representation."""
config = MaintNotificationsConfig(
enabled=True, proactive_reconnect=False, relaxed_timeout=30
)
repr_str = repr(config)
assert "MaintNotificationsConfig" in repr_str
assert "enabled=True" in repr_str
assert "proactive_reconnect=False" in repr_str
assert "relaxed_timeout=30" in repr_str
def test_is_relaxed_timeouts_enabled_true(self):
"""Test is_relaxed_timeouts_enabled returns True for positive timeout."""
config = MaintNotificationsConfig(relaxed_timeout=20)
assert config.is_relaxed_timeouts_enabled() is True
def test_is_relaxed_timeouts_enabled_false(self):
"""Test is_relaxed_timeouts_enabled returns False for -1 timeout."""
config = MaintNotificationsConfig(relaxed_timeout=-1)
assert config.is_relaxed_timeouts_enabled() is False
def test_is_relaxed_timeouts_enabled_zero(self):
"""Test is_relaxed_timeouts_enabled returns True for zero timeout."""
config = MaintNotificationsConfig(relaxed_timeout=0)
assert config.is_relaxed_timeouts_enabled() is True
def test_is_relaxed_timeouts_enabled_none(self):
"""Test is_relaxed_timeouts_enabled returns True for None timeout."""
config = MaintNotificationsConfig(relaxed_timeout=None)
assert config.is_relaxed_timeouts_enabled() is True
def test_relaxed_timeout_none_is_saved_as_none(self):
"""Test that None value for relaxed_timeout is saved as None."""
config = MaintNotificationsConfig(relaxed_timeout=None)
assert config.relaxed_timeout is None
|
TestMaintNotificationsConfig
|
python
|
openai__openai-python
|
src/openai/resources/fine_tuning/checkpoints/checkpoints.py
|
{
"start": 2674,
"end": 2980
}
|
class ____:
def __init__(self, checkpoints: AsyncCheckpoints) -> None:
self._checkpoints = checkpoints
@cached_property
def permissions(self) -> AsyncPermissionsWithRawResponse:
return AsyncPermissionsWithRawResponse(self._checkpoints.permissions)
|
AsyncCheckpointsWithRawResponse
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 27419,
"end": 27645
}
|
class ____(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(
None,
{
"fields": (("first", "second"),),
},
),
)
|
EmptyModelVisibleAdmin
|
python
|
huggingface__transformers
|
tests/models/eomt/test_modeling_eomt.py
|
{
"start": 1259,
"end": 3741
}
|
class ____:
def __init__(
self,
parent,
batch_size=2,
is_training=True,
image_size=40,
patch_size=2,
num_queries=5,
num_register_tokens=19,
num_labels=4,
hidden_size=8,
num_attention_heads=2,
num_hidden_layers=2,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.num_queries = num_queries
self.image_size = image_size
self.patch_size = patch_size
self.num_labels = num_labels
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.num_register_tokens = num_register_tokens
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def get_config(self):
config = {
"image_size": self.image_size,
"patch_size": self.patch_size,
"num_labels": self.num_labels,
"hidden_size": self.hidden_size,
"num_attention_heads": self.num_attention_heads,
"num_hidden_layers": self.num_hidden_layers,
"num_register_tokens": self.num_register_tokens,
"num_queries": self.num_queries,
"num_blocks": 1,
}
return EomtConfig(**config)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, 3, self.image_size, self.image_size]).to(torch_device)
mask_labels = (
torch.rand([self.batch_size, self.num_labels, self.image_size, self.image_size], device=torch_device) > 0.5
).float()
class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
config = self.get_config()
return config, pixel_values, mask_labels, class_labels
def prepare_config_and_inputs_for_common(self):
config, pixel_values, mask_labels, class_labels = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
def prepare_config_and_inputs_for_training(self):
config, pixel_values, mask_labels, class_labels = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values, "mask_labels": mask_labels, "class_labels": class_labels}
return config, inputs_dict
@require_torch
|
EomtForUniversalSegmentationTester
|
python
|
huggingface__transformers
|
src/transformers/models/resnet/modeling_resnet.py
|
{
"start": 7442,
"end": 9036
}
|
class ____(nn.Module):
def __init__(self, config: ResNetConfig):
super().__init__()
self.stages = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
config,
config.embedding_size,
config.hidden_sizes[0],
stride=2 if config.downsample_in_first_stage else 1,
depth=config.depths[0],
)
)
in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))
def forward(
self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
) -> BaseModelOutputWithNoAttention:
hidden_states = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage_module(hidden_state)
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_state,
hidden_states=hidden_states,
)
@auto_docstring
|
ResNetEncoder
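The stage wiring above pairs consecutive hidden sizes via zip; a small illustration with hypothetical config values shows which (in, out, depth) triples the loop produces:
hidden_sizes = [64, 128, 256, 512]  # hypothetical config.hidden_sizes
depths = [3, 4, 6, 3]               # hypothetical config.depths
in_out_channels = zip(hidden_sizes, hidden_sizes[1:])
stages = [(c_in, c_out, d) for (c_in, c_out), d in zip(in_out_channels, depths[1:])]
assert stages == [(64, 128, 4), (128, 256, 6), (256, 512, 3)]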
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/mps.py
|
{
"start": 40276,
"end": 41827
}
|
class ____(SIMDScheduling):
kernel_type = MetalKernel # type: ignore[assignment]
def __init__(self, scheduler: Optional[Scheduler]) -> None:
super().__init__(scheduler)
wrapper = V.graph.wrapper_code
if wrapper is not None:
if not V.graph.cpp_wrapper:
wrapper.header.splice(
"from torch._inductor.runtime.runtime_utils import compile_mps_shader"
)
def define_kernel(
self, src_code: str, node_schedule: list[SchedulerNode], kernel: MetalKernel
) -> str:
wrapper = V.graph.wrapper_code
if src_code in wrapper.src_to_kernel:
kernel_name = wrapper.src_to_kernel[src_code]
else:
# TODO: Merge multiple kernels into a single library
# Either using MultiKernel concept or overriding SIMDScheduling.codegen_node_scheduling
mps_lib_name = f"mps_lib_{wrapper.next_kernel_suffix()}"
kernel_name = f"{mps_lib_name}"
wrapper.src_to_kernel[src_code] = kernel_name
if V.graph.cpp_wrapper:
# For shimified version, generate source constant instead of direct instantiation
src_code = f"const char* {mps_lib_name}_source = " + src_code
origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper)
metadata_comment = f"{origins}\n{detailed_origins}"
wrapper.define_kernel(mps_lib_name, src_code, metadata_comment, gpu=False)
return kernel_name
|
MetalScheduling
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/failing_build/package.py
|
{
"start": 217,
"end": 597
}
|
class ____(Package):
"""This package has a trivial install method that fails."""
homepage = "http://www.example.com/trivial_install"
url = "http://www.unit-test-should-replace-this-url/trivial_install-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
def install(self, spec, prefix):
raise InstallError("Expected failure.")
|
FailingBuild
|
python
|
Textualize__textual
|
tests/snapshot_tests/snapshot_apps/recompose.py
|
{
"start": 577,
"end": 793
}
|
class ____(Horizontal):
progress = reactive(0, recompose=True)
def compose(self) -> ComposeResult:
bar = ProgressBar(100, show_eta=False)
bar.progress = self.progress
yield bar
|
Progress
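Because progress is declared with recompose=True, each change rebuilds the bar; a hedged sketch of driving the widget above (referred to here by its target name, Progress) from a Textual app, where the app wiring is illustrative and not taken from the snapshot test:
from textual.app import App, ComposeResult

class DemoApp(App):
    def compose(self) -> ComposeResult:
        yield Progress()

    def on_mount(self) -> None:
        # Bump the reactive every half second; each change triggers a recompose.
        self.set_interval(0.5, self.tick)

    def tick(self) -> None:
        self.query_one(Progress).progress += 10

if __name__ == "__main__":
    DemoApp().run()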
|
python
|
ray-project__ray
|
python/ray/data/datasource/datasink.py
|
{
"start": 1209,
"end": 3829
}
|
class ____(Generic[WriteReturnType]):
"""Interface for defining write-related logic.
If you want to write data to something that isn't built-in, subclass this class
and call :meth:`~ray.data.Dataset.write_datasink`.
"""
def on_write_start(self) -> None:
"""Callback for when a write job starts.
Use this method to perform setup for write tasks. For example, creating a
staging bucket in S3.
"""
pass
def write(
self,
blocks: Iterable[Block],
ctx: TaskContext,
) -> WriteReturnType:
"""Write blocks. This is used by a single write task.
Args:
blocks: Generator of data blocks.
ctx: ``TaskContext`` for the write task.
Returns:
Result of this write task. When the entire write operator finishes,
All returned values will be passed as `WriteResult.write_returns`
to `Datasink.on_write_complete`.
"""
raise NotImplementedError
def on_write_complete(self, write_result: WriteResult[WriteReturnType]):
"""Callback for when a write job completes.
This can be used to `commit` a write output. This method must
succeed prior to ``write_datasink()`` returning to the user. If this
method fails, then ``on_write_failed()`` is called.
Args:
write_result: Aggregated result of the
Write operator, containing write results and stats.
"""
pass
def on_write_failed(self, error: Exception) -> None:
"""Callback for when a write job fails.
This is called on a best-effort basis on write failures.
Args:
error: The first error encountered.
"""
pass
def get_name(self) -> str:
"""Return a human-readable name for this datasink.
This is used as the names of the write tasks.
"""
name = type(self).__name__
datasink_suffix = "Datasink"
if name.startswith("_"):
name = name[1:]
if name.endswith(datasink_suffix):
name = name[: -len(datasink_suffix)]
return name
@property
def supports_distributed_writes(self) -> bool:
"""If ``False``, only launch write tasks on the driver's node."""
return True
@property
def min_rows_per_write(self) -> Optional[int]:
"""The target number of rows to pass to each :meth:`~ray.data.Datasink.write` call.
If ``None``, Ray Data passes a system-chosen number of rows.
"""
return None
@DeveloperAPI
|
Datasink
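A minimal custom sink, sketched under the assumptions that Datasink is importable from ray.data (as in the Ray Data API reference) and that WriteResult.write_returns collects each task's return value as described above; the class name and row-counting logic are made up:
from ray.data import Datasink
from ray.data.block import BlockAccessor

class RowCountDatasink(Datasink):
    """Counts rows per write task and reports the total when the job finishes."""

    def write(self, blocks, ctx):
        total = 0
        for block in blocks:
            # BlockAccessor gives a uniform row count across block formats.
            total += BlockAccessor.for_block(block).num_rows()
        return total

    def on_write_complete(self, write_result):
        print("rows written:", sum(write_result.write_returns))

# Usage sketch (needs a running Ray cluster):
# ray.data.range(1000).write_datasink(RowCountDatasink())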
|
python
|
gevent__gevent
|
src/gevent/win32util.py
|
{
"start": 1292,
"end": 3637
}
|
class ____(object):
"""
Formatter for Windows error messages.
@ivar winError: A callable which takes one integer error number argument
and returns an L{exceptions.WindowsError} instance for that error (like
L{ctypes.WinError}).
@ivar formatMessage: A callable which takes one integer error number
argument and returns a C{str} giving the message for that error (like
L{win32api.FormatMessage}).
@ivar errorTab: A mapping from integer error numbers to C{str} messages
which correspond to those errors (like L{socket.errorTab}).
"""
def __init__(self, WinError, FormatMessage, errorTab):
self.winError = WinError
self.formatMessage = FormatMessage
self.errorTab = errorTab
@classmethod
def fromEnvironment(cls):
"""
Get as many of the platform-specific error translation objects as
possible and return an instance of C{cls} created with them.
"""
try:
from ctypes import WinError
except ImportError:
WinError = None
try:
from win32api import FormatMessage
except ImportError:
FormatMessage = None
try:
from socket import errorTab
except ImportError:
errorTab = None
return cls(WinError, FormatMessage, errorTab)
def formatError(self, errorcode):
"""
Returns the string associated with a Windows error message, such as the
ones found in socket.error.
        Attempts direct lookup against the win32 API via ctypes (and then
        pywin32, if available), then in the error table in the socket module,
then finally defaulting to C{os.strerror}.
@param errorcode: the Windows error code
@type errorcode: C{int}
@return: The error message string
@rtype: C{str}
"""
if self.winError is not None:
return str(self.winError(errorcode))
if self.formatMessage is not None:
return self.formatMessage(errorcode)
if self.errorTab is not None:
result = self.errorTab.get(errorcode)
if result is not None:
return result
return os.strerror(errorcode)
formatError = _ErrorFormatter.fromEnvironment().formatError
|
_ErrorFormatter
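Because the module binds formatError at import time (last line above), callers simply import the function; a small Windows-oriented sketch, where the error code and the exact message text are illustrative:
from gevent.win32util import formatError

# Resolves a Windows error code to text, falling back from ctypes.WinError
# to pywin32 to socket.errorTab to os.strerror, depending on what imported.
print(formatError(10061))  # typically something like "Connection refused"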
|
python
|
jina-ai__jina
|
jina/clients/base/helper.py
|
{
"start": 4954,
"end": 8928
}
|
class ____(AioHttpClientlet):
"""HTTP Client to be used with the streamer"""
UPDATE_EVENT_PREFIX = 14 # the update event has the following format: "event: update: {document_json}"
async def send_message(self, url, request: 'Request'):
"""Sends a POST request to the server
:param url: the URL where to send the message
:param request: request as dict
:return: send post message
"""
req_dict = request.to_dict()
req_dict['exec_endpoint'] = req_dict['header']['exec_endpoint']
if 'target_executor' in req_dict['header']:
req_dict['target_executor'] = req_dict['header']['target_executor']
for attempt in range(1, self.max_attempts + 1):
try:
request_kwargs = {'url': url}
if not docarray_v2:
request_kwargs['json'] = req_dict
else:
from docarray.base_doc.io.json import orjson_dumps
request_kwargs['data'] = JinaJsonPayload(value=req_dict)
async with self.session.post(**request_kwargs) as response:
try:
r_str = await response.json()
except aiohttp.ContentTypeError:
r_str = await response.text()
r_status = response.status
handle_response_status(r_status, r_str, url)
return r_status, r_str
except (ValueError, ConnectionError, BadClient, aiohttp.ClientError, aiohttp.ClientConnectionError) as err:
self.logger.debug(f'Got an error of type {type(err)}: {err} sending POST to {url} in attempt {attempt}/{self.max_attempts}')
await retry.wait_or_raise_err(
attempt=attempt,
err=err,
max_attempts=self.max_attempts,
backoff_multiplier=self.backoff_multiplier,
initial_backoff=self.initial_backoff,
max_backoff=self.max_backoff,
)
except Exception as exc:
self.logger.debug(
f'Got a non-retried error of type {type(exc)}: {exc} sending POST to {url}')
raise exc
async def send_streaming_message(self, url, doc: 'Document', on: str):
"""Sends a GET SSE request to the server
:param url: the URL where to send the message
:param doc: Request Document
:param on: Request endpoint
:yields: responses
"""
req_dict = doc.to_dict() if hasattr(doc, "to_dict") else doc.dict()
request_kwargs = {
'url': url,
'headers': {'Accept': 'text/event-stream'},
'json': req_dict,
}
async with self.session.get(**request_kwargs) as response:
async for chunk in response.content.iter_any():
events = chunk.split(b'event: ')[1:]
for event in events:
if event.startswith(b'update'):
yield event[self.UPDATE_EVENT_PREFIX :].decode()
elif event.startswith(b'end'):
pass
async def send_dry_run(self, url, **kwargs):
"""Query the dry_run endpoint from Gateway
:param url: the URL where to send the message
:param kwargs: keyword arguments to make sure compatible API with other clients
:return: send get message
"""
return await self.session.get(
url=url, timeout=kwargs.get('timeout', None)
).__aenter__()
async def recv_message(self):
"""Receive message for HTTP (sleep)
:return: await sleep
"""
return await asyncio.sleep(1e10)
async def recv_dry_run(self):
"""Receive dry run response for HTTP (sleep)
:return: await sleep
"""
return await asyncio.sleep(1e10)
|
HTTPClientlet
|
python
|
ray-project__ray
|
rllib/policy/dynamic_tf_policy.py
|
{
"start": 1435,
"end": 39922
}
|
class ____(TFPolicy):
"""A TFPolicy that auto-defines placeholders dynamically at runtime.
Do not sub-class this class directly (neither should you sub-class
TFPolicy), but rather use rllib.policy.tf_policy_template.build_tf_policy
to generate your custom tf (graph-mode or eager) Policy classes.
"""
def __init__(
self,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
loss_fn: Callable[
[Policy, ModelV2, Type[TFActionDistribution], SampleBatch], TensorType
],
*,
stats_fn: Optional[
Callable[[Policy, SampleBatch], Dict[str, TensorType]]
] = None,
grad_stats_fn: Optional[
Callable[[Policy, SampleBatch, ModelGradients], Dict[str, TensorType]]
] = None,
before_loss_init: Optional[
Callable[
[Policy, gym.spaces.Space, gym.spaces.Space, AlgorithmConfigDict], None
]
] = None,
make_model: Optional[
Callable[
[Policy, gym.spaces.Space, gym.spaces.Space, AlgorithmConfigDict],
ModelV2,
]
] = None,
action_sampler_fn: Optional[
Callable[
[TensorType, List[TensorType]],
Union[
Tuple[TensorType, TensorType],
Tuple[TensorType, TensorType, TensorType, List[TensorType]],
],
]
] = None,
action_distribution_fn: Optional[
Callable[
[Policy, ModelV2, TensorType, TensorType, TensorType],
Tuple[TensorType, type, List[TensorType]],
]
] = None,
existing_inputs: Optional[Dict[str, "tf1.placeholder"]] = None,
existing_model: Optional[ModelV2] = None,
get_batch_divisibility_req: Optional[Callable[[Policy], int]] = None,
obs_include_prev_action_reward=DEPRECATED_VALUE,
):
"""Initializes a DynamicTFPolicy instance.
Initialization of this class occurs in two phases and defines the
static graph.
Phase 1: The model is created and model variables are initialized.
Phase 2: A fake batch of data is created, sent to the trajectory
postprocessor, and then used to create placeholders for the loss
function. The loss and stats functions are initialized with these
placeholders.
Args:
            obs_space: Observation space of the policy.
action_space: Action space of the policy.
config: Policy-specific configuration data.
loss_fn: Function that returns a loss tensor for the policy graph.
stats_fn: Optional callable that - given the policy and batch
input tensors - returns a dict mapping str to TF ops.
These ops are fetched from the graph after loss calculations
and the resulting values can be found in the results dict
returned by e.g. `Algorithm.train()` or in tensorboard (if TB
logging is enabled).
grad_stats_fn: Optional callable that - given the policy, batch
input tensors, and calculated loss gradient tensors - returns
a dict mapping str to TF ops. These ops are fetched from the
graph after loss and gradient calculations and the resulting
values can be found in the results dict returned by e.g.
`Algorithm.train()` or in tensorboard (if TB logging is
enabled).
before_loss_init: Optional function to run prior to
loss init that takes the same arguments as __init__.
make_model: Optional function that returns a ModelV2 object
given policy, obs_space, action_space, and policy config.
All policy variables should be created in this function. If not
specified, a default model will be created.
action_sampler_fn: A callable returning either a sampled action and
its log-likelihood or a sampled action, its log-likelihood,
action distribution inputs and updated state given Policy,
ModelV2, observation inputs, explore, and is_training.
Provide `action_sampler_fn` if you would like to have full
control over the action computation step, including the
model forward pass, possible sampling from a distribution,
and exploration logic.
Note: If `action_sampler_fn` is given, `action_distribution_fn`
must be None. If both `action_sampler_fn` and
`action_distribution_fn` are None, RLlib will simply pass
inputs through `self.model` to get distribution inputs, create
the distribution object, sample from it, and apply some
exploration logic to the results.
The callable takes as inputs: Policy, ModelV2, obs_batch,
state_batches (optional), seq_lens (optional),
prev_actions_batch (optional), prev_rewards_batch (optional),
explore, and is_training.
action_distribution_fn: A callable returning distribution inputs
(parameters), a dist-class to generate an action distribution
object from, and internal-state outputs (or an empty list if
not applicable).
Provide `action_distribution_fn` if you would like to only
customize the model forward pass call. The resulting
distribution parameters are then used by RLlib to create a
distribution object, sample from it, and execute any
exploration logic.
Note: If `action_distribution_fn` is given, `action_sampler_fn`
must be None. If both `action_sampler_fn` and
`action_distribution_fn` are None, RLlib will simply pass
inputs through `self.model` to get distribution inputs, create
the distribution object, sample from it, and apply some
exploration logic to the results.
The callable takes as inputs: Policy, ModelV2, input_dict,
explore, timestep, is_training.
existing_inputs: When copying a policy, this specifies an existing
dict of placeholders to use instead of defining new ones.
existing_model: When copying a policy, this specifies an existing
model to clone and share weights with.
get_batch_divisibility_req: Optional callable that returns the
divisibility requirement for sample batches. If None, will
assume a value of 1.
"""
if obs_include_prev_action_reward != DEPRECATED_VALUE:
deprecation_warning(old="obs_include_prev_action_reward", error=True)
self.observation_space = obs_space
self.action_space = action_space
self.config = config
self.framework = "tf"
self._loss_fn = loss_fn
self._stats_fn = stats_fn
self._grad_stats_fn = grad_stats_fn
self._seq_lens = None
self._is_tower = existing_inputs is not None
dist_class = None
if action_sampler_fn or action_distribution_fn:
if not make_model:
raise ValueError(
"`make_model` is required if `action_sampler_fn` OR "
"`action_distribution_fn` is given"
)
else:
dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"]
)
# Setup self.model.
if existing_model:
if isinstance(existing_model, list):
self.model = existing_model[0]
# TODO: (sven) hack, but works for `target_[q_]?model`.
for i in range(1, len(existing_model)):
setattr(self, existing_model[i][0], existing_model[i][1])
elif make_model:
self.model = make_model(self, obs_space, action_space, config)
else:
self.model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=logit_dim,
model_config=self.config["model"],
framework="tf",
)
# Auto-update model's inference view requirements, if recurrent.
self._update_model_view_requirements_from_init_state()
# Input placeholders already given -> Use these.
if existing_inputs:
self._state_inputs = [
v for k, v in existing_inputs.items() if k.startswith("state_in_")
]
# Placeholder for RNN time-chunk valid lengths.
if self._state_inputs:
self._seq_lens = existing_inputs[SampleBatch.SEQ_LENS]
# Create new input placeholders.
else:
self._state_inputs = [
get_placeholder(
space=vr.space,
time_axis=not isinstance(vr.shift, int),
name=k,
)
for k, vr in self.model.view_requirements.items()
if k.startswith("state_in_")
]
# Placeholder for RNN time-chunk valid lengths.
if self._state_inputs:
self._seq_lens = tf1.placeholder(
dtype=tf.int32, shape=[None], name="seq_lens"
)
# Use default settings.
# Add NEXT_OBS, STATE_IN_0.., and others.
self.view_requirements = self._get_default_view_requirements()
# Combine view_requirements for Model and Policy.
self.view_requirements.update(self.model.view_requirements)
# Disable env-info placeholder.
if SampleBatch.INFOS in self.view_requirements:
self.view_requirements[SampleBatch.INFOS].used_for_training = False
# Setup standard placeholders.
if self._is_tower:
timestep = existing_inputs["timestep"]
explore = False
self._input_dict, self._dummy_batch = self._get_input_dict_and_dummy_batch(
self.view_requirements, existing_inputs
)
else:
if not self.config.get("_disable_action_flattening"):
action_ph = ModelCatalog.get_action_placeholder(action_space)
prev_action_ph = {}
if SampleBatch.PREV_ACTIONS not in self.view_requirements:
prev_action_ph = {
SampleBatch.PREV_ACTIONS: ModelCatalog.get_action_placeholder(
action_space, "prev_action"
)
}
(
self._input_dict,
self._dummy_batch,
) = self._get_input_dict_and_dummy_batch(
self.view_requirements,
dict({SampleBatch.ACTIONS: action_ph}, **prev_action_ph),
)
else:
(
self._input_dict,
self._dummy_batch,
) = self._get_input_dict_and_dummy_batch(self.view_requirements, {})
# Placeholder for (sampling steps) timestep (int).
timestep = tf1.placeholder_with_default(
tf.zeros((), dtype=tf.int64), (), name="timestep"
)
# Placeholder for `is_exploring` flag.
explore = tf1.placeholder_with_default(True, (), name="is_exploring")
# Placeholder for `is_training` flag.
self._input_dict.set_training(self._get_is_training_placeholder())
# Multi-GPU towers do not need any action computing/exploration
# graphs.
sampled_action = None
sampled_action_logp = None
dist_inputs = None
extra_action_fetches = {}
self._state_out = None
if not self._is_tower:
# Create the Exploration object to use for this Policy.
self.exploration = self._create_exploration()
# Fully customized action generation (e.g., custom policy).
if action_sampler_fn:
action_sampler_outputs = action_sampler_fn(
self,
self.model,
obs_batch=self._input_dict[SampleBatch.CUR_OBS],
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
prev_action_batch=self._input_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_batch=self._input_dict.get(SampleBatch.PREV_REWARDS),
explore=explore,
is_training=self._input_dict.is_training,
)
if len(action_sampler_outputs) == 4:
(
sampled_action,
sampled_action_logp,
dist_inputs,
self._state_out,
) = action_sampler_outputs
else:
dist_inputs = None
self._state_out = []
sampled_action, sampled_action_logp = action_sampler_outputs
# Distribution generation is customized, e.g., DQN, DDPG.
else:
if action_distribution_fn:
# Try new action_distribution_fn signature, supporting
# state_batches and seq_lens.
in_dict = self._input_dict
try:
(
dist_inputs,
dist_class,
self._state_out,
) = action_distribution_fn(
self,
self.model,
input_dict=in_dict,
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
explore=explore,
timestep=timestep,
is_training=in_dict.is_training,
)
# Trying the old way (to stay backward compatible).
# TODO: Remove in future.
except TypeError as e:
if (
"positional argument" in e.args[0]
or "unexpected keyword argument" in e.args[0]
):
(
dist_inputs,
dist_class,
self._state_out,
) = action_distribution_fn(
self,
self.model,
obs_batch=in_dict[SampleBatch.CUR_OBS],
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
prev_action_batch=in_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_batch=in_dict.get(SampleBatch.PREV_REWARDS),
explore=explore,
is_training=in_dict.is_training,
)
else:
raise e
# Default distribution generation behavior:
# Pass through model. E.g., PG, PPO.
else:
if isinstance(self.model, tf.keras.Model):
dist_inputs, self._state_out, extra_action_fetches = self.model(
self._input_dict
)
else:
dist_inputs, self._state_out = self.model(self._input_dict)
action_dist = dist_class(dist_inputs, self.model)
# Using exploration to get final action (e.g. via sampling).
(
sampled_action,
sampled_action_logp,
) = self.exploration.get_exploration_action(
action_distribution=action_dist, timestep=timestep, explore=explore
)
if dist_inputs is not None:
extra_action_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs
if sampled_action_logp is not None:
extra_action_fetches[SampleBatch.ACTION_LOGP] = sampled_action_logp
extra_action_fetches[SampleBatch.ACTION_PROB] = tf.exp(
tf.cast(sampled_action_logp, tf.float32)
)
# Phase 1 init.
sess = tf1.get_default_session() or tf1.Session(
config=tf1.ConfigProto(**self.config["tf_session_args"])
)
batch_divisibility_req = (
get_batch_divisibility_req(self)
if callable(get_batch_divisibility_req)
else (get_batch_divisibility_req or 1)
)
prev_action_input = (
self._input_dict[SampleBatch.PREV_ACTIONS]
if SampleBatch.PREV_ACTIONS in self._input_dict.accessed_keys
else None
)
prev_reward_input = (
self._input_dict[SampleBatch.PREV_REWARDS]
if SampleBatch.PREV_REWARDS in self._input_dict.accessed_keys
else None
)
super().__init__(
observation_space=obs_space,
action_space=action_space,
config=config,
sess=sess,
obs_input=self._input_dict[SampleBatch.OBS],
action_input=self._input_dict[SampleBatch.ACTIONS],
sampled_action=sampled_action,
sampled_action_logp=sampled_action_logp,
dist_inputs=dist_inputs,
dist_class=dist_class,
loss=None, # dynamically initialized on run
loss_inputs=[],
model=self.model,
state_inputs=self._state_inputs,
state_outputs=self._state_out,
prev_action_input=prev_action_input,
prev_reward_input=prev_reward_input,
seq_lens=self._seq_lens,
max_seq_len=config["model"]["max_seq_len"],
batch_divisibility_req=batch_divisibility_req,
explore=explore,
timestep=timestep,
)
# Phase 2 init.
if before_loss_init is not None:
before_loss_init(self, obs_space, action_space, config)
if hasattr(self, "_extra_action_fetches"):
self._extra_action_fetches.update(extra_action_fetches)
else:
self._extra_action_fetches = extra_action_fetches
# Loss initialization and model/postprocessing test calls.
if not self._is_tower:
self._initialize_loss_from_dummy_batch(auto_remove_unneeded_view_reqs=True)
# Create MultiGPUTowerStacks, if we have at least one actual
# GPU or >1 CPUs (fake GPUs).
if len(self.devices) > 1 or any("gpu" in d for d in self.devices):
# Per-GPU graph copies created here must share vars with the
# policy. Therefore, `reuse` is set to tf1.AUTO_REUSE because
# Adam nodes are created after all of the device copies are
# created.
with tf1.variable_scope("", reuse=tf1.AUTO_REUSE):
self.multi_gpu_tower_stacks = [
TFMultiGPUTowerStack(policy=self)
for i in range(self.config.get("num_multi_gpu_tower_stacks", 1))
]
# Initialize again after loss and tower init.
self.get_session().run(tf1.global_variables_initializer())
@override(TFPolicy)
def copy(self, existing_inputs: List[Tuple[str, "tf1.placeholder"]]) -> TFPolicy:
"""Creates a copy of self using existing input placeholders."""
flat_loss_inputs = tree.flatten(self._loss_input_dict)
flat_loss_inputs_no_rnn = tree.flatten(self._loss_input_dict_no_rnn)
# Note that there might be RNN state inputs at the end of the list
if len(flat_loss_inputs) != len(existing_inputs):
raise ValueError(
"Tensor list mismatch",
self._loss_input_dict,
self._state_inputs,
existing_inputs,
)
for i, v in enumerate(flat_loss_inputs_no_rnn):
if v.shape.as_list() != existing_inputs[i].shape.as_list():
raise ValueError(
"Tensor shape mismatch", i, v.shape, existing_inputs[i].shape
)
# By convention, the loss inputs are followed by state inputs and then
# the seq len tensor.
rnn_inputs = []
for i in range(len(self._state_inputs)):
rnn_inputs.append(
(
"state_in_{}".format(i),
existing_inputs[len(flat_loss_inputs_no_rnn) + i],
)
)
if rnn_inputs:
rnn_inputs.append((SampleBatch.SEQ_LENS, existing_inputs[-1]))
existing_inputs_unflattened = tree.unflatten_as(
self._loss_input_dict_no_rnn,
existing_inputs[: len(flat_loss_inputs_no_rnn)],
)
input_dict = OrderedDict(
[("is_exploring", self._is_exploring), ("timestep", self._timestep)]
+ [
(k, existing_inputs_unflattened[k])
for i, k in enumerate(self._loss_input_dict_no_rnn.keys())
]
+ rnn_inputs
)
instance = self.__class__(
self.observation_space,
self.action_space,
self.config,
existing_inputs=input_dict,
existing_model=[
self.model,
# Deprecated: Target models should all reside under
# `policy.target_model` now.
("target_q_model", getattr(self, "target_q_model", None)),
("target_model", getattr(self, "target_model", None)),
],
)
instance._loss_input_dict = input_dict
losses = instance._do_loss_init(SampleBatch(input_dict))
loss_inputs = [
(k, existing_inputs_unflattened[k])
for i, k in enumerate(self._loss_input_dict_no_rnn.keys())
]
TFPolicy._initialize_loss(instance, losses, loss_inputs)
if instance._grad_stats_fn:
instance._stats_fetches.update(
instance._grad_stats_fn(instance, input_dict, instance._grads)
)
return instance
@override(Policy)
def get_initial_state(self) -> List[TensorType]:
if self.model:
return self.model.get_initial_state()
else:
return []
@override(Policy)
def load_batch_into_buffer(
self,
batch: SampleBatch,
buffer_index: int = 0,
) -> int:
# Set the is_training flag of the batch.
batch.set_training(True)
# Shortcut for 1 CPU only: Store batch in
# `self._loaded_single_cpu_batch`.
if len(self.devices) == 1 and self.devices[0] == "/cpu:0":
assert buffer_index == 0
self._loaded_single_cpu_batch = batch
return len(batch)
input_dict = self._get_loss_inputs_dict(batch, shuffle=False)
data_keys = tree.flatten(self._loss_input_dict_no_rnn)
if self._state_inputs:
state_keys = self._state_inputs + [self._seq_lens]
else:
state_keys = []
inputs = [input_dict[k] for k in data_keys]
state_inputs = [input_dict[k] for k in state_keys]
return self.multi_gpu_tower_stacks[buffer_index].load_data(
sess=self.get_session(),
inputs=inputs,
state_inputs=state_inputs,
num_grad_updates=batch.num_grad_updates,
)
@override(Policy)
def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int:
# Shortcut for 1 CPU only: Batch should already be stored in
# `self._loaded_single_cpu_batch`.
if len(self.devices) == 1 and self.devices[0] == "/cpu:0":
assert buffer_index == 0
return (
len(self._loaded_single_cpu_batch)
if self._loaded_single_cpu_batch is not None
else 0
)
return self.multi_gpu_tower_stacks[buffer_index].num_tuples_loaded
@override(Policy)
def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0):
# Shortcut for 1 CPU only: Batch should already be stored in
# `self._loaded_single_cpu_batch`.
if len(self.devices) == 1 and self.devices[0] == "/cpu:0":
assert buffer_index == 0
if self._loaded_single_cpu_batch is None:
raise ValueError(
"Must call Policy.load_batch_into_buffer() before "
"Policy.learn_on_loaded_batch()!"
)
# Get the correct slice of the already loaded batch to use,
# based on offset and batch size.
batch_size = self.config.get("minibatch_size")
if batch_size is None:
batch_size = self.config.get(
"sgd_minibatch_size", self.config["train_batch_size"]
)
if batch_size >= len(self._loaded_single_cpu_batch):
sliced_batch = self._loaded_single_cpu_batch
else:
sliced_batch = self._loaded_single_cpu_batch.slice(
start=offset, end=offset + batch_size
)
return self.learn_on_batch(sliced_batch)
tower_stack = self.multi_gpu_tower_stacks[buffer_index]
results = tower_stack.optimize(self.get_session(), offset)
self.num_grad_updates += 1
results.update(
{
NUM_GRAD_UPDATES_LIFETIME: self.num_grad_updates,
# -1, b/c we have to measure this diff before we do the update above.
DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY: (
self.num_grad_updates - 1 - (tower_stack.num_grad_updates or 0)
),
}
)
return results
def _get_input_dict_and_dummy_batch(self, view_requirements, existing_inputs):
"""Creates input_dict and dummy_batch for loss initialization.
Used for managing the Policy's input placeholders and for loss
initialization.
Input_dict: Str -> tf.placeholders, dummy_batch: str -> np.arrays.
Args:
view_requirements: The view requirements dict.
existing_inputs (Dict[str, tf.placeholder]): A dict of already
existing placeholders.
Returns:
Tuple[Dict[str, tf.placeholder], Dict[str, np.ndarray]]: The
input_dict/dummy_batch tuple.
"""
input_dict = {}
for view_col, view_req in view_requirements.items():
# Point state_in to the already existing self._state_inputs.
mo = re.match(r"state_in_(\d+)", view_col)
if mo is not None:
input_dict[view_col] = self._state_inputs[int(mo.group(1))]
# State-outs (no placeholders needed).
elif view_col.startswith("state_out_"):
continue
# Skip action dist inputs placeholder (do later).
elif view_col == SampleBatch.ACTION_DIST_INPUTS:
continue
# This is a tower: Input placeholders already exist.
elif view_col in existing_inputs:
input_dict[view_col] = existing_inputs[view_col]
# All others.
else:
time_axis = not isinstance(view_req.shift, int)
if view_req.used_for_training:
# Create a +time-axis placeholder if the shift is not an
# int (range or list of ints).
# Do not flatten actions if action flattening disabled.
if self.config.get("_disable_action_flattening") and view_col in [
SampleBatch.ACTIONS,
SampleBatch.PREV_ACTIONS,
]:
flatten = False
# Do not flatten observations if no preprocessor API used.
elif (
view_col in [SampleBatch.OBS, SampleBatch.NEXT_OBS]
and self.config["_disable_preprocessor_api"]
):
flatten = False
# Flatten everything else.
else:
flatten = True
input_dict[view_col] = get_placeholder(
space=view_req.space,
name=view_col,
time_axis=time_axis,
flatten=flatten,
)
dummy_batch = self._get_dummy_batch_from_view_requirements(batch_size=32)
return SampleBatch(input_dict, seq_lens=self._seq_lens), dummy_batch
@override(Policy)
def _initialize_loss_from_dummy_batch(
self, auto_remove_unneeded_view_reqs: bool = True, stats_fn=None
) -> None:
# Create the optimizer/exploration optimizer here. Some initialization
# steps (e.g. exploration postprocessing) may need this.
if not self._optimizers:
self._optimizers = force_list(self.optimizer())
# Backward compatibility.
self._optimizer = self._optimizers[0]
# Test calls depend on variable init, so initialize model first.
self.get_session().run(tf1.global_variables_initializer())
# Fields that have not been accessed are not needed for action
# computations -> Tag them as `used_for_compute_actions=False`.
for key, view_req in self.view_requirements.items():
if (
not key.startswith("state_in_")
and key not in self._input_dict.accessed_keys
):
view_req.used_for_compute_actions = False
for key, value in self._extra_action_fetches.items():
self._dummy_batch[key] = get_dummy_batch_for_space(
gym.spaces.Box(
-1.0, 1.0, shape=value.shape.as_list()[1:], dtype=value.dtype.name
),
batch_size=len(self._dummy_batch),
)
self._input_dict[key] = get_placeholder(value=value, name=key)
if key not in self.view_requirements:
logger.info("Adding extra-action-fetch `{}` to view-reqs.".format(key))
self.view_requirements[key] = ViewRequirement(
space=gym.spaces.Box(
-1.0,
1.0,
shape=value.shape.as_list()[1:],
dtype=value.dtype.name,
),
used_for_compute_actions=False,
)
dummy_batch = self._dummy_batch
logger.info("Testing `postprocess_trajectory` w/ dummy batch.")
self.exploration.postprocess_trajectory(self, dummy_batch, self.get_session())
_ = self.postprocess_trajectory(dummy_batch)
# Add new columns automatically to (loss) input_dict.
for key in dummy_batch.added_keys:
if key not in self._input_dict:
self._input_dict[key] = get_placeholder(
value=dummy_batch[key], name=key
)
if key not in self.view_requirements:
self.view_requirements[key] = ViewRequirement(
space=gym.spaces.Box(
-1.0,
1.0,
shape=dummy_batch[key].shape[1:],
dtype=dummy_batch[key].dtype,
),
used_for_compute_actions=False,
)
train_batch = SampleBatch(
dict(self._input_dict, **self._loss_input_dict),
_is_training=True,
)
if self._state_inputs:
train_batch[SampleBatch.SEQ_LENS] = self._seq_lens
self._loss_input_dict.update(
{SampleBatch.SEQ_LENS: train_batch[SampleBatch.SEQ_LENS]}
)
self._loss_input_dict.update(dict(train_batch))
if log_once("loss_init"):
logger.debug(
"Initializing loss function with dummy input:\n\n{}\n".format(
summarize(train_batch)
)
)
losses = self._do_loss_init(train_batch)
all_accessed_keys = (
train_batch.accessed_keys
| dummy_batch.accessed_keys
| dummy_batch.added_keys
| set(self.model.view_requirements.keys())
)
TFPolicy._initialize_loss(
self,
losses,
[(k, v) for k, v in train_batch.items() if k in all_accessed_keys]
+ (
[(SampleBatch.SEQ_LENS, train_batch[SampleBatch.SEQ_LENS])]
if SampleBatch.SEQ_LENS in train_batch
else []
),
)
if "is_training" in self._loss_input_dict:
del self._loss_input_dict["is_training"]
# Call the grads stats fn.
# TODO: (sven) rename to simply stats_fn to match eager and torch.
if self._grad_stats_fn:
self._stats_fetches.update(
self._grad_stats_fn(self, train_batch, self._grads)
)
# Add new columns automatically to view-reqs.
if auto_remove_unneeded_view_reqs:
# Add those needed for postprocessing and training.
all_accessed_keys = train_batch.accessed_keys | dummy_batch.accessed_keys
# Tag those only needed for post-processing (with some exceptions).
for key in dummy_batch.accessed_keys:
if (
key not in train_batch.accessed_keys
and key not in self.model.view_requirements
and key
not in [
SampleBatch.EPS_ID,
SampleBatch.AGENT_INDEX,
SampleBatch.UNROLL_ID,
SampleBatch.TERMINATEDS,
SampleBatch.TRUNCATEDS,
SampleBatch.REWARDS,
SampleBatch.INFOS,
SampleBatch.T,
SampleBatch.OBS_EMBEDS,
]
):
if key in self.view_requirements:
self.view_requirements[key].used_for_training = False
if key in self._loss_input_dict:
del self._loss_input_dict[key]
# Remove those not needed at all (leave those that are needed
# by Sampler to properly execute sample collection).
# Also always leave TERMINATEDS, TRUNCATEDS, REWARDS, and INFOS,
# no matter what.
for key in list(self.view_requirements.keys()):
if (
key not in all_accessed_keys
and key
not in [
SampleBatch.EPS_ID,
SampleBatch.AGENT_INDEX,
SampleBatch.UNROLL_ID,
SampleBatch.TERMINATEDS,
SampleBatch.TRUNCATEDS,
SampleBatch.REWARDS,
SampleBatch.INFOS,
SampleBatch.T,
]
and key not in self.model.view_requirements
):
# If user deleted this key manually in postprocessing
# fn, warn about it and do not remove from
# view-requirements.
if key in dummy_batch.deleted_keys:
logger.warning(
"SampleBatch key '{}' was deleted manually in "
"postprocessing function! RLlib will "
"automatically remove non-used items from the "
"data stream. Remove the `del` from your "
"postprocessing function.".format(key)
)
# If we are not writing output to disk, safe to erase
# this key to save space in the sample batch.
elif self.config["output"] is None:
del self.view_requirements[key]
if key in self._loss_input_dict:
del self._loss_input_dict[key]
# Add those data_cols (again) that are missing and have
# dependencies by view_cols.
for key in list(self.view_requirements.keys()):
vr = self.view_requirements[key]
if (
vr.data_col is not None
and vr.data_col not in self.view_requirements
):
used_for_training = vr.data_col in train_batch.accessed_keys
self.view_requirements[vr.data_col] = ViewRequirement(
space=vr.space, used_for_training=used_for_training
)
self._loss_input_dict_no_rnn = {
k: v
for k, v in self._loss_input_dict.items()
if (v not in self._state_inputs and v != self._seq_lens)
}
def _do_loss_init(self, train_batch: SampleBatch):
losses = self._loss_fn(self, self.model, self.dist_class, train_batch)
losses = force_list(losses)
if self._stats_fn:
self._stats_fetches.update(self._stats_fn(self, train_batch))
# Override the update ops to be those of the model.
self._update_ops = []
if not isinstance(self.model, tf.keras.Model):
self._update_ops = self.model.update_ops()
return losses
@OldAPIStack
|
DynamicTFPolicy
|
python
|
apache__airflow
|
airflow-core/src/airflow/dag_processing/manager.py
|
{
"start": 3240,
"end": 3382
}
|
class ____(NamedTuple):
"""Information on processing progress."""
done: bool
all_files_processed: bool
@attrs.define
|
DagParsingStat
|
python
|
django__django
|
django/tasks/backends/immediate.py
|
{
"start": 418,
"end": 3377
}
|
class ____(BaseTaskBackend):
supports_async_task = True
supports_priority = True
def __init__(self, alias, params):
super().__init__(alias, params)
self.worker_id = get_random_string(32)
def _execute_task(self, task_result):
"""
Execute the Task for the given TaskResult, mutating it with the
outcome.
"""
object.__setattr__(task_result, "enqueued_at", timezone.now())
task_enqueued.send(type(self), task_result=task_result)
task = task_result.task
task_start_time = timezone.now()
object.__setattr__(task_result, "status", TaskResultStatus.RUNNING)
object.__setattr__(task_result, "started_at", task_start_time)
object.__setattr__(task_result, "last_attempted_at", task_start_time)
task_result.worker_ids.append(self.worker_id)
task_started.send(sender=type(self), task_result=task_result)
try:
if task.takes_context:
raw_return_value = task.call(
TaskContext(task_result=task_result),
*task_result.args,
**task_result.kwargs,
)
else:
raw_return_value = task.call(*task_result.args, **task_result.kwargs)
object.__setattr__(
task_result,
"_return_value",
normalize_json(raw_return_value),
)
except KeyboardInterrupt:
# If the user tried to terminate, let them
raise
except BaseException as e:
object.__setattr__(task_result, "finished_at", timezone.now())
exception_type = type(e)
task_result.errors.append(
TaskError(
exception_class_path=(
f"{exception_type.__module__}.{exception_type.__qualname__}"
),
traceback="".join(format_exception(e)),
)
)
object.__setattr__(task_result, "status", TaskResultStatus.FAILED)
task_finished.send(type(self), task_result=task_result)
else:
object.__setattr__(task_result, "finished_at", timezone.now())
object.__setattr__(task_result, "status", TaskResultStatus.SUCCESSFUL)
task_finished.send(type(self), task_result=task_result)
def enqueue(self, task, args, kwargs):
self.validate_task(task)
task_result = TaskResult(
task=task,
id=get_random_string(32),
status=TaskResultStatus.READY,
enqueued_at=None,
started_at=None,
last_attempted_at=None,
finished_at=None,
args=args,
kwargs=kwargs,
backend=self.alias,
errors=[],
worker_ids=[],
)
self._execute_task(task_result)
return task_result
|
ImmediateBackend
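Wiring this backend up happens through settings; in the sketch below the TASKS setting name follows the draft background-tasks documentation and is an assumption here, while the dotted backend path comes from this record's module location:
# settings.py (sketch)
TASKS = {
    "default": {
        "BACKEND": "django.tasks.backends.immediate.ImmediateBackend",
    }
}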
|
python
|
ray-project__ray
|
python/ray/dashboard/modules/log/log_agent.py
|
{
"start": 8096,
"end": 8610
}
|
class ____(dashboard_utils.DashboardAgentModule):
def __init__(self, dashboard_agent):
super().__init__(dashboard_agent)
log_utils.register_mimetypes()
routes.static("/logs", self._dashboard_agent.log_dir, show_index=True)
async def run(self, server):
pass
@staticmethod
def is_minimal_module():
return False
_task_log_search_worker_pool = concurrent.futures.ThreadPoolExecutor(
max_workers=RAY_DASHBOARD_LOG_TASK_LOG_SEARCH_MAX_WORKER_COUNT
)
|
LogAgent
|
python
|
django__django
|
django/contrib/contenttypes/management/commands/remove_stale_contenttypes.py
|
{
"start": 263,
"end": 4494
}
|
class ____(BaseCommand):
help = "Deletes stale content types in the database."
def add_arguments(self, parser):
parser.add_argument(
"--noinput",
"--no-input",
action="store_false",
dest="interactive",
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_argument(
"--database",
default=DEFAULT_DB_ALIAS,
choices=tuple(connections),
help='Nominates the database to use. Defaults to the "default" database.',
)
parser.add_argument(
"--include-stale-apps",
action="store_true",
default=False,
help=(
"Deletes stale content types including ones from previously "
"installed apps that have been removed from INSTALLED_APPS."
),
)
def handle(self, **options):
db = options["database"]
include_stale_apps = options["include_stale_apps"]
interactive = options["interactive"]
verbosity = options["verbosity"]
if not router.allow_migrate_model(db, ContentType):
return
ContentType.objects.clear_cache()
apps_content_types = itertools.groupby(
ContentType.objects.using(db).order_by("app_label", "model"),
lambda obj: obj.app_label,
)
for app_label, content_types in apps_content_types:
if not include_stale_apps and app_label not in apps.app_configs:
continue
to_remove = [ct for ct in content_types if ct.model_class() is None]
# Confirm that the content type is stale before deletion.
using = router.db_for_write(ContentType)
if to_remove:
if interactive:
ct_info = []
for ct in to_remove:
ct_info.append(
" - Content type for %s.%s" % (ct.app_label, ct.model)
)
collector = Collector(
using=using, origin=ct, force_collection=True
)
collector.collect([ct])
for obj_type, objs in collector.data.items():
if objs != {ct}:
ct_info.append(
" - %s %s object(s)"
% (
len(objs),
obj_type._meta.label,
)
)
content_type_display = "\n".join(ct_info)
self.stdout.write(
"Some content types in your database are stale and can be "
"deleted.\n"
"Any objects that depend on these content types will also be "
"deleted.\n"
"The content types and dependent objects that would be deleted "
"are:\n\n"
f"{content_type_display}\n\n"
"This list doesn't include any cascade deletions to data "
"outside of Django\n"
"models (uncommon).\n\n"
"Are you sure you want to delete these content types?\n"
"If you're unsure, answer 'no'."
)
ok_to_delete = input("Type 'yes' to continue, or 'no' to cancel: ")
else:
ok_to_delete = "yes"
if ok_to_delete == "yes":
for ct in to_remove:
if verbosity >= 2:
self.stdout.write(
"Deleting stale content type '%s | %s'"
% (ct.app_label, ct.model)
)
ct.delete()
else:
if verbosity >= 2:
self.stdout.write("Stale content types remain.")
|
Command
|
python
|
huggingface__transformers
|
src/transformers/models/mobilebert/modeling_mobilebert.py
|
{
"start": 24383,
"end": 26884
}
|
class ____(MobileBertPreTrainedModel):
"""
https://huggingface.co/papers/2004.02984
"""
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = MobileBertEmbeddings(config)
self.encoder = MobileBertEncoder(config)
self.pooler = MobileBertPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutputWithPooling]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
**kwargs,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
)
@auto_docstring(
custom_intro="""
MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
`next sentence prediction (classification)` head.
"""
)
|
MobileBertModel
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-peaks.py
|
{
"start": 37,
"end": 282
}
|
class ____(object):
    def findPeaks(self, mountain):
        """
        :type mountain: List[int]
        :rtype: List[int]
        """
        return [i for i in xrange(1, len(mountain)-1) if mountain[i-1] < mountain[i] > mountain[i+1]]
|
Solution
|
python
|
ray-project__ray
|
python/ray/serve/tests/unit/test_task_consumer.py
|
{
"start": 1899,
"end": 3618
}
|
class ____:
"""Test the task_handler decorator."""
def _create_and_test_handler(self, decorator_args=None, expected_name=None):
"""Helper to create and test a task handler."""
mock = MagicMock()
if decorator_args is None:
@task_handler
def test_handler():
mock()
else:
@task_handler(**decorator_args)
def test_handler():
mock()
test_handler()
assert mock.call_count == 1
assert test_handler._task_name == expected_name
def test_task_handler_decorator_with_name(self):
self._create_and_test_handler(
decorator_args={"name": "my_task"}, expected_name="my_task"
)
def test_task_handler_decorator_without_name(self):
self._create_and_test_handler(expected_name="test_handler")
@pytest.mark.parametrize("invalid_name", ["", " ", 123])
def test_task_handler_decorator_invalid_name(self, invalid_name):
"""Test various invalid task names."""
with pytest.raises(
ValueError,
match=f"Task name must be a non-empty string, got {invalid_name}",
):
@task_handler(name=invalid_name)
def my_task_handler():
pass
def test_task_handler_on_callable_object_without_name_attr(self):
"""Test that AttributeError is raised for callables with no __name__."""
class MyCallable:
"""A simple callable class without a __name__ attribute on instances."""
def __call__(self):
pass
with pytest.raises(AttributeError):
task_handler(MyCallable())
|
TestTaskHandlerDecorator
|