| language (string, 1 value) | repo (string, 346 values) | path (string, length 6-201) | class_span (dict) | source (string, length 21-2.38M) | target (string, length 1-96) |
|---|---|---|---|---|---|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/tasks/builds.py
|
{
"start": 3384,
"end": 4895
}
|
class ____:
"""
Object to store all data related to a Celery task execution.
We use this object from inside the task to store data while we are running
the task. This is to avoid using `self.` inside the task due to its
limitations: it's instantiated once and that instance is re-used for all
the tasks run. This could result in instance state being shared between two
different and unrelated tasks.
Note that *all the data* that needs to be saved in the task and shared among
its different methods should be stored in this object. Normally, under
`self.data` inside the Celery task itself.
See https://docs.celeryproject.org/en/master/userguide/tasks.html#instantiation
.. note::
Dataclasses require type annotations; this doesn't mean we are using
type hints or enforcing them in our codebase.
"""
# Arguments from the task.
version_pk: int = None
build_pk: int = None
build_commit: str = None
# Slumber client to interact with the API v2.
api_client: API = None
start_time: timezone.datetime = None
environment_class: type[DockerBuildEnvironment] | type[LocalBuildEnvironment] = None
build_director: BuildDirector = None
config: BuildConfigV2 = None
project: APIProject = None
version: APIVersion = None
# Dictionary returned from the API.
build: dict = field(default_factory=dict)
# Build data for analytics (telemetry).
build_data: dict = field(default_factory=dict)
|
TaskData
|
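Editor's note: the record above explains why per-run state lives on a dataclass rather than on the Celery task instance. Below is a minimal sketch of that pattern under stated assumptions (a hypothetical task class, not the Read the Docs implementation):

```python
from dataclasses import dataclass, field


@dataclass
class TaskData:
    """Per-run state; a fresh instance is created for every task execution."""
    version_pk: int = None
    build: dict = field(default_factory=dict)


class BuildTask:  # stands in for a celery.Task subclass, which is reused across runs
    def run(self, version_pk):
        # Never stash run-specific values on `self`: the same task instance
        # serves many unrelated runs. Keep them on a fresh TaskData instead.
        self.data = TaskData(version_pk=version_pk)
        self.data.build["state"] = "triggered"
        return self.data.build
```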
python
|
huggingface__transformers
|
src/transformers/generation/continuous_batching/cache_manager.py
|
{
"start": 1814,
"end": 9954
}
|
class ____:
"""A class to manage the number of free blocks and block re-use. If prefix sharing is off, the block manager is a
simple FIFO structure where blocks are either free or in use. If prefix sharing is on, blocks can have 3 states:
- in use: one or more requests reference this block, thus it cannot be written over. The number of requests
referencing this block is stored as ref_count in the Block object.
- un-initialized: the block points to a space in the KV cache tensor that contains no data yet. Those blocks can
be given as free blocks to new requests without any overhead.
- initialized: the block is complete and was used by one or more requests that are finished. It contains KV cache
data and its hash is stored in the hash table. If a new request needs a block with the same hash, we increase
the ref_count of the block and remove it from the list of initialized blocks, because it is now in use.
Still, the block can be freed if no un-initialized blocks are left. In that case, we remove its hash from the
hash table.
There is no structure to keep track of the blocks in use: if a block is neither un-initialized nor initialized,
it is in use.
"""
def __init__(self, num_blocks: int, block_size: int, use_prefix_sharing: bool) -> None:
"""Initializes the block manager with a given number of blocks (num_blocks) of size (block_size). Prefix sharing
can be turned on with the (use_prefix_sharing) flag, which only happens if the model has only full attention
layers."""
self.num_blocks = num_blocks
self.block_size = block_size
self._uninit_block_ids = deque(range(num_blocks))
self._init_block_ids: dict[int, None] = {} # effectively act as an ordered set
self._use_prefix_sharing = use_prefix_sharing
self._hash_to_id: dict[int, int] = {}
self._id_to_block: dict[int, Block] = {}
@property
def num_free_blocks(self) -> int:
"""Returns the number of free blocks left. Both initialized and uninitialized blocks are considered free."""
return len(self._uninit_block_ids) + len(self._init_block_ids)
def has_enough_free_blocks(self, n_blocks: int) -> bool:
"""Checks if there are enough free blocks to allocate the requested number of blocks (n_blocks). If there are
not enough uninitialized blocks, we uninitialize the required number of initialized blocks."""
# Exit early if there are enough uninitialized blocks
if len(self._uninit_block_ids) >= n_blocks:
return True
# Exit early if even after uninitializing all initialized blocks, there are not enough free blocks
block_to_uninitialize = n_blocks - len(self._uninit_block_ids)
if len(self._init_block_ids) < block_to_uninitialize:
return False
# Uninitialize the required amount of blocks
for _ in range(block_to_uninitialize):
id_to_uninitialize = self._init_block_ids.popitem()[0]
block = self._id_to_block[id_to_uninitialize]
self._hash_to_id.pop(block.hash)
self._uninit_block_ids.append(id_to_uninitialize)
return True
def get_free_blocks(self, n_blocks: int, last_block_id: int | None) -> list[int] | None:
"""Returns a list of (n_blocks) free block and mark them as no longuer free in the internal data structures. One
can also pass a (last_block_id) to indicate the last block id in the sequence, which is used to keep track of
the parent block. If the manager cannot find enough free blocks, it returns None."""
if not self.has_enough_free_blocks(n_blocks):
return None
allocated_block_ids = [self._uninit_block_ids.popleft() for _ in range(n_blocks)]
# If we use prefix caching, we keep track of the allocated blocks as partial blocks
if self._use_prefix_sharing:
for block_id in allocated_block_ids:
block = Block(block_id, last_block_id)
self._id_to_block[block_id] = block
last_block_id = block_id
# In both cases, we return the allocated block ids
return allocated_block_ids
def increase_ref_count(self, block_id: int) -> None:
"""Increases the reference count of a given (block_id)."""
block = self._id_to_block[block_id]
block.ref_count += 1
if block.ref_count == 1:
self._init_block_ids.pop(block_id)
def decrease_ref_count(self, block_id: int) -> None:
"""Decreases the reference count of a given (block_id). If the reference count reaches 0, the block is no longer
in use, and becomes initialized (if it was complete) or uninitialized (if it was incomplete)."""
block = self._id_to_block[block_id]
block.ref_count -= 1
if block.ref_count == 0:
if block.is_complete:
self._init_block_ids[block_id] = None
else:
self._id_to_block.pop(block_id)
self._uninit_block_ids.append(block_id)
def free_blocks(self, blocks: list[int]) -> None:
"""Marks a list of (blocks) as free. If there is no prefix sharing, we simply add them to the uninitialized
blocks queue. Otherwise, their new state depends on whether they are complete."""
if self._use_prefix_sharing:
for block_id in blocks:
self.decrease_ref_count(block_id)
else:
self._uninit_block_ids.extend(blocks)
def mark_blocks_as_complete(
self, num_complete_blocks: int, allocated_blocks: list[int], prompt_ids: list[int]
) -> None:
"""Among the list of (allocated_blocks), mark (num_complete_blocks) incomplete blocks as now complete. The list
of (prompt_ids) is used to compute the hash of the new block."""
# Look for the first complete block, starting from the last block in the sequence
parent_hash = None
incomplete_blocks: list[Block] = []
for i, block_id in reverse_enumerate(allocated_blocks):
block = self._id_to_block[block_id]
if block.is_complete:
parent_hash = block.hash
break
incomplete_blocks.append((i, block))
# Now go through the incomplete blocks and update them
new_parent_id = None
while incomplete_blocks:
i, block = incomplete_blocks.pop()
# If the parent id has been updated, we apply the change
if new_parent_id is not None:
block.parent_id = new_parent_id
new_parent_id = None
# If we have set the hash for all complete blocks, we can stop
if num_complete_blocks == 0:
break
# Otherwise, we compute the hash
num_complete_blocks -= 1
tokens = prompt_ids[i * self.block_size : (i + 1) * self.block_size]
block.hash = self.compute_hash(parent_hash, tokens)
existing_block_id = self._hash_to_id.get(block.hash)
# If the block hash is already in the hash to id mapping, we reference the existing block instead
if existing_block_id is not None:
logger.debug(f"Found existing block {existing_block_id} for block {block.id}")
allocated_blocks[i] = existing_block_id
self._id_to_block[existing_block_id].ref_count += 1
new_parent_id = existing_block_id
self.free_blocks([block.id])
# Otherwise, we add the completed block to the hash table
else:
self._hash_to_id[block.hash] = block.id
# Update loop variables
parent_hash = block.hash
def compute_hash(self, parent_hash: int | None, tokens: list[int]) -> int:
"""Computes the hash of a block containing the given (tokens) with a given (parent_hash). If the block has no
parent, the parent hash is None."""
return hash((parent_hash, tuple(tokens)))
|
BlockManager
|
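Editor's note: the docstring above relies on block hashes that chain through a parent hash, which is what lets two requests with the same prompt prefix resolve to the same cache blocks. A small self-contained sketch of that chaining, using the same `compute_hash` scheme as the record (token values are illustrative only):

```python
def compute_hash(parent_hash, tokens):
    # Same scheme as BlockManager.compute_hash: fold the parent block's hash
    # into the hash of this block's tokens.
    return hash((parent_hash, tuple(tokens)))


block_size = 4
prompt_a = [11, 12, 13, 14, 21, 22, 23, 24]  # two full blocks
prompt_b = [11, 12, 13, 14, 99, 98, 97, 96]  # same first block, different second


def block_hashes(prompt_ids):
    parent_hash, hashes = None, []
    for i in range(len(prompt_ids) // block_size):
        tokens = prompt_ids[i * block_size : (i + 1) * block_size]
        parent_hash = compute_hash(parent_hash, tokens)
        hashes.append(parent_hash)
    return hashes


hashes_a, hashes_b = block_hashes(prompt_a), block_hashes(prompt_b)
assert hashes_a[0] == hashes_b[0]  # shared prefix -> same hash -> block re-use
assert hashes_a[1] != hashes_b[1]  # divergent suffix -> distinct blocks
```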
python
|
ray-project__ray
|
python/ray/serve/tests/test_replica_request_context.py
|
{
"start": 348,
"end": 2480
}
|
class ____:
def test_basic_route_prefix(self):
@serve.deployment
class A:
def __call__(self) -> str:
return _get_request_context_route()
# No route prefix, should return "/" regardless of full route.
serve.run(A.bind())
r = httpx.get(f"{get_application_url()}/")
assert r.status_code == 200
assert r.text == "/"
assert httpx.get(f"{get_application_url()}/subpath").text == "/"
# Configured route prefix should be set.
serve.run(A.bind(), route_prefix="/prefix")
base_url = get_application_url(exclude_route_prefix=True)
assert httpx.get(f"{base_url}/prefix").text == "/prefix"
assert httpx.get(f"{base_url}/prefix/subpath").text == "/prefix"
def test_matching_fastapi_route(self):
fastapi_app = FastAPI()
@serve.deployment
@serve.ingress(fastapi_app)
class A:
@fastapi_app.get("/fastapi-path")
def root(self) -> str:
return PlainTextResponse(_get_request_context_route())
@fastapi_app.get("/dynamic/{user_id}")
def dynamic(self) -> str:
return PlainTextResponse(_get_request_context_route())
# No route prefix, should return matched fastapi route.
serve.run(A.bind())
assert (
httpx.get(f"{get_application_url()}/fastapi-path").text == "/fastapi-path"
)
assert (
httpx.get(f"{get_application_url()}/dynamic/abc123").text
== "/dynamic/{user_id}"
)
# Configured route prefix, should return matched route prefix + fastapi route.
serve.run(A.bind(), route_prefix="/prefix")
base_url = get_application_url(exclude_route_prefix=True)
assert (
httpx.get(f"{base_url}/prefix/fastapi-path").text == "/prefix/fastapi-path"
)
assert (
httpx.get(f"{base_url}/prefix/dynamic/abc123").text
== "/prefix/dynamic/{user_id}"
)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
|
TestHTTPRoute
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 4713,
"end": 4983
}
|
class ____(models.Model):
expected = models.BooleanField(default=False)
leader = models.ForeignKey(Actor, models.CASCADE)
country = models.CharField(max_length=20)
def __str__(self):
return "by %s from %s" % (self.leader, self.country)
|
Inquisition
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/ctx_manager.py
|
{
"start": 41271,
"end": 43838
}
|
class ____(ContextWrappingVariable):
_guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FSDP_TRAINING_STATE) # type: ignore[arg-type]
@staticmethod
def create(
tx: "InstructionTranslator",
param_group_var: Any,
target_value: Any,
**kwargs: Any,
) -> "FSDPParamGroupUseTrainingStateVariable":
var = FSDPParamGroupUseTrainingStateVariable(
param_group_var=param_group_var,
target_values=[target_value],
initial_values=[param_group_var.value._training_state],
**kwargs,
)
return var
def __init__(
self,
param_group_var: Any,
target_values: Sequence[Any],
initial_values: Optional[Sequence[Any]] = None,
**kwargs: Any,
) -> None:
super().__init__(
target_values=target_values, initial_values=initial_values, **kwargs
)
self.param_group_var = param_group_var
install_guard(self._guards_singleton)
def enter(self, tx: "InstructionTranslator") -> VariableTracker:
self._call_func(tx, self.target_values)
return variables.ConstantVariable.create(None)
def exit(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
self._call_func(tx, self.initial_values) # type: ignore[arg-type]
return variables.ConstantVariable.create(None)
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
# undo eager initialization
self._call_func(tx, self.initial_values) # type: ignore[arg-type]
return super().call_function(tx, args, kwargs)
def _call_func(self, tx: "InstructionTranslator", values: Sequence[Any]) -> None:
assert len(values) == 1
value = values[0]
if self.param_group_var.value._training_state != value:
self.param_group_var.call_method(
tx,
"__setattr__",
(
variables.ConstantVariable.create("_training_state"),
variables.EnumVariable(value),
),
{},
)
self.param_group_var.value._training_state = value
def module_name(self) -> str:
return "torch.distributed.fsdp._fully_shard._fsdp_param_group.FSDPParamGroup"
def fn_name(self) -> str:
return "use_training_state"
|
FSDPParamGroupUseTrainingStateVariable
|
python
|
networkx__networkx
|
networkx/algorithms/bipartite/tests/test_matching.py
|
{
"start": 7698,
"end": 11973
}
|
class ____:
@classmethod
def setup_class(cls):
pytest.importorskip("scipy")
def test_minimum_weight_full_matching_incomplete_graph(self):
B = nx.Graph()
B.add_nodes_from([1, 2], bipartite=0)
B.add_nodes_from([3, 4], bipartite=1)
B.add_edge(1, 4, weight=100)
B.add_edge(2, 3, weight=100)
B.add_edge(2, 4, weight=50)
matching = minimum_weight_full_matching(B)
assert matching == {1: 4, 2: 3, 4: 1, 3: 2}
def test_minimum_weight_full_matching_with_no_full_matching(self):
B = nx.Graph()
B.add_nodes_from([1, 2, 3], bipartite=0)
B.add_nodes_from([4, 5, 6], bipartite=1)
B.add_edge(1, 4, weight=100)
B.add_edge(2, 4, weight=100)
B.add_edge(3, 4, weight=50)
B.add_edge(3, 5, weight=50)
B.add_edge(3, 6, weight=50)
with pytest.raises(ValueError):
minimum_weight_full_matching(B)
def test_minimum_weight_full_matching_square(self):
G = nx.complete_bipartite_graph(3, 3)
G.add_edge(0, 3, weight=400)
G.add_edge(0, 4, weight=150)
G.add_edge(0, 5, weight=400)
G.add_edge(1, 3, weight=400)
G.add_edge(1, 4, weight=450)
G.add_edge(1, 5, weight=600)
G.add_edge(2, 3, weight=300)
G.add_edge(2, 4, weight=225)
G.add_edge(2, 5, weight=300)
matching = minimum_weight_full_matching(G)
assert matching == {0: 4, 1: 3, 2: 5, 4: 0, 3: 1, 5: 2}
def test_minimum_weight_full_matching_smaller_left(self):
G = nx.complete_bipartite_graph(3, 4)
G.add_edge(0, 3, weight=400)
G.add_edge(0, 4, weight=150)
G.add_edge(0, 5, weight=400)
G.add_edge(0, 6, weight=1)
G.add_edge(1, 3, weight=400)
G.add_edge(1, 4, weight=450)
G.add_edge(1, 5, weight=600)
G.add_edge(1, 6, weight=2)
G.add_edge(2, 3, weight=300)
G.add_edge(2, 4, weight=225)
G.add_edge(2, 5, weight=290)
G.add_edge(2, 6, weight=3)
matching = minimum_weight_full_matching(G)
assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1}
def test_minimum_weight_full_matching_smaller_top_nodes_right(self):
G = nx.complete_bipartite_graph(3, 4)
G.add_edge(0, 3, weight=400)
G.add_edge(0, 4, weight=150)
G.add_edge(0, 5, weight=400)
G.add_edge(0, 6, weight=1)
G.add_edge(1, 3, weight=400)
G.add_edge(1, 4, weight=450)
G.add_edge(1, 5, weight=600)
G.add_edge(1, 6, weight=2)
G.add_edge(2, 3, weight=300)
G.add_edge(2, 4, weight=225)
G.add_edge(2, 5, weight=290)
G.add_edge(2, 6, weight=3)
matching = minimum_weight_full_matching(G, top_nodes=[3, 4, 5, 6])
assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1}
def test_minimum_weight_full_matching_smaller_right(self):
G = nx.complete_bipartite_graph(4, 3)
G.add_edge(0, 4, weight=400)
G.add_edge(0, 5, weight=400)
G.add_edge(0, 6, weight=300)
G.add_edge(1, 4, weight=150)
G.add_edge(1, 5, weight=450)
G.add_edge(1, 6, weight=225)
G.add_edge(2, 4, weight=400)
G.add_edge(2, 5, weight=600)
G.add_edge(2, 6, weight=290)
G.add_edge(3, 4, weight=1)
G.add_edge(3, 5, weight=2)
G.add_edge(3, 6, weight=3)
matching = minimum_weight_full_matching(G)
assert matching == {1: 4, 2: 6, 3: 5, 4: 1, 5: 3, 6: 2}
def test_minimum_weight_full_matching_negative_weights(self):
G = nx.complete_bipartite_graph(2, 2)
G.add_edge(0, 2, weight=-2)
G.add_edge(0, 3, weight=0.2)
G.add_edge(1, 2, weight=-2)
G.add_edge(1, 3, weight=0.3)
matching = minimum_weight_full_matching(G)
assert matching == {0: 3, 1: 2, 2: 1, 3: 0}
def test_minimum_weight_full_matching_different_weight_key(self):
G = nx.complete_bipartite_graph(2, 2)
G.add_edge(0, 2, mass=2)
G.add_edge(0, 3, mass=0.2)
G.add_edge(1, 2, mass=1)
G.add_edge(1, 3, mass=2)
matching = minimum_weight_full_matching(G, weight="mass")
assert matching == {0: 3, 1: 2, 2: 1, 3: 0}
|
TestMinimumWeightFullMatching
|
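Editor's note: for context on what these tests exercise, here is the first incomplete-graph case as a standalone snippet (assumes networkx with scipy installed); note that the returned dict contains the matching in both directions:

```python
import networkx as nx
from networkx.algorithms.bipartite import minimum_weight_full_matching

B = nx.Graph()
B.add_nodes_from([1, 2], bipartite=0)
B.add_nodes_from([3, 4], bipartite=1)
B.add_edge(1, 4, weight=100)
B.add_edge(2, 3, weight=100)
B.add_edge(2, 4, weight=50)

# Node 1's only neighbour is 4, so a full matching must use (1, 4), which
# forces 2 onto 3 even though the (2, 4) edge is cheaper.
print(minimum_weight_full_matching(B))  # {1: 4, 2: 3, 4: 1, 3: 2}
```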
python
|
ray-project__ray
|
python/ray/serve/batching.py
|
{
"start": 19610,
"end": 19759
}
|
class ____(Protocol, Generic[SelfType, T, R]):
def __call__(self, self_: SelfType, __batch: List[T], /) -> List[R]:
...
|
_SyncBatchingMethod
|
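Editor's note: the protocol above describes a bound-method shape: given the owning instance and a batch of inputs, return one output per input. A minimal sketch of a method that structurally satisfies it (hypothetical class, not Ray Serve code):

```python
from typing import List


class Translator:
    suffix = "!"

    # Structurally matches _SyncBatchingMethod[Translator, str, str]:
    # called as fn(self_, batch), it returns one result per batch element.
    def handle_batch(self, batch: List[str], /) -> List[str]:
        return [text.upper() + self.suffix for text in batch]


fn = Translator.handle_batch          # unbound, invoked as fn(self_, batch)
print(fn(Translator(), ["a", "b"]))   # ['A!', 'B!']
```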
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_quote_name04.py
|
{
"start": 315,
"end": 1261
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("quote_name04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet = workbook.add_worksheet("Sheet 1")
chart = workbook.add_chart({"type": "pie"})
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": ["Sheet 1", 0, 0, 4, 0], "name": ["Sheet 1", 0, 0]})
chart.set_title({"name": "Foo"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
getsentry__sentry
|
tests/sentry/tasks/test_post_process.py
|
{
"start": 26294,
"end": 30533
}
|
class ____(BasePostProgressGroupMixin):
@patch("sentry.sentry_apps.tasks.sentry_apps.process_resource_change_bound.delay")
def test_processes_resource_change_task_on_new_group(self, delay: MagicMock) -> None:
event = self.create_event(data={}, project_id=self.project.id)
group = event.group
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=False,
event=event,
)
delay.assert_called_once_with(action="created", sender="Group", instance_id=group.id)
@with_feature("organizations:integrations-event-hooks")
@patch("sentry.sentry_apps.tasks.sentry_apps.process_resource_change_bound.delay")
def test_processes_resource_change_task_on_error_events(self, delay: MagicMock) -> None:
event = self.create_event(
data={
"message": "Foo bar",
"exception": {"type": "Foo", "value": "oh no"},
"level": "error",
"timestamp": timezone.now().isoformat(),
},
project_id=self.project.id,
assert_no_errors=False,
)
self.create_service_hook(
project=self.project,
organization=self.project.organization,
actor=self.user,
events=["error.created"],
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
delay.assert_called_once_with(
action="created",
sender="Error",
instance_id=event.event_id,
group_id=event.group_id,
project_id=self.project.id,
)
@with_feature("organizations:integrations-event-hooks")
@patch("sentry.sentry_apps.tasks.sentry_apps.process_resource_change_bound.delay")
def test_processes_resource_change_task_not_called_for_non_errors(
self, delay: MagicMock
) -> None:
event = self.create_event(
data={
"message": "Foo bar",
"level": "info",
"timestamp": timezone.now().isoformat(),
},
project_id=self.project.id,
assert_no_errors=False,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assert not delay.called
@patch("sentry.sentry_apps.tasks.sentry_apps.process_resource_change_bound.delay")
def test_processes_resource_change_task_not_called_without_feature_flag(
self, delay: MagicMock
) -> None:
event = self.create_event(
data={
"message": "Foo bar",
"level": "info",
"timestamp": timezone.now().isoformat(),
},
project_id=self.project.id,
assert_no_errors=False,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assert not delay.called
@with_feature("organizations:integrations-event-hooks")
@patch("sentry.sentry_apps.tasks.sentry_apps.process_resource_change_bound.delay")
def test_processes_resource_change_task_not_called_without_error_created(
self, delay: MagicMock
) -> None:
event = self.create_event(
data={
"message": "Foo bar",
"level": "error",
"exception": {"type": "Foo", "value": "oh no"},
"timestamp": timezone.now().isoformat(),
},
project_id=self.project.id,
assert_no_errors=False,
)
self.create_service_hook(
project=self.project, organization=self.project.organization, actor=self.user, events=[]
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assert not delay.called
|
ResourceChangeBoundsTestMixin
|
python
|
gevent__gevent
|
src/gevent/_imap.py
|
{
"start": 892,
"end": 6186
}
|
class ____(Greenlet): # pylint:disable=undefined-variable
"""
An iterator of map results.
"""
def __init__(self, func, iterable, spawn, maxsize=None, _zipped=False):
"""
An iterator over the results of applying *func* to the items of *iterable* in spawned greenlets.
:param callable spawn: The function we use to create new greenlets.
:keyword int maxsize: If given and not-None, specifies the maximum number of
finished results that will be allowed to accumulate awaiting the reader;
more than that number of results will cause map function greenlets to begin
to block. This is most useful if there is a great disparity in the speed of
the mapping code and the consumer, and the results consume a great deal of resources.
Using a bound is more computationally expensive than not using a bound.
.. versionchanged:: 1.1b3
Added the *maxsize* parameter.
"""
Greenlet.__init__(self) # pylint:disable=undefined-variable
self.spawn = spawn
self._zipped = _zipped
self.func = func
self.iterable = iterable
self.queue = UnboundQueue() # pylint:disable=undefined-variable
if maxsize:
# Bounding the queue is not enough if we want to keep from
# accumulating objects; the result value will be around as
# the greenlet's result, blocked on self.queue.put(), and
# we'll go on to spawn another greenlet, which in turn can
# create the result. So we need a semaphore to prevent a
# greenlet from exiting while the queue is full so that we
# don't spawn the next greenlet (assuming that self.spawn
# is of course bounded). (Alternatively we could have the
# greenlet itself do the insert into the pool, but that
# takes some rework).
#
# Given the use of a semaphore at this level, sizing the queue becomes
# redundant, and that lets us avoid having to use self.link() instead
# of self.rawlink() to avoid having blocking methods called in the
# hub greenlet.
self._result_semaphore = Semaphore(maxsize) # pylint:disable=undefined-variable
else:
self._result_semaphore = None
self._outstanding_tasks = 0
# The index (zero based) of the maximum number of
# results we will have.
self._max_index = -1
self.finished = False
# We're iterating in a different greenlet than we're running.
def __iter__(self):
return self
def __next__(self):
if self._result_semaphore is not None:
self._result_semaphore.release()
value = self._inext()
if isinstance(value, Failure):
_raise_exc(value)
return value
next = __next__ # Py2
def _inext(self):
return self.queue.get()
def _ispawn(self, func, item, item_index):
if self._result_semaphore is not None:
self._result_semaphore.acquire()
self._outstanding_tasks += 1
g = self.spawn(func, item) if not self._zipped else self.spawn(func, *item)
g._imap_task_index = item_index
g.rawlink(self._on_result)
return g
def _run(self): # pylint:disable=method-hidden
try:
func = self.func
for item in self.iterable:
self._max_index += 1
self._ispawn(func, item, self._max_index)
self._on_finish(None)
except BaseException as e:
self._on_finish(e)
raise
finally:
self.spawn = None
self.func = None
self.iterable = None
self._result_semaphore = None
def _on_result(self, greenlet):
# This method will be called in the hub greenlet (we rawlink)
self._outstanding_tasks -= 1
count = self._outstanding_tasks
finished = self.finished
ready = self.ready()
put_finished = False
if ready and count <= 0 and not finished:
finished = self.finished = True
put_finished = True
if greenlet.successful():
self.queue.put(self._iqueue_value_for_success(greenlet))
else:
self.queue.put(self._iqueue_value_for_failure(greenlet))
if put_finished:
self.queue.put(self._iqueue_value_for_self_finished())
def _on_finish(self, exception):
# Called in this greenlet.
if self.finished:
return
if exception is not None:
self.finished = True
self.queue.put(self._iqueue_value_for_self_failure(exception))
return
if self._outstanding_tasks <= 0:
self.finished = True
self.queue.put(self._iqueue_value_for_self_finished())
def _iqueue_value_for_success(self, greenlet):
return greenlet.value
def _iqueue_value_for_failure(self, greenlet):
return Failure(greenlet.exception, getattr(greenlet, '_raise_exception'))
def _iqueue_value_for_self_finished(self):
return Failure(StopIteration())
def _iqueue_value_for_self_failure(self, exception):
return Failure(exception, self._raise_exception)
|
IMapUnordered
|
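Editor's note: the record above is the internal greenlet behind gevent's pool iterators. A minimal usage sketch through the public API, under the assumption that gevent is installed and that `Pool.imap_unordered` accepts the `maxsize` keyword described in the docstring:

```python
import gevent
from gevent.pool import Pool


def square(n):
    gevent.sleep(0.01 * (n % 3))  # simulate uneven work so completion order varies
    return n * n


pool = Pool(4)
# maxsize=2 bounds how many finished results may queue up ahead of the consumer,
# which is the behaviour the semaphore in __init__ implements.
for result in pool.imap_unordered(square, range(10), maxsize=2):
    print(result)  # results arrive in completion order, not input order
```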
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config_vectorizers.py
|
{
"start": 8372,
"end": 8971
}
|
class ____(_VectorizerConfigCreate):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.TEXT2VEC_OPENAI, frozen=True, exclude=True
)
baseURL: Optional[AnyHttpUrl]
resourceName: str
deploymentId: str
vectorizeClassName: bool
dimensions: Optional[int]
model: Optional[str]
def _to_dict(self) -> Dict[str, Any]:
ret_dict = super()._to_dict()
if self.baseURL is not None:
ret_dict["baseURL"] = self.baseURL.unicode_string()
ret_dict["isAzure"] = True
return ret_dict
|
_Text2VecAzureOpenAIConfig
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/output_parsers/openai_tools.py
|
{
"start": 6915,
"end": 9969
}
|
class ____(JsonOutputToolsParser):
"""Parse tools from OpenAI response."""
key_name: str
"""The type of tools to return."""
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a list of tool calls.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON.
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
If `False`, the output will be the full JSON object.
Raises:
OutputParserException: If the generation is not a chat generation.
Returns:
The parsed tool calls.
"""
generation = result[0]
if not isinstance(generation, ChatGeneration):
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
message = generation.message
if isinstance(message, AIMessage) and message.tool_calls:
parsed_tool_calls = [dict(tc) for tc in message.tool_calls]
for tool_call in parsed_tool_calls:
if not self.return_id:
_ = tool_call.pop("id")
else:
try:
# This exists purely for backward compatibility / cached messages
# All new messages should use `message.tool_calls`
raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"])
except KeyError:
if self.first_tool_only:
return None
return []
parsed_tool_calls = parse_tool_calls(
raw_tool_calls,
partial=partial,
strict=self.strict,
return_id=self.return_id,
)
# For backwards compatibility
for tc in parsed_tool_calls:
tc["type"] = tc.pop("name")
if self.first_tool_only:
parsed_result = list(
filter(lambda x: x["type"] == self.key_name, parsed_tool_calls)
)
single_result = (
parsed_result[0]
if parsed_result and parsed_result[0]["type"] == self.key_name
else None
)
if self.return_id:
return single_result
if single_result:
return single_result["args"]
return None
return (
[res for res in parsed_tool_calls if res["type"] == self.key_name]
if self.return_id
else [
res["args"] for res in parsed_tool_calls if res["type"] == self.key_name
]
)
# Common cause of ValidationError is truncated output due to max_tokens.
_MAX_TOKENS_ERROR = (
"Output parser received a `max_tokens` stop reason. "
"The output is likely incomplete—please increase `max_tokens` "
"or shorten your prompt."
)
|
JsonOutputKeyToolsParser
|
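Editor's note: a minimal sketch of exercising the parser above directly; the surrounding imports are assumed from `langchain_core`, and in practice the parser usually sits at the end of a chain rather than being called by hand:

```python
from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_tools import JsonOutputKeyToolsParser
from langchain_core.outputs import ChatGeneration

message = AIMessage(
    content="",
    tool_calls=[{"name": "search", "args": {"query": "weather"}, "id": "call_1"}],
)
parser = JsonOutputKeyToolsParser(key_name="search", first_tool_only=True)

# With return_id left False, only the args of the first "search" call come back.
print(parser.parse_result([ChatGeneration(message=message)]))  # {'query': 'weather'}
```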
python
|
PrefectHQ__prefect
|
src/prefect/artifacts.py
|
{
"start": 10280,
"end": 11057
}
|
class ____(Artifact):
progress: float
type: Optional[str] = "progress"
def _format(self) -> float:
# Ensure progress is between 0 and 100
min_progress = 0.0
max_progress = 100.0
if self.progress < min_progress or self.progress > max_progress:
logger.warning(
f"ProgressArtifact received an invalid value, Progress: {self.progress}%"
)
self.progress = max(min_progress, min(self.progress, max_progress))
logger.warning(f"Interpreting as {self.progress}% progress")
return self.progress
async def aformat(self) -> float:
return self._format()
@async_dispatch(aformat)
def format(self) -> float:
return self._format()
|
ProgressArtifact
|
python
|
sqlalchemy__sqlalchemy
|
examples/association/dict_of_sets_with_default.py
|
{
"start": 1879,
"end": 2388
}
|
class ____(Base):
__tablename__ = "b"
a_id: Mapped[int] = mapped_column(ForeignKey("a.id"))
elements: Mapped[set[C]] = relationship("C", collection_class=set)
key: Mapped[str]
values: AssociationProxy[set[int]] = association_proxy("elements", "value")
"""Bridge the association from 'elements' over to the
'value' element of C."""
def __init__(self, key: str, values: set[int] | None = None) -> None:
self.key = key
if values:
self.values = values
|
B
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backends/backend_pgf.py
|
{
"start": 34774,
"end": 34840
}
|
class ____(_Backend):
FigureCanvas = FigureCanvasPgf
|
_BackendPgf
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/beta/beta_request_mcp_server_tool_configuration_param.py
|
{
"start": 289,
"end": 441
}
|
class ____(TypedDict, total=False):
allowed_tools: Optional[SequenceNotStr[str]]
enabled: Optional[bool]
|
BetaRequestMCPServerToolConfigurationParam
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/period.py
|
{
"start": 1219,
"end": 1652
}
|
class ____:
def setup(self):
self.rng = period_range(start="1/1/1990", freq="s", periods=20000)
self.df = DataFrame(index=range(len(self.rng)))
def time_setitem_period_column(self):
self.df["col"] = self.rng
def time_set_index(self):
# GH#21582 limited by comparisons of Period objects
self.df["col2"] = self.rng
self.df.set_index("col2", append=True)
|
DataFramePeriodColumn
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/rds.py
|
{
"start": 1814,
"end": 2479
}
|
class ____(AwsBaseOperator[RdsHook]):
"""Base operator that implements common functions for all operators."""
aws_hook_class = RdsHook
ui_color = "#eeaa88"
ui_fgcolor = "#ffffff"
def __init__(
self,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self._await_interval = 60 # seconds
def execute(self, context: Context) -> str:
"""Different implementations for snapshots, tasks and events."""
raise NotImplementedError
def on_kill(self) -> None:
"""Different implementations for snapshots, tasks and events."""
raise NotImplementedError
|
RdsBaseOperator
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py
|
{
"start": 13160,
"end": 13465
}
|
class ____(AggregateDataMixin, IncrementalAppsflyerStream):
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return f"agg-data/export/app/{self.app_id}/geo_by_date_report/v5"
|
GeoReport
|
python
|
cython__cython
|
tests/run/test_tstring.py
|
{
"start": 1309,
"end": 4037
}
|
class ____:
def assertInterpolationEqual(self, i, exp):
"""Test Interpolation equality.
The *i* argument must be an Interpolation instance.
The *exp* argument must be a tuple of the form
(value, expression, conversion, format_spec) where the final three
items may be omitted and are assumed to be '', None and '' respectively.
"""
if len(exp) == 4:
actual = (i.value, i.expression, i.conversion, i.format_spec)
self.assertEqual(actual, exp)
elif len(exp) == 3:
self.assertEqual((i.value, i.expression, i.conversion), exp)
self.assertEqual(i.format_spec, "")
elif len(exp) == 2:
self.assertEqual((i.value, i.expression), exp)
self.assertEqual(i.conversion, None)
self.assertEqual(i.format_spec, "")
elif len(exp) == 1:
self.assertEqual((i.value,), exp)
self.assertEqual(i.expression, "")
self.assertEqual(i.conversion, None)
self.assertEqual(i.format_spec, "")
def assertTStringEqual(self, t, strings, interpolations):
"""Test template string literal equality.
The *strings* argument must be a tuple of strings equal to *t.strings*.
The *interpolations* argument must be a sequence of tuples which are
compared against *t.interpolations*. Each tuple must match the form
described in the `assertInterpolationEqual` method.
"""
self.assertEqual(t.strings, strings)
self.assertEqual(len(t.interpolations), len(interpolations))
if len(t.interpolations) != len(interpolations):
# Handle Python <3.10 which doesn't have strict in zip
raise ValueError(f"Lengths differ {len(t.interpolations)} {len(interpolations)}")
for i, exp in zip(t.interpolations, interpolations):
self.assertInterpolationEqual(i, exp)
def convert(value, conversion):
if conversion == "a":
return ascii(value)
elif conversion == "r":
return repr(value)
elif conversion == "s":
return str(value)
return value
def fstring(template):
parts = []
for item in template:
# adapted from match/case since we don't yet support it
if isinstance(item, str):
parts.append(item)
elif isinstance(item, Interpolation):
value = item.value
conversion = item.conversion
format_spec = item.format_spec
value = convert(value, conversion)
value = format(value, format_spec)
parts.append(value)
return "".join(parts)
############# From Python test file ##############
|
TStringBaseCase
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-pattern-in-infinite-stream-i.py
|
{
"start": 33,
"end": 96
}
|
class ____:
def next(self):
pass
# kmp
|
InfiniteStream
|
python
|
rapidsai__cudf
|
python/cudf/cudf/core/buffer/spillable_buffer.py
|
{
"start": 1665,
"end": 2399
}
|
class ____:
# A wrapper that exposes the __cuda_array_interface__ of a SpillableBuffer without
# actually accessing __cuda_array_interface__, which triggers spilling.
_buf: SpillableBuffer
def __init__(self, buf: SpillableBuffer) -> None:
self._buf = buf
self._spill_lock = SpillLock()
@property
def __cuda_array_interface__(self) -> dict:
self._buf.spill_lock(self._spill_lock)
# Accessing _memory_info doesn't trigger spilling
ptr, size, _ = self._buf.memory_info()
return {
"data": (ptr, False),
"shape": (size,),
"strides": None,
"typestr": "|u1",
"version": 0,
}
|
SpillableBufferCAIWrapper
|
python
|
pallets__werkzeug
|
src/werkzeug/datastructures/mixins.py
|
{
"start": 475,
"end": 1932
}
|
class ____:
"""Makes a :class:`list` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache: int | None = None
def __hash__(self) -> int:
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(tuple(self)) # type: ignore[arg-type]
return rv
def __reduce_ex__(self, protocol: t.SupportsIndex) -> t.Any:
return type(self), (list(self),) # type: ignore[call-overload]
def __delitem__(self, key: t.Any) -> t.NoReturn:
_immutable_error(self)
def __iadd__(self, other: t.Any) -> t.NoReturn:
_immutable_error(self)
def __imul__(self, other: t.Any) -> t.NoReturn:
_immutable_error(self)
def __setitem__(self, key: t.Any, value: t.Any) -> t.NoReturn:
_immutable_error(self)
def append(self, item: t.Any) -> t.NoReturn:
_immutable_error(self)
def remove(self, item: t.Any) -> t.NoReturn:
_immutable_error(self)
def extend(self, iterable: t.Any) -> t.NoReturn:
_immutable_error(self)
def insert(self, pos: t.Any, value: t.Any) -> t.NoReturn:
_immutable_error(self)
def pop(self, index: t.Any = -1) -> t.NoReturn:
_immutable_error(self)
def reverse(self: t.Any) -> t.NoReturn:
_immutable_error(self)
def sort(self, key: t.Any = None, reverse: t.Any = False) -> t.NoReturn:
_immutable_error(self)
|
ImmutableListMixin
|
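Editor's note: werkzeug combines the mixin above with `list` to build its public `ImmutableList`. A short usage sketch: reads and hashing work, while every mutator raises:

```python
from werkzeug.datastructures import ImmutableList

frozen = ImmutableList([1, 2, 3])
print(frozen[0], len(frozen))  # normal list reads still work
print(hash(frozen))            # hashable; cached via _hash_cache after the first call

try:
    frozen.append(4)
except TypeError as exc:       # _immutable_error raises TypeError
    print(exc)
```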
python
|
tqdm__tqdm
|
tqdm/contrib/discord.py
|
{
"start": 638,
"end": 3077
}
|
class ____(MonoWorker):
"""Non-blocking file-like IO using a Discord Bot."""
API = "https://discord.com/api/v10"
UA = f"tqdm (https://tqdm.github.io, {__version__}) {default_user_agent()}"
def __init__(self, token, channel_id):
"""Creates a new message in the given `channel_id`."""
super().__init__()
self.token = token
self.channel_id = channel_id
self.session = Session()
self.text = self.__class__.__name__
self.message_id
@property
def message_id(self):
if hasattr(self, '_message_id'):
return self._message_id
try:
res = self.session.post(
f'{self.API}/channels/{self.channel_id}/messages',
headers={'Authorization': f'Bot {self.token}', 'User-Agent': self.UA},
json={'content': f"`{self.text}`"}).json()
except Exception as e:
tqdm_auto.write(str(e))
else:
if res.get('error_code') == 429:
warn("Creation rate limit: try increasing `mininterval`.",
TqdmWarning, stacklevel=2)
else:
self._message_id = res['id']
return self._message_id
def write(self, s):
"""Replaces internal `message_id`'s text with `s`."""
if not s:
s = "..."
s = s.replace('\r', '').strip()
if s == self.text:
return # avoid duplicate message Bot error
message_id = self.message_id
if message_id is None:
return
self.text = s
try:
future = self.submit(
self.session.patch,
f'{self.API}/channels/{self.channel_id}/messages/{message_id}',
headers={'Authorization': f'Bot {self.token}', 'User-Agent': self.UA},
json={'content': f"`{self.text}`"})
except Exception as e:
tqdm_auto.write(str(e))
else:
return future
def delete(self):
"""Deletes internal `message_id`."""
try:
future = self.submit(
self.session.delete,
f'{self.API}/channels/{self.channel_id}/messages/{self.message_id}',
headers={'Authorization': f'Bot {self.token}', 'User-Agent': self.UA})
except Exception as e:
tqdm_auto.write(str(e))
else:
return future
|
DiscordIO
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1461031,
"end": 1469700
}
|
class ____(TopLevelSpec):
"""
TopLevelHConcatSpec schema wrapper.
Parameters
----------
hconcat : Sequence[dict, :class:`FacetSpec`, :class:`LayerSpec`, :class:`RepeatSpec`, :class:`FacetedUnitSpec`, :class:`LayerRepeatSpec`, :class:`NonNormalizedSpec`, :class:`NonLayerRepeatSpec`, :class:`ConcatSpecGenericSpec`, :class:`HConcatSpecGenericSpec`, :class:`VConcatSpecGenericSpec`]
A list of views to be concatenated and put into a row.
autosize : dict, :class:`AutosizeType`, :class:`AutoSizeParams`, Literal['pad', 'none', 'fit', 'fit-x', 'fit-y']
How the visualization size should be determined. If a string, should be one of
``"pad"``, ``"fit"`` or ``"none"``. Object values can additionally specify
parameters for content sizing and automatic resizing.
**Default value**: ``pad``
background : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple']
CSS color property to use as the background of the entire view.
**Default value:** ``"white"``
bounds : Literal['full', 'flush']
The bounds calculation method to use for determining the extent of a sub-plot. One
of ``full`` (the default) or ``flush``.
* If set to ``full``, the entire calculated bounds (including axes, title, and
legend) will be used.
* If set to ``flush``, only the specified width and height values for the sub-view
will be used. The ``flush`` setting can be useful when attempting to place
sub-plots without axes or legends into a uniform grid structure.
**Default value:** ``"full"``
center : bool
Boolean flag indicating if subviews should be centered relative to their respective
rows or columns.
**Default value:** ``false``
config : dict, :class:`Config`
Vega-Lite configuration object. This property can only be defined at the top-level
of a specification.
data : dict, :class:`Data`, :class:`UrlData`, :class:`Generator`, :class:`NamedData`, :class:`DataSource`, :class:`InlineData`, :class:`SphereGenerator`, :class:`SequenceGenerator`, :class:`GraticuleGenerator`, None
An object describing the data source. Set to ``null`` to ignore the parent's data
source. If no data is set, it is derived from the parent.
datasets : dict, :class:`Datasets`
A global data store for named datasets. This is a mapping from names to inline
datasets. This can be an array of objects or primitive values or a string. Arrays of
primitive values are ingested as objects with a ``data`` property.
description : str
Description of this mark for commenting purpose.
name : str
Name of the visualization for later reference.
padding : dict, float, :class:`ExprRef`, :class:`Padding`
The default visualization padding, in pixels, from the edge of the visualization
canvas to the data rectangle. If a number, specifies padding for all sides. If an
object, the value should have the format ``{"left": 5, "top": 5, "right": 5,
"bottom": 5}`` to specify padding for each side of the visualization.
**Default value**: ``5``
params : Sequence[dict, :class:`TopLevelParameter`, :class:`VariableParameter`, :class:`TopLevelSelectionParameter`]
Dynamic variables or selections that parameterize a visualization.
resolve : dict, :class:`Resolve`
Scale, axis, and legend resolutions for view composition specifications.
spacing : float
The spacing in pixels between sub-views of the concat operator.
**Default value**: ``10``
title : str, dict, :class:`Text`, Sequence[str], :class:`TitleParams`
Title for the plot.
transform : Sequence[dict, :class:`Transform`, :class:`BinTransform`, :class:`FoldTransform`, :class:`LoessTransform`, :class:`PivotTransform`, :class:`StackTransform`, :class:`ExtentTransform`, :class:`FilterTransform`, :class:`ImputeTransform`, :class:`LookupTransform`, :class:`SampleTransform`, :class:`WindowTransform`, :class:`DensityTransform`, :class:`FlattenTransform`, :class:`QuantileTransform`, :class:`TimeUnitTransform`, :class:`AggregateTransform`, :class:`CalculateTransform`, :class:`RegressionTransform`, :class:`JoinAggregateTransform`]
An array of data transformations such as filter and new field calculation.
usermeta : dict, :class:`Dict`
Optional metadata that will be passed to Vega. This object is completely ignored by
Vega and Vega-Lite and can be used for custom metadata.
$schema : str
URL to `JSON schema <http://json-schema.org/>`__ for a Vega-Lite specification.
Unless you have a reason to change this, use
``https://vega.github.io/schema/vega-lite/v6.json``. Setting the ``$schema``
property allows automatic validation and autocomplete in editors that support JSON
schema.
"""
_schema = {"$ref": "#/definitions/TopLevelHConcatSpec"}
def __init__(
self,
hconcat: Optional[Sequence[SchemaBase | Map]] = Undefined,
autosize: Optional[SchemaBase | Map | AutosizeType_T] = Undefined,
background: Optional[
str | Parameter | SchemaBase | Map | ColorName_T
] = Undefined,
bounds: Optional[Literal["full", "flush"]] = Undefined,
center: Optional[bool] = Undefined,
config: Optional[SchemaBase | Map] = Undefined,
data: Optional[SchemaBase | ChartDataType | Map | None] = Undefined,
datasets: Optional[SchemaBase | Map] = Undefined,
description: Optional[str] = Undefined,
name: Optional[str] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
params: Optional[Sequence[SchemaBase | Map]] = Undefined,
resolve: Optional[SchemaBase | Map] = Undefined,
spacing: Optional[float] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
transform: Optional[Sequence[SchemaBase | Map]] = Undefined,
usermeta: Optional[SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(
hconcat=hconcat,
autosize=autosize,
background=background,
bounds=bounds,
center=center,
config=config,
data=data,
datasets=datasets,
description=description,
name=name,
padding=padding,
params=params,
resolve=resolve,
spacing=spacing,
title=title,
transform=transform,
usermeta=usermeta,
**kwds,
)
|
TopLevelHConcatSpec
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
|
{
"start": 50335,
"end": 51153
}
|
class ____(GeneratedAirbyteSource):
@public
def __init__(
self, name: str, tickers: str, interval: Optional[str] = None, range: Optional[str] = None
):
"""Airbyte Source for Yahoo Finance Price.
Args:
name (str): The name of the source.
tickers (str): Comma-separated identifiers for the stocks to be queried. Whitespaces are allowed.
interval (Optional[str]): The interval between prices queried.
range (Optional[str]): The range of prices to be queried.
"""
self.tickers = check.str_param(tickers, "tickers")
self.interval = check.opt_str_param(interval, "interval")
self.range = check.opt_str_param(range, "range")
super().__init__("Yahoo Finance Price", name)
|
YahooFinancePriceSource
|
python
|
bokeh__bokeh
|
src/bokeh/models/formatters.py
|
{
"start": 3296,
"end": 4237
}
|
class ____(TickFormatter):
''' Display tick values from continuous ranges as "basic numbers",
using scientific notation when appropriate by default.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
precision = Either(Auto, Int, help="""
How many digits of precision to display in tick labels.
""")
use_scientific = Bool(True, help="""
Whether to ever display scientific notation. If ``True``, then
when to use scientific notation is controlled by ``power_limit_low``
and ``power_limit_high``.
""")
power_limit_high = Int(5, help="""
Limit the use of scientific notation to when::
log(x) >= power_limit_high
""")
power_limit_low = Int(-3, help="""
Limit the use of scientific notation to when::
log(x) <= power_limit_low
""")
|
BasicTickFormatter
|
python
|
django__django
|
django/contrib/auth/backends.py
|
{
"start": 218,
"end": 1700
}
|
class ____:
def authenticate(self, request, **kwargs):
return None
async def aauthenticate(self, request, **kwargs):
return await sync_to_async(self.authenticate)(request, **kwargs)
def get_user(self, user_id):
return None
async def aget_user(self, user_id):
return await sync_to_async(self.get_user)(user_id)
def get_user_permissions(self, user_obj, obj=None):
return set()
async def aget_user_permissions(self, user_obj, obj=None):
return await sync_to_async(self.get_user_permissions)(user_obj, obj)
def get_group_permissions(self, user_obj, obj=None):
return set()
async def aget_group_permissions(self, user_obj, obj=None):
return await sync_to_async(self.get_group_permissions)(user_obj, obj)
def get_all_permissions(self, user_obj, obj=None):
return {
*self.get_user_permissions(user_obj, obj=obj),
*self.get_group_permissions(user_obj, obj=obj),
}
async def aget_all_permissions(self, user_obj, obj=None):
return {
*await self.aget_user_permissions(user_obj, obj=obj),
*await self.aget_group_permissions(user_obj, obj=obj),
}
def has_perm(self, user_obj, perm, obj=None):
return perm in self.get_all_permissions(user_obj, obj=obj)
async def ahas_perm(self, user_obj, perm, obj=None):
return perm in await self.aget_all_permissions(user_obj, obj)
|
BaseBackend
|
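Editor's note: a minimal sketch (hypothetical backend, not from the Django source or docs) of subclassing the base class above; only `authenticate` and `get_user` are overridden, and the sync/async permission aggregation defined on the base keeps working:

```python
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import BaseBackend


class SharedTokenBackend(BaseBackend):
    """Authenticate requests presenting a single shared token (illustrative only)."""

    def authenticate(self, request, token=None, **kwargs):
        # Assumes a hypothetical SHARED_API_TOKEN setting; returning None lets
        # Django fall through to the next entry in AUTHENTICATION_BACKENDS.
        if token and token == getattr(settings, "SHARED_API_TOKEN", None):
            return get_user_model().objects.filter(is_active=True).first()
        return None

    def get_user(self, user_id):
        return get_user_model().objects.filter(pk=user_id).first()
```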
python
|
paramiko__paramiko
|
tests/test_client.py
|
{
"start": 1716,
"end": 3774
}
|
class ____(paramiko.ServerInterface):
def __init__(self, *args, **kwargs):
# Allow tests to enable/disable specific key types
self.__allowed_keys = kwargs.pop("allowed_keys", [])
# And allow them to set a (single...meh) expected public blob (cert)
self.__expected_public_blob = kwargs.pop("public_blob", None)
super().__init__(*args, **kwargs)
def get_allowed_auths(self, username):
if username == "slowdive":
return "publickey,password"
return "publickey"
def check_auth_password(self, username, password):
if (username == "slowdive") and (password == "pygmalion"):
return paramiko.AUTH_SUCCESSFUL
if (username == "slowdive") and (password == "unresponsive-server"):
time.sleep(5)
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def check_auth_publickey(self, username, key):
try:
expected = FINGERPRINTS[key.get_name()]
except KeyError:
return paramiko.AUTH_FAILED
# Base check: allowed auth type & fingerprint matches
happy = (
key.get_name() in self.__allowed_keys
and key.get_fingerprint() == expected
)
# Secondary check: if test wants assertions about cert data
if (
self.__expected_public_blob is not None
and key.public_blob != self.__expected_public_blob
):
happy = False
return paramiko.AUTH_SUCCESSFUL if happy else paramiko.AUTH_FAILED
def check_channel_request(self, kind, chanid):
return paramiko.OPEN_SUCCEEDED
def check_channel_exec_request(self, channel, command):
if command != b"yes":
return False
return True
def check_channel_env_request(self, channel, name, value):
if name == "INVALID_ENV":
return False
if not hasattr(channel, "env"):
setattr(channel, "env", {})
channel.env[name] = value
return True
|
NullServer
|
python
|
streamlit__streamlit
|
lib/streamlit/testing/v1/element_tree.py
|
{
"start": 20135,
"end": 20516
}
|
class ____(Element):
proto: MetricProto
label: str
delta: str
color: str
help: str
def __init__(self, proto: MetricProto, root: ElementTree) -> None:
self.proto = proto
self.key = None
self.root = root
self.type = "metric"
@property
def value(self) -> str:
return self.proto.body
@dataclass(repr=False)
|
Metric
|
python
|
pytorch__pytorch
|
test/quantization/core/test_utils.py
|
{
"start": 366,
"end": 8542
}
|
class ____(TestCase):
def _test_get_fqn_to_example_inputs(self, M, example_inputs, expected_fqn_to_dim):
m = M().eval()
fqn_to_example_inputs = get_fqn_to_example_inputs(m, example_inputs)
for fqn, expected_dims in expected_fqn_to_dim.items():
assert fqn in expected_fqn_to_dim
example_inputs = fqn_to_example_inputs[fqn]
for example_input, expected_dim in zip(example_inputs, expected_dims):
assert example_input.dim() == expected_dim
def test_get_fqn_to_example_inputs_simple(self):
class Sub(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return x
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
self.sub = Sub()
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
x = self.sub(x)
return x
expected_fqn_to_dim = {
"": (2,),
"linear1": (2,),
"linear2": (2,),
"sub": (2,),
"sub.linear1": (2,),
"sub.linear2": (2,)
}
example_inputs = (torch.rand(1, 5),)
self._test_get_fqn_to_example_inputs(M, example_inputs, expected_fqn_to_dim)
def test_get_fqn_to_example_inputs_default_kwargs(self):
""" Test that we can get example inputs for functions with default keyword arguments
"""
class Sub(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
def forward(self, x, key1=torch.rand(1), key2=torch.rand(1)):
x = self.linear1(x)
x = self.linear2(x)
return x
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
self.sub = Sub()
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
# only override `key2`, `key1` will use default
x = self.sub(x, key2=torch.rand(1, 2))
return x
expected_fqn_to_dim = {
"": (2,),
"linear1": (2,),
"linear2": (2,),
# second arg is `key1`, which is using default argument
# third arg is `key2`, override by callsite
"sub": (2, 1, 2),
"sub.linear1": (2,),
"sub.linear2": (2,)
}
example_inputs = (torch.rand(1, 5),)
self._test_get_fqn_to_example_inputs(M, example_inputs, expected_fqn_to_dim)
def test_get_fqn_to_example_inputs_complex_args(self):
""" Test that we can record complex example inputs such as lists and dicts
"""
class Sub(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
def forward(self, x, list_arg, dict_arg):
x = self.linear1(x)
x = self.linear2(x)
return x
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
self.sub = Sub()
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
x = self.sub(x, [x], {"3": x})
return x
example_inputs = (torch.rand(1, 5),)
m = M().eval()
fqn_to_example_inputs = get_fqn_to_example_inputs(m, example_inputs)
assert "sub" in fqn_to_example_inputs
assert isinstance(fqn_to_example_inputs["sub"][1], list)
assert isinstance(fqn_to_example_inputs["sub"][2], dict) and \
"3" in fqn_to_example_inputs["sub"][2]
def test_quantize_weight_clamping_per_tensor(self):
""" Test quant_{min, max} from per tensor observer is honored by `_quantize_weight` method
"""
fp_min, fp_max = -1000.0, 1000.0
q8_min, q8_max = -10, 10
float_tensor = torch.tensor([fp_min, fp_max])
observer = MovingAverageMinMaxObserver(
averaging_constant=1.0,
dtype=torch.qint8,
quant_min=q8_min,
quant_max=q8_max,
qscheme=torch.per_tensor_symmetric,
)
observer(float_tensor)
assert observer.min_val == fp_min
assert observer.max_val == fp_max
quantized_tensor = _quantize_weight(float_tensor, observer)
assert quantized_tensor.int_repr().max().item() == q8_max
assert quantized_tensor.int_repr().min().item() == q8_min
# Actual weight values can fall outside the observer's [min_val, max_val] range for the moving average observer
float_tensor *= 1.2
quantized_tensor = _quantize_weight(float_tensor, observer)
assert quantized_tensor.int_repr().max().item() == q8_max
assert quantized_tensor.int_repr().min().item() == q8_min
def test_quantize_weight_clamping_per_channel(self):
""" Test quant_{min, max} from per channel observer is honored by `_quantize_weight` method
"""
fp_min, fp_max = -1000.0, 1000.0
q8_min, q8_max = -10, 10
float_tensor = torch.tensor([[fp_min, fp_max]])
observer = MovingAveragePerChannelMinMaxObserver(
averaging_constant=1.0,
dtype=torch.qint8,
quant_min=q8_min,
quant_max=q8_max,
qscheme=torch.per_channel_symmetric,
ch_axis=0,
)
observer(float_tensor)
assert observer.min_val == fp_min
assert observer.max_val == fp_max
quantized_tensor = _quantize_weight(float_tensor, observer)
assert quantized_tensor.int_repr().max().item() == q8_max
assert quantized_tensor.int_repr().min().item() == q8_min
# Actual weight values can fall outside the observer's [min_val, max_val] range for the moving average observer
float_tensor *= 1.2
quantized_tensor = _quantize_weight(float_tensor, observer)
assert quantized_tensor.int_repr().max().item() == q8_max
assert quantized_tensor.int_repr().min().item() == q8_min
def test_uint4_int4_dtype(self):
def up_size(size):
return (*size[:-1], size[-1] * 2)
for dtype in [torch.uint4, torch.int4]:
class UInt4OrInt4Tensor(torch.Tensor):
@staticmethod
def __new__(cls, elem, **kwargs):
assert elem.dtype is torch.uint8
assert not kwargs.get("requires_grad", False)
kwargs["requires_grad"] = False
return torch.Tensor._make_wrapper_subclass(cls, up_size(elem.shape), dtype=dtype, **kwargs)
def __init__(self, elem):
self.elem = elem
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs=None):
pass
# make sure it runs
x = UInt4OrInt4Tensor(torch.tensor([
[0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF],
[0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF],
[0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF],
], dtype=torch.uint8))
assert x.dtype == dtype
if __name__ == "__main__":
raise_on_run_directly("test/test_quantization.py")
|
TestUtils
|
python
|
pexpect__pexpect
|
tests/test_repr.py
|
{
"start": 78,
"end": 999
}
|
class ____(PexpectTestCase.PexpectTestCase):
def test_str_spawnu(self):
""" Exercise spawnu.__str__() """
# given,
p = pexpect.spawnu('cat')
# exercise,
value = str(p)
# verify
assert isinstance(value, str)
def test_str_spawn(self):
""" Exercise spawn.__str__() """
# given,
p = pexpect.spawn('cat')
# exercise,
value = str(p)
# verify
assert isinstance(value, str)
def test_str_before_spawn(self):
""" Exercise derived spawn.__str__() """
# given,
child = pexpect.spawn(None, None)
child.read_nonblocking = lambda size, timeout: b''
try:
child.expect('alpha', timeout=0.1)
except pexpect.TIMEOUT as e:
str(e) # Smoketest
else:
assert False, 'TIMEOUT exception expected. No exception raised.'
|
TestCaseMisc
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_polynomial.py
|
{
"start": 52,
"end": 803
}
|
class ____(Benchmark):
def setup(self):
self.polynomial_degree2 = np.polynomial.Polynomial(np.array([1, 2]))
self.array3 = np.linspace(0, 1, 3)
self.array1000 = np.linspace(0, 1, 10_000)
self.float64 = np.float64(1.0)
def time_polynomial_evaluation_scalar(self):
self.polynomial_degree2(self.float64)
def time_polynomial_evaluation_python_float(self):
self.polynomial_degree2(1.0)
def time_polynomial_evaluation_array_3(self):
self.polynomial_degree2(self.array3)
def time_polynomial_evaluation_array_1000(self):
self.polynomial_degree2(self.array1000)
def time_polynomial_addition(self):
_ = self.polynomial_degree2 + self.polynomial_degree2
|
Polynomial
|
python
|
huggingface__transformers
|
src/transformers/models/poolformer/modeling_poolformer.py
|
{
"start": 3777,
"end": 4579
}
|
class ____(nn.Module):
def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
super().__init__()
self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
self.drop = PoolFormerDropPath(dropout_prob)
if isinstance(config.hidden_act, str):
self.act_fn = ACT2FN[config.hidden_act]
else:
self.act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.conv1(hidden_states)
hidden_states = self.act_fn(hidden_states)
hidden_states = self.drop(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.drop(hidden_states)
return hidden_states
|
PoolFormerOutput
|
python
|
GoogleCloudPlatform__python-docs-samples
|
compute/client_library/snippets/instances/custom_machine_types/create_shared_with_helper.py
|
{
"start": 1200,
"end": 20072
}
|
class ____:
"""
Allows to create custom machine types to be used with the VM instances.
"""
@unique
class CPUSeries(Enum):
N1 = "custom"
N2 = "n2-custom"
N2D = "n2d-custom"
E2 = "e2-custom"
E2_MICRO = "e2-custom-micro"
E2_SMALL = "e2-custom-small"
E2_MEDIUM = "e2-custom-medium"
TypeLimits = namedtuple(
"TypeLimits",
[
"allowed_cores",
"min_mem_per_core",
"max_mem_per_core",
"allow_extra_memory",
"extra_memory_limit",
],
)
# The limits for various CPU types are described on:
# https://cloud.google.com/compute/docs/general-purpose-machines
LIMITS = {
CPUSeries.E2: TypeLimits(frozenset(range(2, 33, 2)), 512, 8192, False, 0),
CPUSeries.E2_MICRO: TypeLimits(frozenset(), 1024, 2048, False, 0),
CPUSeries.E2_SMALL: TypeLimits(frozenset(), 2048, 4096, False, 0),
CPUSeries.E2_MEDIUM: TypeLimits(frozenset(), 4096, 8192, False, 0),
CPUSeries.N2: TypeLimits(
frozenset(range(2, 33, 2)).union(set(range(36, 129, 4))),
512,
8192,
True,
gb_to_mb(624),
),
CPUSeries.N2D: TypeLimits(
frozenset({2, 4, 8, 16, 32, 48, 64, 80, 96}), 512, 8192, True, gb_to_mb(768)
),
CPUSeries.N1: TypeLimits(
frozenset({1}.union(range(2, 97, 2))), 922, 6656, True, gb_to_mb(624)
),
}
def __init__(
self, zone: str, cpu_series: CPUSeries, memory_mb: int, core_count: int = 0
):
self.zone = zone
self.cpu_series = cpu_series
self.limits = self.LIMITS[self.cpu_series]
# Shared machine types (e2-small, e2-medium and e2-micro) always have
# 2 vCPUs: https://cloud.google.com/compute/docs/general-purpose-machines#e2_limitations
self.core_count = 2 if self.is_shared() else core_count
self.memory_mb = memory_mb
self._checked = False
self._check_parameters()
self.extra_memory_used = self._check_extra_memory()
def is_shared(self):
return self.cpu_series in (
CustomMachineType.CPUSeries.E2_SMALL,
CustomMachineType.CPUSeries.E2_MICRO,
CustomMachineType.CPUSeries.E2_MEDIUM,
)
def _check_extra_memory(self) -> bool:
if self._checked:
return self.memory_mb > self.core_count * self.limits.max_mem_per_core
else:
raise RuntimeError(
"You need to call _check_parameters() before calling _check_extra_memory()"
)
def _check_parameters(self):
"""
Check whether the requested parameters are allowed. Find more information about limitations of custom machine
types at: https://cloud.google.com/compute/docs/general-purpose-machines#custom_machine_types
"""
# Check the number of cores
if (
self.limits.allowed_cores
and self.core_count not in self.limits.allowed_cores
):
raise RuntimeError(
f"Invalid number of cores requested. Allowed number of cores for {self.cpu_series.name} is: {sorted(self.limits.allowed_cores)}"
)
# Memory must be a multiple of 256 MB
if self.memory_mb % 256 != 0:
raise RuntimeError("Requested memory must be a multiple of 256 MB.")
# Check if the requested memory isn't too little
if self.memory_mb < self.core_count * self.limits.min_mem_per_core:
raise RuntimeError(
f"Requested memory is too low. Minimal memory for {self.cpu_series.name} is {self.limits.min_mem_per_core} MB per core."
)
# Check if the requested memory isn't too much
if self.memory_mb > self.core_count * self.limits.max_mem_per_core:
if self.limits.allow_extra_memory:
if self.memory_mb > self.limits.extra_memory_limit:
raise RuntimeError(
f"Requested memory is too large.. Maximum memory allowed for {self.cpu_series.name} is {self.limits.extra_memory_limit} MB."
)
else:
raise RuntimeError(
f"Requested memory is too large.. Maximum memory allowed for {self.cpu_series.name} is {self.limits.max_mem_per_core} MB per core."
)
self._checked = True
def __str__(self) -> str:
"""
Return the custom machine type in form of a string acceptable by Compute Engine API.
"""
if self.cpu_series in {
self.CPUSeries.E2_SMALL,
self.CPUSeries.E2_MICRO,
self.CPUSeries.E2_MEDIUM,
}:
return f"zones/{self.zone}/machineTypes/{self.cpu_series.value}-{self.memory_mb}"
if self.extra_memory_used:
return f"zones/{self.zone}/machineTypes/{self.cpu_series.value}-{self.core_count}-{self.memory_mb}-ext"
return f"zones/{self.zone}/machineTypes/{self.cpu_series.value}-{self.core_count}-{self.memory_mb}"
def short_type_str(self) -> str:
"""
Return machine type in a format without the zone. For example, n2-custom-0-10240.
This format is used to create instance templates.
"""
return str(self).rsplit("/", maxsplit=1)[1]
@classmethod
def from_str(cls, machine_type: str):
"""
Construct a new object from a string. The string needs to be a valid custom machine type like:
- https://www.googleapis.com/compute/v1/projects/diregapic-mestiv/zones/us-central1-b/machineTypes/e2-custom-4-8192
- zones/us-central1-b/machineTypes/e2-custom-4-8192
- e2-custom-4-8192 (in this case, the zone parameter will not be set)
"""
zone = None
if machine_type.startswith("http"):
machine_type = machine_type[machine_type.find("zones/") :]
if machine_type.startswith("zones/"):
_, zone, _, machine_type = machine_type.split("/")
extra_mem = machine_type.endswith("-ext")
if machine_type.startswith("custom"):
cpu = cls.CPUSeries.N1
_, cores, memory = machine_type.rsplit("-", maxsplit=2)
else:
if extra_mem:
cpu_series, _, cores, memory, _ = machine_type.split("-")
else:
cpu_series, _, cores, memory = machine_type.split("-")
if cpu_series == "n2":
cpu = cls.CPUSeries.N2
elif cpu_series == "n2d":
cpu = cls.CPUSeries.N2D
elif cpu_series == "e2":
cpu = cls.CPUSeries.E2
if cores == "micro":
cpu = cls.CPUSeries.E2_MICRO
cores = 2
elif cores == "small":
cpu = cls.CPUSeries.E2_SMALL
cores = 2
elif cores == "medium":
cpu = cls.CPUSeries.E2_MEDIUM
cores = 2
else:
raise RuntimeError("Unknown CPU series.")
cores = int(cores)
memory = int(memory)
return cls(zone, cpu, memory, cores)
def get_image_from_family(project: str, family: str) -> compute_v1.Image:
"""
Retrieve the newest image that is part of a given family in a project.
Args:
project: project ID or project number of the Cloud project you want to get image from.
family: name of the image family you want to get image from.
Returns:
An Image object.
"""
image_client = compute_v1.ImagesClient()
# List of public operating system (OS) images: https://cloud.google.com/compute/docs/images/os-details
newest_image = image_client.get_from_family(project=project, family=family)
return newest_image
def disk_from_image(
disk_type: str,
disk_size_gb: int,
boot: bool,
source_image: str,
auto_delete: bool = True,
) -> compute_v1.AttachedDisk:
"""
Create an AttachedDisk object to be used in VM instance creation. Uses an image as the
source for the new disk.
Args:
disk_type: the type of disk you want to create. This value uses the following format:
"zones/{zone}/diskTypes/(pd-standard|pd-ssd|pd-balanced|pd-extreme)".
For example: "zones/us-west3-b/diskTypes/pd-ssd"
disk_size_gb: size of the new disk in gigabytes
boot: boolean flag indicating whether this disk should be used as a boot disk of an instance
source_image: source image to use when creating this disk. You must have read access to this disk. This can be one
of the publicly available images or an image from one of your projects.
This value uses the following format: "projects/{project_name}/global/images/{image_name}"
auto_delete: boolean flag indicating whether this disk should be deleted with the VM that uses it
Returns:
AttachedDisk object configured to be created using the specified image.
"""
boot_disk = compute_v1.AttachedDisk()
initialize_params = compute_v1.AttachedDiskInitializeParams()
initialize_params.source_image = source_image
initialize_params.disk_size_gb = disk_size_gb
initialize_params.disk_type = disk_type
boot_disk.initialize_params = initialize_params
# Remember to set auto_delete to True if you want the disk to be deleted when you delete
# your VM instance.
boot_disk.auto_delete = auto_delete
boot_disk.boot = boot
return boot_disk
def wait_for_extended_operation(
operation: ExtendedOperation, verbose_name: str = "operation", timeout: int = 300
) -> Any:
"""
Waits for the extended (long-running) operation to complete.
If the operation is successful, it will return its result.
If the operation ends with an error, an exception will be raised.
If there were any warnings during the execution of the operation
they will be printed to sys.stderr.
Args:
operation: a long-running operation you want to wait on.
verbose_name: (optional) a more verbose name of the operation,
used only during error and warning reporting.
timeout: how long (in seconds) to wait for operation to finish.
If None, wait indefinitely.
Returns:
Whatever the operation.result() returns.
Raises:
This method will raise the exception received from `operation.exception()`
or RuntimeError if there is no exception set, but there is an `error_code`
set for the `operation`.
In case of an operation taking longer than `timeout` seconds to complete,
a `concurrent.futures.TimeoutError` will be raised.
"""
result = operation.result(timeout=timeout)
if operation.error_code:
print(
f"Error during {verbose_name}: [Code: {operation.error_code}]: {operation.error_message}",
file=sys.stderr,
flush=True,
)
print(f"Operation ID: {operation.name}", file=sys.stderr, flush=True)
raise operation.exception() or RuntimeError(operation.error_message)
if operation.warnings:
print(f"Warnings during {verbose_name}:\n", file=sys.stderr, flush=True)
for warning in operation.warnings:
print(f" - {warning.code}: {warning.message}", file=sys.stderr, flush=True)
return result
def create_instance(
project_id: str,
zone: str,
instance_name: str,
disks: list[compute_v1.AttachedDisk],
machine_type: str = "n1-standard-1",
network_link: str = "global/networks/default",
subnetwork_link: str = None,
internal_ip: str = None,
external_access: bool = False,
external_ipv4: str = None,
accelerators: list[compute_v1.AcceleratorConfig] = None,
preemptible: bool = False,
spot: bool = False,
instance_termination_action: str = "STOP",
custom_hostname: str = None,
delete_protection: bool = False,
) -> compute_v1.Instance:
"""
Send an instance creation request to the Compute Engine API and wait for it to complete.
Args:
project_id: project ID or project number of the Cloud project you want to use.
zone: name of the zone to create the instance in. For example: "us-west3-b"
instance_name: name of the new virtual machine (VM) instance.
disks: a list of compute_v1.AttachedDisk objects describing the disks
you want to attach to your new instance.
machine_type: machine type of the VM being created. This value uses the
following format: "zones/{zone}/machineTypes/{type_name}".
For example: "zones/europe-west3-c/machineTypes/f1-micro"
network_link: name of the network you want the new instance to use.
For example: "global/networks/default" represents the network
named "default", which is created automatically for each project.
subnetwork_link: name of the subnetwork you want the new instance to use.
This value uses the following format:
"regions/{region}/subnetworks/{subnetwork_name}"
internal_ip: internal IP address you want to assign to the new instance.
By default, a free address from the pool of available internal IP addresses of
            the used subnet will be used.
external_access: boolean flag indicating if the instance should have an external IPv4
address assigned.
external_ipv4: external IPv4 address to be assigned to this instance. If you specify
an external IP address, it must live in the same region as the zone of the instance.
This setting requires `external_access` to be set to True to work.
accelerators: a list of AcceleratorConfig objects describing the accelerators that will
be attached to the new instance.
preemptible: boolean value indicating if the new instance should be preemptible
or not. Preemptible VMs have been deprecated and you should now use Spot VMs.
spot: boolean value indicating if the new instance should be a Spot VM or not.
instance_termination_action: What action should be taken once a Spot VM is terminated.
Possible values: "STOP", "DELETE"
custom_hostname: Custom hostname of the new VM instance.
Custom hostnames must conform to RFC 1035 requirements for valid hostnames.
delete_protection: boolean value indicating if the new virtual machine should be
protected against deletion or not.
Returns:
Instance object.
"""
instance_client = compute_v1.InstancesClient()
# Use the network interface provided in the network_link argument.
network_interface = compute_v1.NetworkInterface()
network_interface.network = network_link
if subnetwork_link:
network_interface.subnetwork = subnetwork_link
if internal_ip:
network_interface.network_i_p = internal_ip
if external_access:
access = compute_v1.AccessConfig()
access.type_ = compute_v1.AccessConfig.Type.ONE_TO_ONE_NAT.name
access.name = "External NAT"
access.network_tier = access.NetworkTier.PREMIUM.name
if external_ipv4:
access.nat_i_p = external_ipv4
network_interface.access_configs = [access]
# Collect information into the Instance object.
instance = compute_v1.Instance()
instance.network_interfaces = [network_interface]
instance.name = instance_name
instance.disks = disks
if re.match(r"^zones/[a-z\d\-]+/machineTypes/[a-z\d\-]+$", machine_type):
instance.machine_type = machine_type
else:
instance.machine_type = f"zones/{zone}/machineTypes/{machine_type}"
instance.scheduling = compute_v1.Scheduling()
if accelerators:
instance.guest_accelerators = accelerators
instance.scheduling.on_host_maintenance = (
compute_v1.Scheduling.OnHostMaintenance.TERMINATE.name
)
if preemptible:
# Set the preemptible setting
warnings.warn(
"Preemptible VMs are being replaced by Spot VMs.", DeprecationWarning
)
instance.scheduling = compute_v1.Scheduling()
instance.scheduling.preemptible = True
if spot:
# Set the Spot VM setting
instance.scheduling.provisioning_model = (
compute_v1.Scheduling.ProvisioningModel.SPOT.name
)
instance.scheduling.instance_termination_action = instance_termination_action
if custom_hostname is not None:
# Set the custom hostname for the instance
instance.hostname = custom_hostname
if delete_protection:
# Set the delete protection bit
instance.deletion_protection = True
# Prepare the request to insert an instance.
request = compute_v1.InsertInstanceRequest()
request.zone = zone
request.project = project_id
request.instance_resource = instance
# Wait for the create operation to complete.
print(f"Creating the {instance_name} instance in {zone}...")
operation = instance_client.insert(request=request)
wait_for_extended_operation(operation, "instance creation")
print(f"Instance {instance_name} created.")
return instance_client.get(project=project_id, zone=zone, instance=instance_name)
def create_custom_shared_core_instance(
project_id: str,
zone: str,
instance_name: str,
cpu_series: CustomMachineType.CPUSeries,
memory: int,
) -> compute_v1.Instance:
"""
Create a new VM instance with a custom type using shared CPUs.
Args:
project_id: project ID or project number of the Cloud project you want to use.
zone: name of the zone to create the instance in. For example: "us-west3-b"
instance_name: name of the new virtual machine (VM) instance.
cpu_series: the type of CPU you want to use. Pick one value from the CustomMachineType.CPUSeries enum.
For example: CustomMachineType.CPUSeries.E2_MICRO
memory: the amount of memory for the VM instance, in megabytes.
Return:
Instance object.
"""
assert cpu_series in (
CustomMachineType.CPUSeries.E2_MICRO,
CustomMachineType.CPUSeries.E2_SMALL,
CustomMachineType.CPUSeries.E2_MEDIUM,
)
custom_type = CustomMachineType(zone, cpu_series, memory)
newest_debian = get_image_from_family(project="debian-cloud", family="debian-12")
disk_type = f"zones/{zone}/diskTypes/pd-standard"
disks = [disk_from_image(disk_type, 10, True, newest_debian.self_link)]
return create_instance(project_id, zone, instance_name, disks, str(custom_type))
# [END compute_custom_machine_type_create_shared_with_helper]
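# Hedged usage sketch, not part of the original sample: it exercises the helper
# class defined above by parsing an existing custom machine type string and
# printing it back in both the full and the short (instance-template) form.
if __name__ == "__main__":
    parsed = CustomMachineType.from_str(
        "zones/us-central1-b/machineTypes/e2-custom-medium-8192"
    )
    print(parsed)                   # zones/us-central1-b/machineTypes/e2-custom-medium-8192
    print(parsed.short_type_str())  # e2-custom-medium-8192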
|
CustomMachineType
|
python
|
django__django
|
tests/forms_tests/tests/test_validators.py
|
{
"start": 2364,
"end": 6829
}
|
class ____(TestCase):
def test_value_placeholder_with_char_field(self):
cases = [
(validators.validate_integer, "-42.5", "invalid"),
(validators.validate_email, "a", "invalid"),
(validators.validate_email, "a@b\n.com", "invalid"),
(validators.validate_email, "a\n@b.com", "invalid"),
(validators.validate_slug, "你 好", "invalid"),
(validators.validate_unicode_slug, "你 好", "invalid"),
(validators.validate_ipv4_address, "256.1.1.1", "invalid"),
(validators.validate_ipv6_address, "1:2", "invalid"),
(validators.validate_ipv46_address, "256.1.1.1", "invalid"),
(validators.validate_comma_separated_integer_list, "a,b,c", "invalid"),
(validators.int_list_validator(), "-1,2,3", "invalid"),
(validators.MaxLengthValidator(10), 11 * "x", "max_length"),
(validators.MinLengthValidator(10), 9 * "x", "min_length"),
(validators.URLValidator(), "no_scheme", "invalid"),
(validators.URLValidator(), "http://test[.com", "invalid"),
(validators.URLValidator(), "http://[::1:2::3]/", "invalid"),
(
validators.URLValidator(),
"http://" + ".".join(["a" * 35 for _ in range(9)]),
"invalid",
),
(validators.RegexValidator("[0-9]+"), "xxxxxx", "invalid"),
]
for validator, value, code in cases:
if isinstance(validator, types.FunctionType):
name = validator.__name__
else:
name = type(validator).__name__
with self.subTest(name, value=value):
class MyForm(forms.Form):
field = forms.CharField(
validators=[validator],
error_messages={code: "%(value)s"},
)
form = MyForm({"field": value})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": [value]})
def test_value_placeholder_with_null_character(self):
class MyForm(forms.Form):
field = forms.CharField(
error_messages={"null_characters_not_allowed": "%(value)s"},
)
form = MyForm({"field": "a\0b"})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": ["a\x00b"]})
def test_value_placeholder_with_integer_field(self):
cases = [
(validators.MaxValueValidator(0), 1, "max_value"),
(validators.MinValueValidator(0), -1, "min_value"),
(validators.URLValidator(), "1", "invalid"),
]
for validator, value, code in cases:
with self.subTest(type(validator).__name__, value=value):
class MyForm(forms.Form):
field = forms.IntegerField(
validators=[validator],
error_messages={code: "%(value)s"},
)
form = MyForm({"field": value})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": [str(value)]})
def test_value_placeholder_with_decimal_field(self):
cases = [
("NaN", "invalid"),
("123", "max_digits"),
("0.12", "max_decimal_places"),
("12", "max_whole_digits"),
]
for value, code in cases:
with self.subTest(value=value):
class MyForm(forms.Form):
field = forms.DecimalField(
max_digits=2,
decimal_places=1,
error_messages={code: "%(value)s"},
)
form = MyForm({"field": value})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": [value]})
def test_value_placeholder_with_file_field(self):
class MyForm(forms.Form):
field = forms.FileField(
validators=[validators.validate_image_file_extension],
error_messages={"invalid_extension": "%(value)s"},
)
form = MyForm(files={"field": SimpleUploadedFile("myfile.txt", b"abc")})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": ["myfile.txt"]})
|
ValidatorCustomMessageTests
|
python
|
ipython__ipython
|
IPython/core/historyapp.py
|
{
"start": 901,
"end": 4518
}
|
class ____(BaseIPythonApplication):
description = trim_hist_help
backup = Bool(False, help="Keep the old history file as history.sqlite.<N>").tag(
config=True
)
keep = Int(1000, help="Number of recent lines to keep in the database.").tag(
config=True
)
flags = Dict( # type: ignore
dict(backup=({"HistoryTrim": {"backup": True}}, backup.help))
)
aliases = Dict(dict(keep="HistoryTrim.keep")) # type: ignore
def start(self):
profile_dir = Path(self.profile_dir.location)
hist_file = profile_dir / "history.sqlite"
con = sqlite3.connect(hist_file)
# Grab the recent history from the current database.
inputs = list(con.execute('SELECT session, line, source, source_raw FROM '
'history ORDER BY session DESC, line DESC LIMIT ?', (self.keep+1,)))
if len(inputs) <= self.keep:
print("There are already at most %d entries in the history database." % self.keep)
print("Not doing anything. Use --keep= argument to keep fewer entries")
return
print("Trimming history to the most recent %d entries." % self.keep)
inputs.pop() # Remove the extra element we got to check the length.
inputs.reverse()
if inputs:
first_session = inputs[0][0]
outputs = list(con.execute('SELECT session, line, output FROM '
'output_history WHERE session >= ?', (first_session,)))
sessions = list(con.execute('SELECT session, start, end, num_cmds, remark FROM '
'sessions WHERE session >= ?', (first_session,)))
con.close()
# Create the new history database.
new_hist_file = profile_dir / "history.sqlite.new"
i = 0
while new_hist_file.exists():
# Make sure we don't interfere with an existing file.
i += 1
new_hist_file = profile_dir / ("history.sqlite.new" + str(i))
new_db = sqlite3.connect(new_hist_file)
new_db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
primary key autoincrement, start timestamp,
end timestamp, num_cmds integer, remark text)""")
new_db.execute("""CREATE TABLE IF NOT EXISTS history
(session integer, line integer, source text, source_raw text,
PRIMARY KEY (session, line))""")
new_db.execute("""CREATE TABLE IF NOT EXISTS output_history
(session integer, line integer, output text,
PRIMARY KEY (session, line))""")
new_db.commit()
if inputs:
with new_db:
# Add the recent history into the new database.
new_db.executemany('insert into sessions values (?,?,?,?,?)', sessions)
new_db.executemany('insert into history values (?,?,?,?)', inputs)
new_db.executemany('insert into output_history values (?,?,?)', outputs)
new_db.close()
if self.backup:
i = 1
backup_hist_file = profile_dir / ("history.sqlite.old.%d" % i)
while backup_hist_file.exists():
i += 1
backup_hist_file = profile_dir / ("history.sqlite.old.%d" % i)
hist_file.rename(backup_hist_file)
print("Backed up longer history file to", backup_hist_file)
else:
hist_file.unlink()
new_hist_file.rename(hist_file)
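# Hedged usage note, not part of the original module: this application is
# normally invoked through IPython's command line rather than imported, e.g.
#   ipython history trim --keep=500
# which keeps the 500 most recent input lines and rebuilds history.sqlite
# (pass --backup to keep the old file as history.sqlite.<N>).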
|
HistoryTrim
|
python
|
has2k1__plotnine
|
plotnine/stats/stat_sina.py
|
{
"start": 466,
"end": 9571
}
|
class ____(stat):
"""
Compute Sina plot values
{usage}
Parameters
----------
{common_parameters}
binwidth : float, default=None
The width of the bins. The default is to use bins that
cover the range of the data. You should always override this
value, exploring multiple widths to find the best to
illustrate the stories in your data.
bins : int, default=50
Number of bins. Overridden by binwidth.
method : Literal["density", "counts"], default="density"
Choose the method to spread the samples within the same bin
along the x-axis. Available methods: "density", "counts"
(can be abbreviated, e.g. "d"). See Details.
maxwidth : float, default=None
Control the maximum width the points can spread into.
Values should be in the range (0, 1).
adjust : float, default=1
Adjusts the bandwidth of the density kernel when
`method="density"`. see [](`~plotnine.stats.stat_density`).
bw : str | float, default="nrd0"
        The bandwidth to use. If a float is given, it is the bandwidth.
The `str`{.py} choices are:
`"nrd0", "normal_reference", "scott", "silverman"`{.py}
        `nrd0` is a port of `stats::bw.nrd0` in R; it is equivalent
to `silverman` when there is more than 1 value in a group.
bin_limit : int, default=1
If the samples within the same y-axis bin are more
        than `bin_limit`, the samples' X coordinates will be adjusted.
This parameter is effective only when `method="counts"`{.py}
random_state : int | ~numpy.random.RandomState, default=None
Seed or Random number generator to use. If `None`, then
numpy global generator [](`numpy.random`) is used.
scale : Literal["area", "count", "width"], default="area"
How to scale the sina groups.
- `area` - Scale by the largest density/bin among the different sinas
- `count` - areas are scaled proportionally to the number of points
- `width` - Only scale according to the maxwidth parameter.
style :
Type of sina plot to draw. The options are
```python
'full' # Regular (2 sided)
'left' # Left-sided half
'right' # Right-sided half
'left-right' # Alternate (left first) half by the group
'right-left' # Alternate (right first) half by the group
```
See Also
--------
plotnine.geom_sina : The default `geom` for this `stat`.
"""
_aesthetics_doc = """
{aesthetics_table}
**Options for computed aesthetics**
```python
"quantile" # quantile
"group" # group identifier
```
Calculated aesthetics are accessed using the `after_stat` function.
e.g. `after_stat('quantile')`{.py}.
"""
REQUIRED_AES = {"x", "y"}
DEFAULT_PARAMS = {
"geom": "sina",
"position": "dodge",
"na_rm": False,
"binwidth": None,
"bins": None,
"method": "density",
"bw": "nrd0",
"maxwidth": None,
"adjust": 1,
"bin_limit": 1,
"random_state": None,
"scale": "area",
"style": "full",
}
CREATES = {"scaled"}
def setup_data(self, data):
if (
array_kind.continuous(data["x"])
and not has_groups(data)
and (data["x"] != data["x"].iloc[0]).any()
):
raise TypeError(
"Continuous x aesthetic -- did you forget aes(group=...)?"
)
return data
def setup_params(self, data):
params = self.params
random_state = params["random_state"]
if params["maxwidth"] is None:
params["maxwidth"] = resolution(data["x"], False) * 0.9
if params["binwidth"] is None and self.params["bins"] is None:
params["bins"] = 50
if random_state is None:
params["random_state"] = np.random
elif isinstance(random_state, int):
params["random_state"] = np.random.RandomState(random_state)
# Required by compute_density
params["kernel"] = "gau" # It has to be a gaussian kernel
params["cut"] = 0
params["gridsize"] = None
params["clip"] = (-np.inf, np.inf)
params["bounds"] = (-np.inf, np.inf)
params["n"] = 512
def compute_panel(self, data, scales):
params = self.params
maxwidth = params["maxwidth"]
random_state = params["random_state"]
data = super().compute_panel(data, scales)
if not len(data):
return data
if params["scale"] == "area":
data["sinawidth"] = data["density"] / data["density"].max()
elif params["scale"] == "count":
data["sinawidth"] = (
data["density"]
/ data["density"].max()
* data["n"]
/ data["n"].max()
)
elif params["scale"] == "width":
data["sinawidth"] = data["scaled"]
else:
msg = "Unknown scale value '{}'"
raise PlotnineError(msg.format(params["scale"]))
is_infinite = ~np.isfinite(data["sinawidth"])
if is_infinite.any():
data.loc[is_infinite, "sinawidth"] = 0
data["xmin"] = data["x"] - maxwidth / 2
data["xmax"] = data["x"] + maxwidth / 2
data["x_diff"] = (
random_state.uniform(-1, 1, len(data))
* maxwidth
* data["sinawidth"]
/ 2
)
data["width"] = maxwidth
# jitter y values if the input is integer,
        # but not if all the values are identical
y = data["y"].to_numpy()
all_integers = (y == np.floor(y)).all()
some_are_unique = len(np.unique(y)) > 1
if all_integers and some_are_unique:
data["y"] = jitter(y, random_state=random_state)
return data
def compute_group(self, data, scales):
binwidth = self.params["binwidth"]
maxwidth = self.params["maxwidth"]
bin_limit = self.params["bin_limit"]
weight = None
y = data["y"]
if len(data) == 0:
return pd.DataFrame()
elif len(data) < 3:
data["density"] = 0
data["scaled"] = 1
elif len(np.unique(y)) < 2:
data["density"] = 1
data["scaled"] = 1
elif self.params["method"] == "density":
from scipy.interpolate import interp1d
# density kernel estimation
range_y = y.min(), y.max()
dens = compute_density(y, weight, range_y, self.params)
densf = interp1d(
dens["x"],
dens["density"],
bounds_error=False,
fill_value="extrapolate", # pyright: ignore
)
data["density"] = densf(y)
data["scaled"] = data["density"] / dens["density"].max()
else:
expanded_y_range = nextafter_range(scales.y.dimension())
if binwidth is not None:
bins = breaks_from_binwidth(expanded_y_range, binwidth)
else:
bins = breaks_from_bins(expanded_y_range, self.params["bins"])
# bin based estimation
bin_index = pd.cut(y, bins, include_lowest=True, labels=False) # pyright: ignore[reportCallIssue,reportArgumentType]
data["density"] = (
pd.Series(bin_index)
.groupby(bin_index)
.apply(len)[bin_index]
.to_numpy()
)
data.loc[data["density"] <= bin_limit, "density"] = 0
data["scaled"] = data["density"] / data["density"].max()
# Compute width if x has multiple values
if len(data["x"].unique()) > 1:
width = np.ptp(data["x"]) * maxwidth
else:
width = maxwidth
data["width"] = width
data["n"] = len(data)
data["x"] = np.mean([data["x"].max(), data["x"].min()])
return data
def finish_layer(self, data):
# Rescale x in case positions have been adjusted
style = self.params["style"]
x_mean = cast("FloatArray", data["x"].to_numpy())
x_mod = (data["xmax"] - data["xmin"]) / data["width"]
data["x"] = data["x"] + data["x_diff"] * x_mod
group = cast("IntArray", data["group"].to_numpy())
x = cast("FloatArray", data["x"].to_numpy())
even = group % 2 == 0
def mirror_x(bool_idx):
"""
Mirror x locations along the mean value
"""
data.loc[bool_idx, "x"] = 2 * x_mean[bool_idx] - x[bool_idx]
match style:
case "left":
mirror_x(x_mean < x)
case "right":
mirror_x(x < x_mean)
case "left-right":
mirror_x(even & (x < x_mean) | ~even & (x_mean < x))
case "right-left":
mirror_x(even & (x_mean < x) | ~even & (x < x_mean))
return data
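# Hedged usage sketch, not part of the original module: it assumes plotnine's
# public API (ggplot, aes, and geom_sina, the default geom for this stat) plus
# a small pandas DataFrame, and saves a basic sina plot to an arbitrary file.
if __name__ == "__main__":
    import pandas as pd
    from plotnine import ggplot, aes, geom_sina
    df = pd.DataFrame({
        "group": ["a"] * 50 + ["b"] * 50,
        "value": list(range(50)) + list(range(25, 75)),
    })
    p = ggplot(df, aes("group", "value")) + geom_sina()
    p.save("sina_sketch.png")  # filename is illustrative only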
|
stat_sina
|
python
|
Textualize__textual
|
docs/examples/themes/todo_app.py
|
{
"start": 234,
"end": 2176
}
|
class ____(App[None]):
CSS = """
Screen {
align: center middle;
hatch: right $foreground 10%;
}
#content {
height: auto;
width: 40;
padding: 1 2;
}
#header {
height: 1;
width: auto;
margin-bottom: 1;
}
.title {
text-style: bold;
padding: 0 1;
width: 1fr;
}
#overdue {
color: $text-error;
background: $error-muted;
padding: 0 1;
width: auto;
}
#done {
color: $text-success;
background: $success-muted;
padding: 0 1;
margin: 0 1;
}
#footer {
height: auto;
margin-bottom: 2;
}
#history-header {
height: 1;
width: auto;
}
#history-done {
width: auto;
padding: 0 1;
margin: 0 1;
background: $primary-muted;
color: $text-primary;
}
"""
BINDINGS = [Binding("ctrl+t", "cycle_theme", "Cycle theme")]
THEMES = cycle(
["nord", "gruvbox", "tokyo-night", "textual-dark", "solarized-light"]
)
def compose(self) -> ComposeResult:
yield Header()
with Vertical(id="content"):
with Horizontal(id="header"):
yield Label("Today", classes="title")
yield Label("1 overdue", id="overdue")
yield Label("1 done", id="done")
yield SelectionList(
("Buy milk", 0),
("Buy bread", 1),
("Go and vote", 2, True),
("Return package", 3),
id="todo-list",
)
with Horizontal(id="footer"):
yield Input(placeholder="Add a task")
with Horizontal(id="history-header"):
yield Label("History", classes="title")
yield Label("4 items", id="history-done")
yield Footer()
def on_mount(self) -> None:
self.action_cycle_theme()
def action_cycle_theme(self) -> None:
self.theme = next(self.THEMES)
app = TodoList()
if __name__ == "__main__":
app.run()
|
TodoList
|
python
|
simonw__datasette
|
datasette/utils/baseconv.py
|
{
"start": 290,
"end": 1553
}
|
class ____(object):
decimal_digits = "0123456789"
def __init__(self, digits):
self.digits = digits
def encode(self, i):
return self.convert(i, self.decimal_digits, self.digits)
def decode(self, s):
return int(self.convert(s, self.digits, self.decimal_digits))
def convert(number, fromdigits, todigits):
# Based on http://code.activestate.com/recipes/111286/
if str(number)[0] == "-":
number = str(number)[1:]
neg = 1
else:
neg = 0
# make an integer out of the number
x = 0
for digit in str(number):
x = x * len(fromdigits) + fromdigits.index(digit)
# create the result in base 'len(todigits)'
if x == 0:
res = todigits[0]
else:
res = ""
while x > 0:
digit = x % len(todigits)
res = todigits[digit] + res
x = int(x / len(todigits))
if neg:
res = "-" + res
return res
convert = staticmethod(convert)
bin = BaseConverter("01")
hexconv = BaseConverter("0123456789ABCDEF")
base62 = BaseConverter("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz")
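# Hedged usage sketch, not part of the original module: it round-trips an
# integer through the base62 converter defined above and checks that decoding
# restores the original value.
if __name__ == "__main__":
    number = 1234567890
    token = base62.encode(number)
    assert base62.decode(token) == number  # encode/decode are inverses
    print(number, "->", token)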
|
BaseConverter
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE794.py
|
{
"start": 764,
"end": 862
}
|
class ____:
name: str = "Foo"
name: str = name + " Bar"
name: str = "Bar" # PIE794
|
Person
|
python
|
ray-project__ray
|
python/ray/serve/schema.py
|
{
"start": 37614,
"end": 38386
}
|
class ____(BaseModel):
"""Deployment-level autoscaler observability."""
scaling_status: AutoscalingStatus = Field(
..., description="Current scaling direction or stability."
)
decisions: List[ScalingDecision] = Field(
default_factory=list, description="Recent scaling decisions."
)
metrics: Optional[Dict[str, Any]] = Field(
None, description="Aggregated metrics for this deployment."
)
metrics_health: AutoscalingMetricsHealth = Field(
AutoscalingMetricsHealth.HEALTHY,
description="Health of metrics collection pipeline.",
)
errors: List[str] = Field(
default_factory=list, description="Recent errors/abnormal events."
)
@PublicAPI(stability="stable")
|
DeploymentAutoscalingDetail
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/queues.py
|
{
"start": 90646,
"end": 93922
}
|
class ____(Request):
"""
Update queue information
:param queue: Queue id
:type queue: str
    :param name: Queue name. Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
"""
_service = "queues"
_action = "update"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"name": {
"description": "Queue name Unique within the company.",
"type": "string",
},
"queue": {"description": "Queue id", "type": "string"},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["queue"],
"type": "object",
}
def __init__(
self,
queue: str,
name: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
**kwargs: Any
) -> None:
super(UpdateRequest, self).__init__(**kwargs)
self.queue = queue
self.name = name
self.tags = tags
self.system_tags = system_tags
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
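# Hedged construction sketch, not part of the generated bindings: it assumes
# the Request base class needs no extra arguments (typical for these generated
# classes) and only shows field validation; the queue id is a placeholder and
# actually sending the request requires a configured API session.
if __name__ == "__main__":
    req = UpdateRequest(queue="<queue-id>", name="nightly", tags=["gpu", "ci"])
    print(req.queue, req.name, req.tags)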
|
UpdateRequest
|
python
|
sympy__sympy
|
sympy/physics/quantum/cartesian.py
|
{
"start": 2237,
"end": 2785
}
|
class ____(HermitianOperator):
""" Z cartesian coordinate operator (for 3D systems) """
@classmethod
def default_args(self):
return ("Z",)
@classmethod
def _eval_hilbert_space(self, args):
return L2(Interval(S.NegativeInfinity, S.Infinity))
def _apply_operator_PositionKet3D(self, ket, **options):
return ket.position_z*ket
#-------------------------------------------------------------------------
# Momentum operators
#-------------------------------------------------------------------------
|
ZOp
|
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_index_returned.py
|
{
"start": 986,
"end": 1159
}
|
class ____:
""" __index__ returns node which does not have 'value' in AST """
def __index__(self): # [invalid-index-returned]
return lambda: 3
|
FourthBadIndex
|
python
|
py-pdf__pypdf
|
pypdf/constants.py
|
{
"start": 3418,
"end": 3939
}
|
class ____:
"""
Table 3.30 Entries in a resource dictionary.
Table 34 in the 2.0 reference.
"""
EXT_G_STATE = "/ExtGState" # dictionary, optional
COLOR_SPACE = "/ColorSpace" # dictionary, optional
PATTERN = "/Pattern" # dictionary, optional
SHADING = "/Shading" # dictionary, optional
XOBJECT = "/XObject" # dictionary, optional
FONT = "/Font" # dictionary, optional
PROC_SET = "/ProcSet" # array, optional
PROPERTIES = "/Properties" # dictionary, optional
|
Resources
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/legacy_tf_layers/convolutional.py
|
{
"start": 988,
"end": 9906
}
|
class ____(keras_layers.Conv1D, base.Layer):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the
length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: An integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self, filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv1D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name, **kwargs)
def conv1d(inputs,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for 1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Args:
inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the
length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: An integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
warnings.warn('`tf.layers.conv1d` is deprecated and '
'will be removed in a future version. '
                'Please use `tf.keras.layers.Conv1D` instead.')
layer = Conv1D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
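# Hedged migration sketch, not part of the original module: the deprecation
# warning above points at tf.keras.layers.Conv1D, which accepts the same core
# arguments; the shapes used here are illustrative only.
if __name__ == "__main__":
  import tensorflow as tf
  inputs = tf.random.normal([4, 128, 8])  # (batch, length, channels)
  layer = tf.keras.layers.Conv1D(filters=32, kernel_size=3, padding="same",
                                 activation="relu")
  print(layer(inputs).shape)  # (4, 128, 32)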
|
Conv1D
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/responses.py
|
{
"start": 19645,
"end": 20845
}
|
class ____(schemas.core.Worker):
status: schemas.statuses.WorkerStatus = Field(
schemas.statuses.WorkerStatus.OFFLINE,
description="Current status of the worker.",
)
@classmethod
def model_validate(
cls: Type[Self],
obj: Any,
*,
strict: Optional[bool] = None,
from_attributes: Optional[bool] = None,
context: Optional[dict[str, Any]] = None,
) -> Self:
worker = super().model_validate(
obj, strict=strict, from_attributes=from_attributes, context=context
)
if from_attributes:
offline_horizon = datetime.datetime.now(
tz=datetime.timezone.utc
) - datetime.timedelta(
seconds=(
worker.heartbeat_interval_seconds
or DEFAULT_HEARTBEAT_INTERVAL_SECONDS
)
* INACTIVITY_HEARTBEAT_MULTIPLE
)
if worker.last_heartbeat_time > offline_horizon:
worker.status = schemas.statuses.WorkerStatus.ONLINE
else:
worker.status = schemas.statuses.WorkerStatus.OFFLINE
return worker
|
WorkerResponse
|
python
|
django__django
|
django/db/migrations/exceptions.py
|
{
"start": 252,
"end": 370
}
|
class ____(Exception):
"""There's an impossible-to-resolve circular dependency."""
pass
|
CircularDependencyError
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/remote_representation/external.py
|
{
"start": 25268,
"end": 25477
}
|
class ____:
job_selector: JobSubsetSelector
run_config: Optional[Mapping[str, Any]]
@cached_method
def __hash__(self) -> int:
return hash(make_hashable(self))
|
RemoteExecutionPlanSelector
|
python
|
Netflix__metaflow
|
test/test_config/helloconfig.py
|
{
"start": 818,
"end": 1085
}
|
class ____(FlowMutator):
def mutate(self, mutable_flow):
s = mutable_flow.start
s.add_decorator(environment, vars={"hello": mutable_flow.config.env_to_start})
@TitusOrNot
@AddEnvToStart
@project(name=config_expr("config").project_name)
|
AddEnvToStart
|
python
|
getsentry__sentry
|
tests/sentry/web/frontend/test_auth_organization_login.py
|
{
"start": 1281,
"end": 44561
}
|
class ____(AuthProviderTestCase):
@cached_property
def organization(self) -> Organization:
return self.create_organization(name="foo", owner=self.user)
@cached_property
def path(self) -> str:
return reverse("sentry-auth-organization", args=[self.organization.slug])
def test_renders_basic(self) -> None:
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/organization-login.html")
assert resp.context["login_form"]
with assume_test_silo_mode(SiloMode.REGION):
assert resp.context["organization"] == serialize_rpc_organization(self.organization)
assert "provider_key" not in resp.context
assert resp.context["join_request_link"]
def test_cannot_get_request_join_link_with_setting_disabled(self) -> None:
with assume_test_silo_mode(SiloMode.REGION):
OrganizationOption.objects.create(
organization_id=self.organization.id, key="sentry:join_requests", value=False
)
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert resp.context["join_request_link"] is None
def test_renders_session_expire_message(self) -> None:
self.client.cookies["session_expired"] = "1"
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/organization-login.html")
assert len(resp.context["messages"]) == 1
def test_flow_as_anonymous(self) -> None:
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "foo@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-identity.html")
assert resp.status_code == 200
frontend_events = {"event_name": "Sign Up", "event_label": "dummy"}
marketing_query = urlencode({"frontend_events": json.dumps(frontend_events)})
with self.settings(
TERMS_URL="https://example.com/terms", PRIVACY_URL="https://example.com/privacy"
):
resp = self.client.post(path, {"op": "newuser"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login") + f"?{marketing_query}", 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=auth_provider)
user = auth_identity.user
assert user.email == "foo@example.com"
assert not user.has_usable_password()
assert not user.is_managed
assert user.flags.newsletter_consent_prompt
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(organization=self.organization, user_id=user.id)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert not getattr(member.flags, "member-limit:restricted")
def test_flow_as_existing_user_with_new_account(self) -> None:
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
user = self.create_user("bar@example.com")
self.login_as(user)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "foo@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-link.html")
assert resp.status_code == 200
resp = self.client.post(path, {"op": "confirm"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=auth_provider)
assert user == auth_identity.user
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(organization=self.organization, user_id=user.id)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert not getattr(member.flags, "member-limit:restricted")
def test_flow_as_existing_user_with_new_account_member_limit(self) -> None:
with self.feature({"organizations:invite-members": False}):
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
user = self.create_user("bar@example.com")
self.login_as(user)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "foo@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-link.html")
assert resp.status_code == 200
resp = self.client.post(path, {"op": "confirm"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
("/organizations/foo/disabled-member/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=auth_provider)
assert user == auth_identity.user
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(
organization=self.organization, user_id=user.id
)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert getattr(member.flags, "member-limit:restricted")
def test_flow_as_existing_identity(self) -> None:
user = self.create_user("bar@example.com")
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
AuthIdentity.objects.create(
auth_provider=auth_provider, user_id=user.id, ident="foo@example.com"
)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "foo@example.com"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
def test_org_redirects_to_relative_next_url(self) -> None:
user = self.create_user("bar@example.com")
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
AuthIdentity.objects.create(
auth_provider=auth_provider, user_id=user.id, ident="foo@example.com"
)
next = f"/organizations/{self.organization.slug}/releases/"
resp = self.client.post(self.path + "?next=" + next, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "foo@example.com"}, follow=True)
assert resp.redirect_chain == [
(next, 302),
]
@with_feature("sytem:multi-region")
def test_org_redirects_to_next_url_customer_domain(self) -> None:
user = self.create_user("bar@example.com")
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
AuthIdentity.objects.create(
auth_provider=auth_provider, user_id=user.id, ident="foo@example.com"
)
next = f"/organizations/{self.organization.slug}/releases/"
resp = self.client.post(
self.path + "?next=" + self.organization.absolute_url(next), {"init": True}
)
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "foo@example.com"}, follow=True)
assert resp.redirect_chain == [
(self.organization.absolute_url(next), 302),
]
def test_org_login_doesnt_redirect_external(self) -> None:
user = self.create_user("bar@example.com")
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
AuthIdentity.objects.create(
auth_provider=auth_provider, user_id=user.id, ident="foo@example.com"
)
next = "http://example.com"
resp = self.client.post(self.path + "?next=" + urlquote(next), {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "foo@example.com"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
def test_flow_as_unauthenticated_existing_matched_user_no_merge(self) -> None:
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
user = self.create_user("bar@example.com")
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": user.email})
self.assertTemplateUsed(resp, "sentry/auth-confirm-identity.html")
assert resp.status_code == 200
assert resp.context["existing_user"] == user
assert resp.context["login_form"]
frontend_events = {"event_name": "Sign Up", "event_label": "dummy"}
marketing_query = urlencode({"frontend_events": json.dumps(frontend_events)})
resp = self.client.post(path, {"op": "newuser"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login") + f"?{marketing_query}", 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=auth_provider)
new_user = auth_identity.user
assert user.email == "bar@example.com"
assert new_user != user
# Without settings.TERMS_URL and settings.PRIVACY_URL, this should be
# unset following new user creation
assert not new_user.flags.newsletter_consent_prompt
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(
organization=self.organization, user_id=new_user.id
)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert not getattr(member.flags, "member-limit:restricted")
def test_flow_as_unauthenticated_existing_matched_user_with_merge(self) -> None:
user = self.create_user("bar@example.com")
user.update(is_superuser=False)
org1 = self.create_organization(name="bar", owner=user)
path = reverse("sentry-auth-organization", args=[org1.slug])
# create a second org that the user belongs to, ensure they are redirected to the correct one
self.create_organization(name="zap", owner=user)
auth_provider = AuthProvider.objects.create(organization_id=org1.id, provider="dummy")
email = user.emails.all()[:1].get()
email.is_verified = False
email.save()
resp = self.client.post(path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": user.email})
self.assertTemplateUsed(resp, "sentry/auth-confirm-identity.html")
assert resp.status_code == 200
assert resp.context["existing_user"] == user
assert resp.context["login_form"]
resp = self.client.post(
path, {"op": "login", "username": user.username, "password": "admin"}
)
self.assertTemplateUsed(resp, "sentry/auth-confirm-link.html")
assert resp.status_code == 200
resp = self.client.post(path, {"op": "confirm"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
(f"/organizations/{org1.slug}/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=auth_provider)
new_user = auth_identity.user
assert new_user == user
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(organization=org1, user_id=user.id)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert not getattr(member.flags, "member-limit:restricted")
def test_flow_as_unauthenticated_existing_matched_user_via_secondary_email(self) -> None:
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
user = self.create_user("foo@example.com")
UserEmail.objects.create(user=user, email="bar@example.com", is_verified=True)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": user.email})
self.assertTemplateUsed(resp, "sentry/auth-confirm-identity.html")
assert resp.status_code == 200
assert resp.context["existing_user"] == user
assert resp.context["login_form"]
resp = self.client.post(
path, {"op": "login", "username": user.username, "password": "admin"}
)
self.assertTemplateUsed(resp, "sentry/auth-confirm-link.html")
assert resp.status_code == 200
resp = self.client.post(path, {"op": "confirm"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=auth_provider)
new_user = auth_identity.user
assert new_user == user
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(organization=self.organization, user_id=user.id)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert not getattr(member.flags, "member-limit:restricted")
@mock.patch("sentry.auth.helper.AuthIdentityHandler.warn_about_ambiguous_email")
def test_flow_as_unauthenticated_existing_matched_user_with_ambiguous_email(
self, mock_warning: mock.MagicMock
) -> None:
AuthProvider.objects.create(organization_id=self.organization.id, provider="dummy")
secondary_email = "foo@example.com"
users = {self.create_user() for _ in range(2)}
for user in users:
UserEmail.objects.create(user=user, email=secondary_email, is_verified=True)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": secondary_email})
assert resp.status_code == 200
assert mock_warning.called
received_email, found_users, chosen_user = mock_warning.call_args.args
assert received_email == secondary_email
assert set(found_users) == users
assert chosen_user in users
def test_flow_as_unauthenticated_existing_unmatched_user_with_merge(self) -> None:
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
user = self.create_user("foo@example.com")
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "bar@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-identity.html")
assert resp.status_code == 200
assert not resp.context["existing_user"]
assert resp.context["login_form"]
resp = self.client.post(
path, {"op": "login", "username": user.username, "password": "admin"}
)
self.assertTemplateUsed(resp, "sentry/auth-confirm-link.html")
assert resp.status_code == 200
resp = self.client.post(path, {"op": "confirm"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=auth_provider)
new_user = auth_identity.user
assert new_user == user
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(organization=self.organization, user_id=user.id)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert not getattr(member.flags, "member-limit:restricted")
def test_flow_as_unauthenticated_existing_matched_user_with_merge_and_existing_identity(
self,
) -> None:
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
user = self.create_user("bar@example.com")
auth_identity = AuthIdentity.objects.create(
auth_provider=auth_provider, user=user, ident="adfadsf@example.com"
)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": user.email})
self.assertTemplateUsed(resp, "sentry/auth-confirm-identity.html")
assert resp.status_code == 200
assert resp.context["existing_user"] == user
assert resp.context["login_form"]
resp = self.client.post(
path, {"op": "login", "username": user.username, "password": "admin"}
)
self.assertTemplateUsed(resp, "sentry/auth-confirm-link.html")
assert resp.status_code == 200
resp = self.client.post(path, {"op": "confirm"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(id=auth_identity.id)
assert auth_identity.ident == user.email
new_user = auth_identity.user
assert new_user == user
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(organization=self.organization, user_id=user.id)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert not getattr(member.flags, "member-limit:restricted")
def test_flow_as_unauthenticated_existing_inactive_user_with_merge_and_existing_identity(
self,
) -> None:
"""
Given an unauthenticated user, and an existing, inactive user account
with a linked identity, this should claim that identity and create
a new user account.
"""
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
user = self.create_user("bar@example.com", is_active=False)
auth_identity = AuthIdentity.objects.create(
auth_provider=auth_provider, user_id=user.id, ident="adfadsf@example.com"
)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "adfadsf@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-identity.html")
assert resp.status_code == 200
assert not resp.context["existing_user"]
assert resp.context["login_form"]
frontend_events = {"event_name": "Sign Up", "event_label": "dummy"}
marketing_query = urlencode({"frontend_events": json.dumps(frontend_events)})
resp = self.client.post(path, {"op": "newuser"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login") + f"?{marketing_query}", 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(id=auth_identity.id)
assert auth_identity.ident == "adfadsf@example.com"
new_user = auth_identity.user
assert new_user.id != user.id
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(
organization=self.organization, user_id=new_user.id
)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert not getattr(member.flags, "member-limit:restricted")
def test_flow_duplicate_users_with_membership_and_verified(self) -> None:
"""
Given an existing authenticated user, and an updated identity (e.g.
the ident changed from the SSO provider), we should be re-linking
the identity automatically (without prompt) assuming the user is
a member of the org.
This only works when the email is mapped to an identical identity.
"""
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
# setup a 'previous' identity, such as when we migrated Google from
# the old idents to the new
user = self.create_user("bar@example.com", is_managed=True, is_active=False)
auth_identity = AuthIdentity.objects.create(
auth_provider=auth_provider, user=user, ident="bar@example.com"
)
# they must be a member for the auto merge to happen
self.create_member(organization=self.organization, user_id=user.id)
# user needs to be logged in
self.login_as(user)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
# we're suggesting the identity changed (as if the Google ident was
# updated to be something else)
resp = self.client.post(
path, {"email": "bar@example.com", "id": "123", "email_verified": "1"}, follow=True
)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
("/auth/login/foo/", 302),
]
# there should be no prompt as we auto merge the identity
auth_identity = AuthIdentity.objects.get(id=auth_identity.id)
assert auth_identity.ident == "123"
new_user = auth_identity.user
assert new_user == user
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(
organization=self.organization, user_id=new_user.id
)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert not getattr(member.flags, "member-limit:restricted")
def test_flow_duplicate_users_without_verified(self) -> None:
"""
Given an existing authenticated user, and an updated identity (e.g.
the ident changed from the SSO provider), we should be re-linking
the identity automatically (without prompt) assuming the user is
a member of the org.
"""
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
# setup a 'previous' identity, such as when we migrated Google from
# the old idents to the new
user = self.create_user("bar@example.com", is_managed=True)
AuthIdentity.objects.create(auth_provider=auth_provider, user=user, ident="bar@example.com")
# they must be a member for the auto merge to happen
self.create_member(organization=self.organization, user_id=user.id)
# user needs to be logged in
self.login_as(user)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
# we're suggesting the identity changed (as if the Google ident was
# updated to be something else)
resp = self.client.post(path, {"email": "adfadsf@example.com"})
# there should be no prompt as we auto merge the identity
assert resp.status_code == 200
def test_flow_authenticated_without_verified_without_password(self) -> None:
"""
Given an existing user, and an updated identity (e.g.
the ident changed from the SSO provider), we should be re-linking
the identity automatically as they don't have a password.
This is specifically testing an unauthenticated flow.
"""
AuthProvider.objects.create(organization_id=self.organization.id, provider="dummy")
# setup a 'previous' identity, such as when we migrated Google from
# the old idents to the new
user = self.create_user("bar@example.com", is_managed=False, password="")
assert not user.has_usable_password()
UserEmail.objects.filter(user=user, email="bar@example.com").update(is_verified=False)
self.create_member(organization=self.organization, user_id=user.id)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "bar@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-account.html")
assert resp.status_code == 200
assert resp.context["existing_user"] == user
def test_flow_managed_duplicate_users_without_membership(self) -> None:
"""
Given an existing authenticated user, and an updated identity (e.g.
the ident changed from the SSO provider), we should be prompting to
confirm their identity as they don't have membership.
"""
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
# setup a 'previous' identity, such as when we migrated Google from
# the old idents to the new
user = self.create_user("bar@example.com", is_managed=True)
AuthIdentity.objects.create(auth_provider=auth_provider, user=user, ident="bar@example.com")
# user needs to be logged in
self.login_as(user)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
# we're suggesting the identity changed (as if the Google ident was
# updated to be something else)
resp = self.client.post(path, {"email": "adfadsf@example.com", "email_verified": "1"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-link.html")
assert resp.status_code == 200
assert resp.context["existing_user"].id == user.id
def test_swapped_identities(self) -> None:
"""
Given two existing user accounts with mismatched identities, such as:
- foo SSO'd as bar@example.com
- bar SSO'd as foo@example.com
If bar is authenticating via SSO as bar@example.com, we should remove
the existing entry attached to bar, and re-bind the entry owned by foo.
"""
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
# setup a 'previous' identity, such as when we migrated Google from
# the old idents to the new
user = self.create_user("bar@example.com", is_managed=True, is_active=False)
identity1 = AuthIdentity.objects.create(
auth_provider=auth_provider, user=user, ident="bar@example.com"
)
# create another identity which is used, but not by the authenticating
# user
user2 = self.create_user("adfadsf@example.com", is_managed=True, is_active=False)
identity2 = AuthIdentity.objects.create(
auth_provider=auth_provider, user=user2, ident="adfadsf@example.com"
)
member2 = self.create_member(user_id=user2.id, organization=self.organization)
# user needs to be logged in
self.login_as(user)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
# we're suggesting the identity changed (as if the Google ident was
# updated to be something else)
resp = self.client.post(path, {"email": "adfadsf@example.com"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
("/auth/login/foo/", 302),
]
assert not AuthIdentity.objects.filter(id=identity1.id).exists()
identity2 = AuthIdentity.objects.get(id=identity2.id)
assert identity2.ident == "adfadsf@example.com"
assert identity2.user == user
with assume_test_silo_mode(SiloMode.REGION):
member1 = OrganizationMember.objects.get(
user_id=user.id, organization=self.organization
)
assert getattr(member1.flags, "sso:linked")
assert not getattr(member1.flags, "sso:invalid")
assert not getattr(member1.flags, "member-limit:restricted")
with assume_test_silo_mode(SiloMode.REGION):
member2 = OrganizationMember.objects.get(id=member2.id)
assert not getattr(member2.flags, "sso:linked")
assert getattr(member2.flags, "sso:invalid")
assert not getattr(member2.flags, "member-limit:restricted")
def test_flow_as_unauthenticated_existing_user_legacy_identity_migration(self) -> None:
user = self.create_user("bar@example.com")
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
user_ident = AuthIdentity.objects.create(
auth_provider=auth_provider, user=user, ident="foo@example.com"
)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(
path, {"email": "foo@new-domain.com", "legacy_email": "foo@example.com"}, follow=True
)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
# Ensure the ident was migrated from the legacy identity
updated_ident = AuthIdentity.objects.get(id=user_ident.id)
assert updated_ident.ident == "foo@new-domain.com"
def test_flow_as_authenticated_user_with_invite_joining(self) -> None:
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
user = self.create_user("bar@example.com")
member = self.create_member(email="bar@example.com", organization=self.organization)
with assume_test_silo_mode(SiloMode.REGION):
member.user_id = None
member.save()
self.login_as(user)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "bar@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-link.html")
assert resp.status_code == 200
resp = self.client.post(path, {"op": "confirm"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=auth_provider)
assert user == auth_identity.user
with assume_test_silo_mode(SiloMode.REGION):
test_member = OrganizationMember.objects.get(
organization=self.organization, user_id=user.id
)
assert member.id == test_member.id
assert getattr(test_member.flags, "sso:linked")
assert not getattr(test_member.flags, "sso:invalid")
assert not getattr(test_member.flags, "member-limit:restricted")
@override_settings(SENTRY_SINGLE_ORGANIZATION=True)
@with_feature({"organizations:create": False})
def test_basic_auth_flow_as_not_invited_user(self) -> None:
user = self.create_user("foor@example.com")
self.session["_next"] = reverse(
"sentry-organization-settings", args=[self.organization.slug]
)
self.save_session()
resp = self.client.post(
self.path, {"username": user, "password": "admin", "op": "login"}, follow=True
)
assert resp.redirect_chain == [("/auth/login/", 302)]
assert resp.status_code == 403
self.assertTemplateUsed(resp, "sentry/no-organization-access.html")
def test_basic_auth_flow_as_not_invited_user_not_single_org_mode(self) -> None:
user = self.create_user("u2@example.com")
resp = self.client.post(
self.path, {"username": user, "password": "admin", "op": "login"}, follow=True
)
assert resp.redirect_chain == [("/auth/login/", 302), ("/organizations/new/", 302)]
@override_settings(SENTRY_SINGLE_ORGANIZATION=True)
@with_feature({"organizations:create": False})
def test_basic_auth_flow_as_user_with_confirmed_membership(self) -> None:
user = self.create_user("foor@example.com")
self.create_member(organization=self.organization, user_id=user.id)
self.session["_next"] = reverse(
"sentry-organization-settings", args=[self.organization.slug]
)
self.save_session()
resp = self.client.post(
self.path, {"username": user.username, "password": "admin", "op": "login"}, follow=True
)
assert resp.redirect_chain == [
(reverse("sentry-organization-settings", args=[self.organization.slug]), 302),
]
@override_settings(SENTRY_SINGLE_ORGANIZATION=True)
@with_feature({"organizations:create": False})
def test_flow_as_user_without_any_membership(self) -> None:
# not sure how this could happen on Single Org Mode
user = self.create_user("foor@example.com")
resp = self.client.post(
self.path, {"username": user, "password": "admin", "op": "login"}, follow=True
)
assert resp.redirect_chain == [("/auth/login/", 302)]
assert resp.status_code == 403
self.assertTemplateUsed(resp, "sentry/no-organization-access.html")
def test_multiorg_login_correct_redirect_basic_auth(self) -> None:
user = self.create_user("bar@example.com")
user.update(is_superuser=False)
org1 = self.create_organization(name="bar", owner=user)
path = reverse("sentry-auth-organization", args=[org1.slug])
# create a second org that the user belongs to, ensure they are redirected to the correct one
self.create_organization(name="zap", owner=user)
self.client.get(path)
resp = self.client.post(
path,
{"username": user.username, "password": "admin", "op": "login"},
follow=True,
)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
(f"/organizations/{org1.slug}/issues/", 302),
]
def test_multiorg_login_correct_redirect_sso(self) -> None:
user = self.create_user("bar@example.com")
user.update(is_superuser=False)
org1 = self.create_organization(name="bar", owner=user)
path = reverse("sentry-auth-organization", args=[org1.slug])
# create a second org that the user belongs to, ensure they are redirected to the correct one
self.create_organization(name="zap", owner=user)
auth_provider = AuthProvider.objects.create(organization_id=org1.id, provider="dummy")
AuthIdentity.objects.create(auth_provider=auth_provider, user=user, ident="foo@example.com")
resp = self.client.post(path, {"init": True})
path = reverse("sentry-auth-sso")
resp = self.client.post(path, {"email": "foo@example.com"}, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
(f"/organizations/{org1.slug}/issues/", 302),
]
@override_settings(SENTRY_SINGLE_ORGANIZATION=True)
@with_feature({"organizations:create": False})
def test_correct_redirect_as_2fa_user_single_org_invited(self) -> None:
user = self.create_user("foor@example.com")
RecoveryCodeInterface().enroll(user)
TotpInterface().enroll(user)
self.create_member(organization=self.organization, user_id=user.id)
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(organization=self.organization, user_id=user.id)
member.email = "foor@example.com"
member.user_id = None
member.save()
resp = self.client.post(
self.path, {"username": user, "password": "admin", "op": "login"}, follow=True
)
# Users with 2FA should be redirected to 2FA dialog first, even with pending invites
assert resp.redirect_chain == [("/auth/2fa/", 302)]
def test_correct_redirect_as_2fa_user_invited(self) -> None:
user = self.create_user("foor@example.com")
RecoveryCodeInterface().enroll(user)
TotpInterface().enroll(user)
self.create_member(organization=self.organization, user_id=user.id)
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(organization=self.organization, user_id=user.id)
member.email = "foor@example.com"
member.user_id = None
member.save()
resp = self.client.post(
self.path, {"username": user, "password": "admin", "op": "login"}, follow=True
)
# Users with 2FA should be redirected to 2FA dialog first, even with pending invites
assert resp.redirect_chain == [("/auth/2fa/", 302)]
@override_settings(SENTRY_SINGLE_ORGANIZATION=True)
@with_feature({"organizations:create": False})
def test_correct_redirect_as_2fa_user_single_org_no_membership(self) -> None:
user = self.create_user("foor@example.com")
RecoveryCodeInterface().enroll(user)
TotpInterface().enroll(user)
resp = self.client.post(
self.path, {"username": user, "password": "admin", "op": "login"}, follow=True
)
assert resp.redirect_chain == [("/auth/2fa/", 302)]
def test_correct_redirect_as_2fa_user_no_membership(self) -> None:
user = self.create_user("foor@example.com")
RecoveryCodeInterface().enroll(user)
TotpInterface().enroll(user)
resp = self.client.post(
self.path, {"username": user, "password": "admin", "op": "login"}, follow=True
)
assert resp.redirect_chain == [("/auth/2fa/", 302)]
@override_settings(SENTRY_SINGLE_ORGANIZATION=True)
@with_feature({"organizations:create": False})
def test_correct_redirect_as_2fa_user_single_org_member(self) -> None:
user = self.create_user("foor@example.com")
RecoveryCodeInterface().enroll(user)
TotpInterface().enroll(user)
self.create_member(organization=self.organization, user_id=user.id)
resp = self.client.post(
self.path, {"username": user, "password": "admin", "op": "login"}, follow=True
)
assert resp.redirect_chain == [("/auth/2fa/", 302)]
def test_correct_redirect_as_2fa_user_invited_member(self) -> None:
user = self.create_user("foor@example.com")
RecoveryCodeInterface().enroll(user)
TotpInterface().enroll(user)
self.create_member(organization=self.organization, user_id=user.id)
resp = self.client.post(
self.path, {"username": user, "password": "admin", "op": "login"}, follow=True
)
assert resp.redirect_chain == [("/auth/2fa/", 302)]
def test_anonymous_user_with_automatic_migration(self) -> None:
AuthProvider.objects.create(organization_id=self.organization.id, provider="dummy")
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
path = reverse("sentry-auth-sso")
# Check that we don't call send_one_time_account_confirm_link with an AnonymousUser
resp = self.client.post(path, {"email": "foo@example.com"})
assert resp.status_code == 200
def test_org_not_visible(self) -> None:
with assume_test_silo_mode(SiloMode.REGION):
self.organization.update(status=OrganizationStatus.DELETION_IN_PROGRESS)
resp = self.client.get(self.path, follow=True)
assert resp.status_code == 200
assert resp.redirect_chain == [("/auth/login/", 302)]
self.assertTemplateUsed(resp, "sentry/login.html")
@control_silo_test
|
OrganizationAuthLoginTest
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cli/parsers/__init__.py
|
{
"start": 8821,
"end": 9015
}
|
class ____(PythonTargetParser):
"""Composite argument parser for a sanity Python target."""
def __init__(self) -> None:
super().__init__(allow_venv=False)
|
SanityPythonTargetParser
|
python
|
spack__spack
|
lib/spack/spack/vendor/jinja2/nodes.py
|
{
"start": 11993,
"end": 12133
}
|
class ____(Stmt):
"""Node for filter sections."""
fields = ("body", "filter")
body: t.List[Node]
filter: "Filter"
|
FilterBlock
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 987464,
"end": 987629
}
|
class ____(sgqlc.types.Type, GitSignature):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ()
|
SmimeSignature
|
python
|
kamyu104__LeetCode-Solutions
|
Python/reaching-points.py
|
{
"start": 42,
"end": 503
}
|
class ____(object):
def reachingPoints(self, sx, sy, tx, ty):
"""
:type sx: int
:type sy: int
:type tx: int
:type ty: int
:rtype: bool
"""
while tx >= sx and ty >= sy:
if tx < ty:
sx, sy = sy, sx
tx, ty = ty, tx
if ty > sy:
tx %= ty
else:
return (tx - sx) % ty == 0
return False
|
Solution
|
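A minimal usage sketch for the reaching-points record above, assuming its Solution class is in scope; it only exercises the backwards reduction (and the modulo shortcut) that the snippet already implements.
sol = Solution()
assert sol.reachingPoints(1, 1, 3, 5)           # (1,1) -> (1,2) -> (3,2) -> (3,5)
assert not sol.reachingPoints(1, 1, 2, 2)       # walking (2,2) backwards never reaches (1,1)
assert sol.reachingPoints(1, 1, 10**9, 1)       # the modulo step keeps this fast instead of looping one step at a time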
python
|
realpython__materials
|
python-maze-solver/source_code_final/src/maze_solver/view/primitives.py
|
{
"start": 232,
"end": 451
}
|
class ____(NamedTuple):
x: int
y: int
def draw(self, **attributes) -> str:
return f"{self.x},{self.y}"
def translate(self, x=0, y=0) -> "Point":
return Point(self.x + x, self.y + y)
|
Point
|
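A small usage sketch for the Point record above, assuming the class is importable as shown; translate returns a new tuple rather than mutating in place.
p = Point(10, 20)
assert p.draw() == "10,20"                  # SVG-style "x,y" fragment
assert p.translate(x=5) == Point(15, 20)    # a new Point is returned
assert p.translate(y=-20) == Point(10, 0)
assert p == Point(10, 20)                   # the original is unchanged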
python
|
Textualize__textual
|
tests/css/test_nested_css.py
|
{
"start": 1267,
"end": 2008
}
|
class ____(App[None]):
CSS = """
Label {
&.foo, &.bar {
background: red;
}
}
"""
def compose(self) -> ComposeResult:
yield Label("one", classes="foo")
yield Label("two", classes="bar")
yield Label("three", classes="heh")
async def test_lists_of_selectors_in_nested_css() -> None:
"""Regression test for https://github.com/Textualize/textual/issues/3969."""
app = ListOfNestedSelectorsApp()
red = Color.parse("red")
async with app.run_test():
assert app.query_one(".foo").styles.background == red
assert app.query_one(".bar").styles.background == red
assert app.query_one(".heh").styles.background != red
|
ListOfNestedSelectorsApp
|
python
|
pytorch__pytorch
|
torch/_inductor/template_heuristics/decompose_k.py
|
{
"start": 608,
"end": 1265
}
|
class ____(TemplateConfigHeuristics):
"""empty heuristics to skip decompose k on anything not cuda"""
# on CUDA, we don't support hip for decompose_k yet
@register_template_heuristic(
decompose_k_subgraph_template.uid,
"cuda",
register=torch.version.hip is None,
op_name="mm",
)
# TODO(coconutruben): enable decompose k on AMD by removing the register bool
# and benchmarking it for performance and stability
# TODO(coconutruben): enable decompose k on other devices (xpu, cpu, mps, mtia)
# by either adding specific register_template_heuristic tags, or setting the
# device to None (enabled on all devices)
|
EmptyDecomposeKConfigHeuristics
|
python
|
mlflow__mlflow
|
mlflow/gateway/base_models.py
|
{
"start": 57,
"end": 340
}
|
class ____(
BaseModel,
# Allow extra fields for pydantic request models, e.g. to support
# vendor-specific embeddings parameters
extra="allow",
):
"""
A pydantic model representing Gateway request data, such as a chat or completions request
"""
|
RequestModel
|
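A hedged sketch of how the extra="allow" class keyword in the gateway record above behaves under pydantic v2; EmbeddingsRequest and input_type are illustrative names, not part of mlflow.
from pydantic import BaseModel
class EmbeddingsRequest(BaseModel, extra="allow"):   # same class-keyword config as the record
    input: str
req = EmbeddingsRequest(input="hello", input_type="query")   # vendor-specific extra field
assert req.input == "hello"
assert req.model_dump()["input_type"] == "query"             # extras are kept (pydantic v2 behaviour)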
python
|
plotly__plotly.py
|
plotly/graph_objs/isosurface/colorbar/_title.py
|
{
"start": 233,
"end": 3992
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface.colorbar"
_path_str = "isosurface.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.isosurface.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Title
|
python
|
fluentpython__example-code-2e
|
24-class-metaprog/hours/hours.py
|
{
"start": 1278,
"end": 2559
}
|
class ____:
h: int
_m: int
_s: float
def __class_getitem__(cls, parts: Union[slice, float]) -> 'Hours':
if isinstance(parts, slice):
h = parts.start or 0
m = valid_base_60(parts.stop or 0, 'minutes')
s = valid_base_60(parts.step or 0, 'seconds')
else:
h, m, s = normalize(parts * 3600)
return Hours(h, m, s)
def __init__(self, h: float = 0, m: float = 0, s: float = 0):
if h < 0 or m < 0 or s < 0:
raise ValueError('invalid negative argument')
self.h, self.m, self.s = normalize(h * 3600 + m * 60 + s)
def __repr__(self):
h, m, s = self
display_s = f'{s:06.3f}'
display_s = display_s.rstrip('0').rstrip('.')
if display_s == '00':
return f'{h}:{m:02d}'
return f'{h}:{m:02d}:{display_s}'
def __float__(self):
return self.h + self.m / 60 + self.s / 3600
def __eq__(self, other):
return repr(self) == repr(other)
def __iter__(self):
yield self.h
yield self.m
yield self.s
def __add__(self, other):
if not isinstance(other, Hours):
return NotImplemented
return Hours(*(a + b for a, b in zip(self, other)))
H = Hours
|
Hours
|
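A rough usage sketch for the Hours record above, assuming the class and its module-level helpers (normalize, valid_base_60) are in scope and that normalize does the usual divmod into h/m/s; H is the alias defined at the end of the record.
assert repr(H[1:30]) == '1:30'       # slice start/stop -> hours and minutes
assert repr(H[2.5]) == '2:30'        # a bare float is read as fractional hours
assert H[1:30] + H[0:45] == H[2:15]  # addition renormalizes overflowing minutes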
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/unnecessaryIsInstance1.py
|
{
"start": 182,
"end": 873
}
|
class ____(CustomClass1):
pass
def func1(p1: int, p2: int | str):
a = isinstance(p2, str)
b = isinstance(p2, (int, float))
# This should generate an error because this is always true.
c = isinstance(p2, (float, dict, int, str))
d = isinstance(p1, float)
e = isinstance(p2, (float, dict, int))
# This should generate an error because this is always true.
f = isinstance(p1, int)
# This should not generate an error because it's within an assert.
assert isinstance(p1, int)
g = CustomClass2()
# This should not generate an error because CustomClass2
# derives from an unknown type.
g = isinstance(g, CustomClass1)
|
CustomClass2
|
python
|
pytorch__pytorch
|
test/mobile/model_test/nn_ops.py
|
{
"start": 9096,
"end": 9565
}
|
class ____(torch.nn.Module):
def forward(self):
input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
input2 = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
embedding_matrix = torch.rand(10, 3)
offsets = torch.tensor([0, 4])
return len(
F.embedding(input, embedding_matrix),
F.embedding_bag(input2, embedding_matrix, offsets),
F.one_hot(torch.arange(0, 5) % 3, num_classes=5),
)
|
NNSparseModule
|
python
|
lxml__lxml
|
src/lxml/html/__init__.py
|
{
"start": 40005,
"end": 40747
}
|
class ____(MutableMapping):
def __init__(self, inputs):
self.inputs = inputs
def __getitem__(self, item):
return self.inputs[item].value
def __setitem__(self, item, value):
self.inputs[item].value = value
def __delitem__(self, item):
raise KeyError(
"You cannot remove keys from ElementDict")
def keys(self):
return self.inputs.keys()
def __contains__(self, item):
return item in self.inputs
def __iter__(self):
return iter(self.inputs.keys())
def __len__(self):
return len(self.inputs)
def __repr__(self):
return '<%s for form %s>' % (
self.__class__.__name__,
self.inputs.form._name())
|
FieldsDict
|
python
|
kennethreitz__tablib
|
src/tablib/formats/__init__.py
|
{
"start": 1962,
"end": 2288
}
|
class ____(FormatDescriptorBase):
def __get__(self, obj, cls, **kwargs):
self.ensure_format_loaded()
return self._format.export_set(obj, **kwargs)
def __set__(self, obj, val):
self.ensure_format_loaded()
return self._format.import_set(obj, normalize_input(val))
|
ImportExportSetDescriptor
|
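The tablib record above is a data descriptor whose __get__ exports the owning dataset and whose __set__ imports into it; below is a self-contained sketch of the same pattern with illustrative names (CsvAttr and Table are not part of tablib).
class CsvAttr:
    """Reading exports the owner's rows as CSV text; assigning parses CSV text back in."""
    def __get__(self, obj, cls):
        if obj is None:
            return self
        return "\r\n".join(",".join(row) for row in obj.rows)
    def __set__(self, obj, value):
        obj.rows = [line.split(",") for line in value.splitlines() if line]
class Table:
    csv = CsvAttr()
    def __init__(self):
        self.rows = []
t = Table()
t.csv = "a,b\n1,2"                         # __set__ imports
assert t.rows == [["a", "b"], ["1", "2"]]
assert t.csv == "a,b\r\n1,2"               # __get__ exports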
python
|
ray-project__ray
|
python/ray/util/multiprocessing/pool.py
|
{
"start": 5169,
"end": 5276
}
|
class ____(Exception):
def __init__(self, underlying):
self.underlying = underlying
|
PoolTaskError
|
python
|
huggingface__transformers
|
src/transformers/models/olmo3/convert_olmo3_weights_to_hf.py
|
{
"start": 3492,
"end": 18401
}
|
class ____(dist_cp.StorageReader):
"""
A :class:`~torch.distributed.checkpoint.StorageReader` based on :class:`~torch.distributed.checkpoint.FileSystemReader`
that can read data directly from cloud storage as well as a local directory.
"""
def __init__(
self,
path: Path | str,
*,
thread_count: int | None = None,
pre_download: bool = False,
work_dir: Path | str | None = None,
):
super().__init__()
if thread_count is not None and thread_count <= 0:
raise ValueError("thread count must be at least 1")
self.path = normalize_path(path)
self.thread_count = thread_count or 1
self.pre_download = pre_download
self.work_dir = normalize_path(work_dir) if work_dir is not None else None
self.storage_data: dict[MetadataIndex, _StorageInfo] = {}
self.load_id = generate_uuid()
self._metadata: Metadata | None = None
def _get_bytes(self, relative_path: str, offset: int, length: int) -> bytes:
full_path = f"{self.path}/{relative_path}"
return get_bytes_range(full_path, offset, length)
def _get_content_for_read(self, read_item: ReadItem) -> tuple[ReadItem, bytes]:
sinfo = self.storage_data[read_item.storage_index]
content = self._get_bytes(sinfo.relative_path, sinfo.offset, sinfo.length)
return (read_item, content)
def reset(self, checkpoint_id: Path | str | None = None) -> None:
self.storage_data = {}
if checkpoint_id:
self.path = normalize_path(checkpoint_id)
self.load_id = generate_uuid()
def read_data(self, plan: dist_cp.LoadPlan, planner: dist_cp.LoadPlanner) -> Future[None]:
with ThreadPoolExecutor(max_workers=self.thread_count) as executor:
read_item_content_futures = []
for read_item in plan.items:
read_item_content_futures.append(executor.submit(self._get_content_for_read, read_item))
read_item_content_results = []
for f in as_completed(read_item_content_futures):
try:
read_item_content_results.append(f.result())
except BaseException:
# NOTE: we might get an error here that can't be pickled, which causes a different failure
# later when PyTorch tries to reduce that error across ranks. So here we just make
# sure we're raising a simple error type that can be pickled.
raise RuntimeError(f"Original error:\n{traceback.format_exc()}")
# Modified from `FileSystemReader.read_data()`
for read_item, content in read_item_content_results:
bytes = io.BytesIO(content)
bytes.seek(0)
if read_item.type == LoadItemType.BYTE_IO:
planner.load_bytes(read_item, bytes)
else:
# NOTE: 'weights_only=False' needed to load torchao's float8 linear layer checkpoints
tensor = cast(torch.Tensor, torch.load(bytes, map_location="cpu", weights_only=False))
tensor = _narrow_tensor_by_index(tensor, read_item.storage_offsets, read_item.lengths)
target_tensor = planner.resolve_tensor(read_item).detach()
assert target_tensor.size() == tensor.size(), (
f"req {read_item.storage_index} mismatch sizes {target_tensor.size()} vs {tensor.size()}"
)
target_tensor.copy_(tensor)
planner.commit_tensor(read_item, target_tensor)
fut: Future = Future()
fut.set_result(None)
return fut
def read_metadata(self) -> Metadata:
if self._metadata is None:
try:
if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
raise ValueError(
"This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
"malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
"that could have been tampered with. If you already verified the pickle data and decided to use it, "
"you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
)
with (Path(self.path) / ".metadata").open("rb") as metadata_file:
metadata = pickle.load(metadata_file)
except FileNotFoundError as exc:
msg = f"'{self.path}' is not a distributed checkpoint folder."
suggested_dir = os.path.join(self.path, "model_and_optim")
if Path(os.path.join(suggested_dir, ".metadata")).exists():
msg += f" Did you mean to use '{suggested_dir}'?"
raise FileNotFoundError(msg) from exc
if getattr(metadata, "storage_meta", None) is None:
metadata.storage_meta = StorageMeta()
metadata.storage_meta.load_id = self.load_id
self._metadata = metadata
return self._metadata
def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:
del is_coordinator
self.storage_data = metadata.storage_data
assert self.storage_data is not None
def prepare_local_plan(self, plan: dist_cp.LoadPlan) -> dist_cp.LoadPlan:
return plan
def prepare_global_plan(self, global_plan: list[dist_cp.LoadPlan]) -> list[dist_cp.LoadPlan]:
return global_plan
@property
def checkpoint_id(self) -> str:
return self.path
@classmethod
def validate_checkpoint_id(cls, checkpoint_id: Path | str) -> bool:
del checkpoint_id
return True
def load_model(model_path: str):
def _load_unsharded_keys(
dir: Path | str,
keys: list[str],
*,
pre_download: bool = False,
work_dir: Path | str | None = None,
) -> dict[str, Any]:
from torch.distributed.checkpoint.default_planner import _EmptyStateDictLoadPlanner
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict
state_dict: dict[str, Any] = {}
_load_state_dict(
state_dict,
storage_reader=RemoteFileSystemReader(dir, pre_download=pre_download, work_dir=work_dir),
planner=_EmptyStateDictLoadPlanner(keys=keys),
no_dist=True,
)
return state_dict
if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
raise ValueError(
"This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
"malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
"that could have been tampered with. If you already verified the pickle data and decided to use it, "
"you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
)
with (Path(model_path) / ".metadata").open("rb") as metadata_file:
metadata = pickle.load(metadata_file)
keys = [key for key in metadata.state_dict_metadata.keys() if key.startswith("model.")]
# keys = ["model.blocks.0.attention.w_q.weight"]
return _load_unsharded_keys(
model_path,
keys,
# model_path, ["model.blocks.0.attention.w_q.weight", "model.blocks.0.attention.w_k.weight"]
)
def write_model(
model_path,
input_base_path,
include_tokenizer=True,
tokenizer_id=None,
safe_serialization=True,
tmp_cleanup=True,
):
os.makedirs(model_path, exist_ok=True)
tmp_model_path = os.path.join(model_path, "tmp")
os.makedirs(tmp_model_path, exist_ok=True)
config_path = Path(input_base_path) / "config.json"
olmo3_config = json.loads(config_path.read_text())
model_config = olmo3_config["model"]
block_config = model_config["block"]
attention_config = block_config["attention"]
tokenizer_config = olmo3_config["dataset"]["tokenizer"]
n_layers = model_config["n_layers"]
n_heads = attention_config["n_heads"]
dim = model_config["d_model"]
dims_per_head = dim // n_heads
base = attention_config["rope"]["theta"]
inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
max_position_embeddings = olmo3_config["train_module"]["max_sequence_length"]
if attention_config.get("n_kv_heads", None) is not None:
num_key_value_heads = attention_config["n_kv_heads"]  # for GQA / MQA
else:
num_key_value_heads = n_heads
print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
loaded = load_model(os.path.join(input_base_path, "model_and_optim"))["model"]
print(loaded.keys())
# loaded = torch.load(os.path.join(input_base_path, "model.pt"), map_location="cpu", weights_only=True)
param_count = 0
index_dict: dict[str, Any] = {"weight_map": {}}
for layer_i in range(n_layers):
filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
# Unsharded
state_dict = {
f"model.layers.{layer_i}.self_attn.q_proj.weight": loaded[f"blocks.{layer_i}.attention.w_q.weight"],
f"model.layers.{layer_i}.self_attn.k_proj.weight": loaded[f"blocks.{layer_i}.attention.w_k.weight"],
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"blocks.{layer_i}.attention.w_v.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"blocks.{layer_i}.attention.w_out.weight"],
f"model.layers.{layer_i}.self_attn.q_norm.weight": loaded[f"blocks.{layer_i}.attention.q_norm.weight"],
f"model.layers.{layer_i}.self_attn.k_norm.weight": loaded[f"blocks.{layer_i}.attention.k_norm.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"blocks.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"blocks.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"blocks.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[
f"blocks.{layer_i}.attention_norm.weight"
],
f"model.layers.{layer_i}.post_feedforward_layernorm.weight": loaded[
f"blocks.{layer_i}.feed_forward_norm.weight"
],
}
state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
# Unsharded
# TODO: Deal with weight-tying
state_dict = {
"model.embed_tokens.weight": loaded["embeddings.weight"],
"model.norm.weight": loaded["lm_head.norm.weight"],
"lm_head.weight": loaded["lm_head.w_out.weight"],
}
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
# Write configs
index_dict["metadata"] = {"total_size": param_count * 2}
write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
config = Olmo3Config(
vocab_size=model_config["vocab_size"],
hidden_size=dim,
intermediate_size=block_config["feed_forward"]["hidden_size"],
num_hidden_layers=n_layers,
num_attention_heads=n_heads,
num_key_value_heads=num_key_value_heads,
max_position_embeddings=max_position_embeddings,
pad_token_id=tokenizer_config["pad_token_id"],
bos_token_id=None,
eos_token_id=tokenizer_config["eos_token_id"],
tie_word_embeddings=False,
rms_norm_eps=block_config["layer_norm"]["eps"],
rope_theta=base,
)
config.save_pretrained(tmp_model_path)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
if include_tokenizer:
tokenizer_id = tokenizer_id or tokenizer_config["identifier"]
_write_tokenizer(model_path, tokenizer_id)
print("Loading the checkpoint in a Olmo 3 model.")
model = Olmo3ForCausalLM.from_pretrained(tmp_model_path, dtype=torch.bfloat16)
print("Resizing token embeddings to match tokenizer config.")
model.resize_token_embeddings(tokenizer_config["vocab_size"])
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format.")
model.save_pretrained(model_path, safe_serialization=safe_serialization)
if tmp_cleanup:
# Make cleanup optional; attempting to `rmtree` the `tmp_model_path` causes
# errors if using NFS.
shutil.rmtree(tmp_model_path)
def _write_tokenizer(
output_path: Path,
tokenizer_id: str,
) -> None:
print(f"Saving a tokenizer to {output_path}.")
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
tokenizer.save_pretrained(output_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
required=True,
help="Location of Olmo 3 weights, which contains config.yaml and model.pt.",
)
parser.add_argument(
"--no_tokenizer",
action="store_false",
dest="include_tokenizer",
help="If set, do not convert OLMo tokenizer to HF tokenizer.",
)
parser.add_argument(
"--tokenizer",
type=Path,
default=None,
help="Location of Olmo 3 tokenizer json file. Defaults to what is set in the config file.",
)
parser.add_argument(
"--output_dir",
required=True,
help="Location to write HF model and tokenizer",
)
parser.add_argument(
"--no_tmp_cleanup",
action="store_false",
dest="tmp_cleanup",
help="If passed, don't remove temp dir at end of HF conversion.",
)
parser.add_argument(
"--no_safe_serialization",
action="store_false",
dest="safe_serialization",
help="Whether or not to save using `safetensors`.",
)
args = parser.parse_args()
write_model(
model_path=args.output_dir,
input_base_path=args.input_dir,
safe_serialization=args.safe_serialization,
include_tokenizer=args.include_tokenizer,
tokenizer_id=args.tokenizer,
tmp_cleanup=args.tmp_cleanup,
)
if __name__ == "__main__":
main()
|
RemoteFileSystemReader
|
python
|
huggingface__transformers
|
tests/models/siglip2/test_modeling_siglip2.py
|
{
"start": 23979,
"end": 25078
}
|
class ____(Siglip2ModelTester):
def __init__(self, parent):
super().__init__(parent)
self.batch_size = self.vision_model_tester.batch_size
self.num_hidden_layers = self.vision_model_tester.num_hidden_layers
self.hidden_size = self.vision_model_tester.hidden_size
self.seq_length = self.vision_model_tester.seq_length
def prepare_config_and_inputs(self):
_, pixel_values, pixel_attention_mask, spatial_shapes = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, pixel_values, pixel_attention_mask, spatial_shapes
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, pixel_attention_mask, spatial_shapes = config_and_inputs
inputs_dict = {
"pixel_values": pixel_values,
"pixel_attention_mask": pixel_attention_mask,
"spatial_shapes": spatial_shapes,
}
return config, inputs_dict
@require_torch
|
Siglip2ForImageClassificationModelTester
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/utils/dataproc.py
|
{
"start": 845,
"end": 960
}
|
class ____(Enum):
"""Contains types of long running operations."""
DIAGNOSE = "DIAGNOSE"
|
DataprocOperationType
|
python
|
pypa__warehouse
|
tests/unit/admin/views/test_users.py
|
{
"start": 48168,
"end": 57353
}
|
class ____:
def test_user_recover_account_complete(self, db_request, monkeypatch):
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
assert user.totp_secret is not None
assert len(user.webauthn) == 1
assert len(user.recovery_codes.all()) == 1
db_request.matchdict["username"] = str(user.username)
db_request.params = {"username": user.username}
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar")
db_request.user = user
service = pretend.stub(
find_userid=pretend.call_recorder(lambda username: user.username),
disable_password=pretend.call_recorder(
lambda userid, request, reason: None
),
)
db_request.find_service = pretend.call_recorder(lambda iface, context: service)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "send_password_reset_by_admin_email", send_email)
result = views.user_recover_account_complete(user, db_request)
assert user.totp_secret is None
assert len(user.webauthn) == 0
assert len(user.recovery_codes.all()) == 0
assert db_request.find_service.calls == [
pretend.call(IUserService, context=None)
]
assert send_email.calls == [pretend.call(db_request, user)]
assert service.disable_password.calls == [
pretend.call(user.id, db_request, reason=DisableReason.AdminInitiated)
]
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert result.status_code == 303
assert result.location == "/foobar"
def test_user_recover_account_complete_completes_active_account_recoveries(
self, db_request, monkeypatch
):
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
account_recovery0 = user.record_observation(
request=db_request,
kind=ObservationKind.AccountRecovery,
actor=admin_user,
summary="Account Recovery",
payload={"completed": None},
)
account_recovery0.additional = {"status": "initiated"}
account_recovery1 = user.record_observation(
request=db_request,
kind=ObservationKind.AccountRecovery,
actor=admin_user,
summary="Account Recovery",
payload={"completed": None},
)
account_recovery1.additional = {"status": "initiated"}
assert user.totp_secret is not None
assert len(user.webauthn) == 1
assert len(user.recovery_codes.all()) == 1
db_request.matchdict["username"] = str(user.username)
db_request.params = {"username": user.username}
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar")
db_request.user = user
service = pretend.stub(
find_userid=pretend.call_recorder(lambda username: user.username),
disable_password=pretend.call_recorder(
lambda userid, request, reason: None
),
)
db_request.find_service = pretend.call_recorder(lambda iface, context: service)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "send_password_reset_by_admin_email", send_email)
now = datetime.datetime.now(datetime.UTC)
with freezegun.freeze_time(now):
result = views.user_recover_account_complete(user, db_request)
assert user.totp_secret is None
assert len(user.webauthn) == 0
assert len(user.recovery_codes.all()) == 0
assert db_request.find_service.calls == [
pretend.call(IUserService, context=None)
]
assert account_recovery0.additional["status"] == "completed"
assert account_recovery0.payload["completed"] == str(now)
assert account_recovery1.additional["status"] == "completed"
assert account_recovery1.payload["completed"] == str(now)
assert send_email.calls == [pretend.call(db_request, user)]
assert service.disable_password.calls == [
pretend.call(user.id, db_request, reason=DisableReason.AdminInitiated)
]
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert result.status_code == 303
assert result.location == "/foobar"
def test_user_recover_account_complete_bad_confirm(self, db_request, monkeypatch):
user = UserFactory.create()
db_request.matchdict["username"] = str(user.username)
db_request.params = {"username": "wrong"}
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar")
db_request.user = UserFactory.create()
service = pretend.stub(
find_userid=pretend.call_recorder(lambda username: user.username),
disable_password=pretend.call_recorder(lambda userid, reason: None),
)
db_request.find_service = pretend.call_recorder(lambda iface, context: service)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "send_password_reset_by_admin_email", send_email)
result = views.user_recover_account_complete(user, db_request)
assert db_request.find_service.calls == []
assert send_email.calls == []
assert service.disable_password.calls == []
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert result.status_code == 303
assert result.location == "/foobar"
def test_user_recover_account_complete_redirects_actual_name(self, db_request):
user = UserFactory.create(username="wu-tang")
db_request.matchdict["username"] = "Wu-Tang"
db_request.current_route_path = pretend.call_recorder(
lambda username: "/user/the-redirect/"
)
result = views.user_recover_account_complete(user, db_request)
assert isinstance(result, HTTPMovedPermanently)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.current_route_path.calls == [
pretend.call(username=user.username)
]
def test_user_recover_account_complete_with_override_email_sets_as_primary(
self, db_request, monkeypatch
):
user = UserFactory.create(with_verified_primary_email=True)
existing_primary_email = user.primary_email
assert len(user.emails) == 1
# Create preconditions from `views.user_recover_account_initiate`
override_to_email = EmailFactory.create(
user=user, primary=False, verified=False
)
recovery_observation = user.record_observation(
request=db_request,
kind=ObservationKind.AccountRecovery,
actor=user,
summary="Account Recovery",
payload={
"initiated": "2021-01-01T00:00:00+00:00",
"completed": None,
"override_to_email": override_to_email.email,
},
)
recovery_observation.additional = {"status": "initiated"}
assert len(user.active_account_recoveries) == 1
assert len(user.emails) == 2
db_request.method = "POST"
db_request.matchdict["username"] = str(user.username)
db_request.params = {"username": user.username}
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar")
db_request.user = user
service = pretend.stub(
find_userid=pretend.call_recorder(lambda username: user.username),
disable_password=pretend.call_recorder(
lambda userid, request, reason: None
),
)
db_request.find_service = pretend.call_recorder(lambda iface, context: service)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "send_password_reset_by_admin_email", send_email)
result = views.user_recover_account_complete(user, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.location == "/foobar"
assert existing_primary_email.primary is False
assert existing_primary_email.verified is True
assert user.primary_email == override_to_email
assert user.primary_email.verified is True
|
TestUserRecoverAccountComplete
|
python
|
django__django
|
tests/admin_views/test_nav_sidebar.py
|
{
"start": 814,
"end": 4268
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super",
password="secret",
email="super@example.com",
)
def setUp(self):
self.client.force_login(self.superuser)
def test_sidebar_not_on_index(self):
response = self.client.get(reverse("test_with_sidebar:index"))
self.assertContains(response, '<div class="main" id="main">')
self.assertNotContains(
response, '<nav class="sticky" id="nav-sidebar" aria-label="Sidebar">'
)
def test_sidebar_disabled(self):
response = self.client.get(reverse("test_without_sidebar:index"))
self.assertNotContains(
response, '<nav class="sticky" id="nav-sidebar" aria-label="Sidebar">'
)
def test_sidebar_unauthenticated(self):
self.client.logout()
response = self.client.get(reverse("test_with_sidebar:login"))
self.assertNotContains(
response, '<nav class="sticky" id="nav-sidebar" aria-label="Sidebar">'
)
def test_sidebar_aria_current_page(self):
url = reverse("test_with_sidebar:auth_user_changelist")
response = self.client.get(url)
self.assertContains(
response, '<nav class="sticky" id="nav-sidebar" aria-label="Sidebar">'
)
self.assertContains(
response, '<a href="%s" aria-current="page">Users</a>' % url
)
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
]
)
def test_sidebar_aria_current_page_missing_without_request_context_processor(self):
url = reverse("test_with_sidebar:auth_user_changelist")
response = self.client.get(url)
self.assertContains(
response, '<nav class="sticky" id="nav-sidebar" aria-label="Sidebar">'
)
# Does not include aria-current attribute.
self.assertContains(response, '<a href="%s">Users</a>' % url)
@override_settings(DEBUG=True)
def test_included_app_list_template_context_fully_set(self):
# All context variables should be set when rendering the sidebar.
url = reverse("test_with_sidebar:auth_user_changelist")
with self.assertNoLogs("django.template", "DEBUG"):
self.client.get(url)
def test_sidebar_model_name_non_ascii(self):
url = reverse("test_with_sidebar:admin_views_héllo_changelist")
response = self.client.get(url)
self.assertContains(
response, '<div class="app-admin_views module current-app">'
)
self.assertContains(response, '<tr class="model-héllo current-model">')
self.assertContains(
response,
'<th scope="row" id="admin_views-héllo">'
'<a href="/test_sidebar/admin/admin_views/h%C3%A9llo/" aria-current="page">'
"Héllos</a></th>",
html=True,
)
@override_settings(ROOT_URLCONF="admin_views.test_nav_sidebar")
|
AdminSidebarTests
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/code_structure/tutorial001/models.py
|
{
"start": 88,
"end": 306
}
|
class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
headquarters: str
heroes: List["Hero"] = Relationship(back_populates="team")
|
Team
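For orientation, a minimal end-to-end sketch of how this Team model is typically paired with a Hero model and an in-memory database in the SQLModel tutorials; the Hero fields below are assumptions kept to the minimum needed for the relationship to resolve, not necessarily the repository's exact definition:
from typing import List, Optional
from sqlmodel import Field, Relationship, Session, SQLModel, create_engine

class Team(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str = Field(index=True)
    headquarters: str
    heroes: List["Hero"] = Relationship(back_populates="team")

class Hero(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str
    team_id: Optional[int] = Field(default=None, foreign_key="team.id")
    team: Optional[Team] = Relationship(back_populates="heroes")

engine = create_engine("sqlite://")  # in-memory database for the sketch
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
    team = Team(name="Preventers", headquarters="Sharp Tower")
    session.add(Hero(name="Rusty-Man", team=team))
    session.commit()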
|
python
|
pandas-dev__pandas
|
pandas/tests/tslibs/test_array_to_datetime.py
|
{
"start": 10294,
"end": 10750
}
|
class ____(datetime):
pass
@pytest.mark.parametrize("klass", [SubDatetime, datetime, Timestamp])
def test_datetime_subclass(klass):
# GH 25851
# ensure that subclassed datetime works with
# array_to_datetime
arr = np.array([klass(2000, 1, 1)], dtype=object)
result, _ = tslib.array_to_datetime(arr)
expected = np.array(["2000-01-01T00:00:00.000000"], dtype="M8[us]")
tm.assert_numpy_array_equal(result, expected)
|
SubDatetime
|
python
|
numba__numba
|
numba/tests/test_compiler_flags.py
|
{
"start": 308,
"end": 602
}
|
class ____(TestCase):
def test_setting_invalid_attribute(self):
flags = Flags()
msg = "'Flags' object has no attribute 'this_really_does_not_exist'"
with self.assertRaisesRegex(AttributeError, msg):
flags.this_really_does_not_exist = True
|
TestCompilerFlags
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/elements/metric_test.py
|
{
"start": 1159,
"end": 20076
}
|
class ____(DeltaGeneratorTestCase):
"""Test ability to marshall metric protos and invalid input."""
def test_no_value(self):
st.metric("label_test", None)
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
# This is an em dash. Not a regular "-"
assert c.body == "—"
assert (
c.label_visibility.value
== LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE
)
def test_label_and_value(self):
"""Test that metric can be called with label and value passed in."""
st.metric("label_test", "123")
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert c.color == MetricProto.MetricColor.GRAY
assert c.direction == MetricProto.MetricDirection.NONE
assert not c.show_border
@parameterized.expand(
[
("visible", LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE),
("hidden", LabelVisibilityMessage.LabelVisibilityOptions.HIDDEN),
("collapsed", LabelVisibilityMessage.LabelVisibilityOptions.COLLAPSED),
]
)
def test_label_visibility(self, label_visibility_value, proto_value):
"""Test that metric can be called with label_visibility param."""
st.metric("label_test", "123", label_visibility=label_visibility_value)
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert c.label_visibility.value == proto_value
def test_border(self):
"""Test that metric can be called with border param."""
st.metric("label_test", "123", border=True)
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert c.show_border
def test_label_and_value_and_delta_and_delta_color(self):
"""Test that metric can be called with label, value, delta, and delta
colors passed in."""
st.metric("label_test", "123", -321, delta_color="normal")
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert c.delta == "-321"
assert c.color == MetricProto.MetricColor.RED
assert c.direction == MetricProto.MetricDirection.DOWN
def test_value(self):
"""Test that metric delta returns the correct proto value"""
arg_values = ["some str", 123, -1.234, None]
proto_values = [
"some str",
"123",
"-1.234",
"—",
]
for arg_value, proto_value in zip(arg_values, proto_values, strict=False):
st.metric("label_test", arg_value)
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert proto_value == c.body
def test_delta_values(self):
"""Test that metric delta returns the correct proto value"""
arg_values = [" -253", "+25", "26", 123, -123, 1.234, -1.5, None, ""]
delta_values = ["-253", "+25", "26", "123", "-123", "1.234", "-1.5", "", ""]
for arg_value, delta_value in zip(arg_values, delta_values, strict=False):
st.metric("label_test", "4312", arg_value)
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert delta_value == c.delta
def test_delta_color(self):
"""Test that metric delta colors returns the correct proto value."""
arg_delta_values = ["-123", -123, -1.23, "123", 123, 1.23, None, ""]
arg_delta_color_values = [
"normal",
"inverse",
"off",
"normal",
"inverse",
"off",
"normal",
"normal",
]
color_values = [
MetricProto.MetricColor.RED,
MetricProto.MetricColor.GREEN,
MetricProto.MetricColor.GRAY,
MetricProto.MetricColor.GREEN,
MetricProto.MetricColor.RED,
MetricProto.MetricColor.GRAY,
MetricProto.MetricColor.GRAY,
MetricProto.MetricColor.GRAY,
]
direction_values = [
MetricProto.MetricDirection.DOWN,
MetricProto.MetricDirection.DOWN,
MetricProto.MetricDirection.DOWN,
MetricProto.MetricDirection.UP,
MetricProto.MetricDirection.UP,
MetricProto.MetricDirection.UP,
MetricProto.MetricDirection.NONE,
MetricProto.MetricDirection.NONE,
]
for (
arg_delta_value,
arg_delta_color_value,
color_value,
direction_value,
) in zip(
arg_delta_values,
arg_delta_color_values,
color_values,
direction_values,
strict=False,
):
st.metric(
"label_test", "4312", arg_delta_value, delta_color=arg_delta_color_value
)
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.color == color_value
assert c.direction == direction_value
def test_delta_arrow_default(self):
"""Test that metric delta arrow defaults to auto."""
st.metric("label_test", "123", 123)
c = self.get_delta_from_queue().new_element.metric
assert c.direction == MetricProto.MetricDirection.UP
@parameterized.expand(
[
("auto", 5, MetricProto.MetricDirection.UP, MetricProto.MetricColor.GREEN),
("up", -5, MetricProto.MetricDirection.UP, MetricProto.MetricColor.RED),
(
"down",
5,
MetricProto.MetricDirection.DOWN,
MetricProto.MetricColor.GREEN,
),
("off", 5, MetricProto.MetricDirection.NONE, MetricProto.MetricColor.GREEN),
]
)
def test_delta_arrow_values(
self,
delta_arrow_value,
delta,
expected_direction,
expected_color,
):
"""Test that metric overrides direction according to delta arrow setting."""
st.metric(
"label_test",
"123",
delta,
delta_arrow=delta_arrow_value,
)
c = self.get_delta_from_queue().new_element.metric
assert c.direction == expected_direction
assert c.color == expected_color
def test_delta_arrow_invalid(self):
"""Test that invalid delta arrow raises an error."""
with pytest.raises(StreamlitValueError):
st.metric("label_test", "123", 5, delta_arrow="invalid") # type: ignore[arg-type]
def test_metric_in_column(self):
col1, col2, col3, col4, col5 = st.columns(5)
with col1:
st.metric("Column 1", 123, 123)
with col2:
st.metric("Column 2", 123, 123)
with col3:
st.metric("Column 3", 123, 123)
col4.metric("Column 4", -123, -123)
col5.metric("Column 5", "-123", 0)
all_deltas = self.get_all_deltas_from_queue()
        # 11 elements will be created: 1 horizontal block, 5 columns, 5 widgets
assert len(all_deltas) == 11
metric_proto = self.get_delta_from_queue().new_element.metric
assert metric_proto.label == "Column 5"
def test_invalid_label(self):
with pytest.raises(TypeError) as exc:
st.metric(123, "-321")
assert str(exc.value) == (
"'123' is of type <class 'int'>, which is not an accepted type. "
"label only accepts: str. Please convert the label to an accepted type."
)
def test_invalid_label_visibility(self):
with pytest.raises(StreamlitAPIException) as e:
st.metric("label_test", "123", label_visibility="wrong_value")
assert (
str(e.value)
== "Unsupported label_visibility option 'wrong_value'. Valid values are 'visible', 'hidden' or 'collapsed'."
)
def test_empty_label_warning(self):
"""Test that a warning is logged if st.metric was called with empty label."""
with self.assertLogs(_LOGGER) as logs:
st.metric(label="", value="123")
assert (
"`label` got an empty value. This is discouraged for accessibility reasons"
in logs.records[0].msg
)
# Check that the stack trace is included in the warning message:
assert logs.records[0].stack_info is not None
def test_invalid_value(self):
with pytest.raises(TypeError) as exc:
st.metric("Testing", [1, 2, 3])
assert str(exc.value) == (
"'[1, 2, 3]' is of type <class 'list'>, which is not an accepted type. "
"Please convert the value to an accepted number type."
)
def test_invalid_delta(self):
with pytest.raises(TypeError) as exc:
st.metric("Testing", "123", [123])
assert str(exc.value) == (
"'[123]' is of type <class 'list'>, which is not an accepted type. "
"Please convert the value to an accepted number type."
)
def test_invalid_delta_color(self):
with pytest.raises(StreamlitAPIException) as exc:
st.metric("Hello World.", 123, 0, delta_color="Invalid")
assert (
str(exc.value)
== "'Invalid' is not an accepted value. delta_color only accepts: 'normal', 'inverse', or 'off'"
)
def test_help(self):
st.metric("label_test", value="500", help=" help text")
c = self.get_delta_from_queue().new_element.metric
assert c.help == "help text"
def test_height_default(self):
"""Test that height defaults to content."""
st.metric("label_test", "123")
c = self.get_delta_from_queue().new_element
assert c.metric.label == "label_test"
assert c.metric.body == "123"
assert (
c.height_config.WhichOneof("height_spec")
== HeightConfigFields.USE_CONTENT.value
)
assert c.height_config.use_content
def test_height_types(self):
"""Test that metric can be called with different height types."""
test_cases = [
(500, HeightConfigFields.PIXEL_HEIGHT.value, "pixel_height", 500),
("stretch", HeightConfigFields.USE_STRETCH.value, "use_stretch", True),
("content", HeightConfigFields.USE_CONTENT.value, "use_content", True),
]
for height_value, expected_height_spec, field_name, field_value in test_cases:
with self.subTest(height_value=height_value):
st.metric("label_test", "123", height=height_value)
c = self.get_delta_from_queue().new_element
assert c.metric.label == "label_test"
assert c.metric.body == "123"
assert c.height_config.WhichOneof("height_spec") == expected_height_spec
assert getattr(c.height_config, field_name) == field_value
def test_invalid_height(self):
"""Test that metric raises an error with invalid height."""
test_cases = [
(
"invalid",
"Invalid height value: 'invalid'. Height must be either an integer (pixels), 'stretch', or 'content'.",
),
(
-100,
"Invalid height value: -100. Height must be either an integer (pixels), 'stretch', or 'content'.",
),
(
0,
"Invalid height value: 0. Height must be either an integer (pixels), 'stretch', or 'content'.",
),
(
100.5,
"Invalid height value: 100.5. Height must be either an integer (pixels), 'stretch', or 'content'.",
),
]
for height_value, expected_error_message in test_cases:
with self.subTest(height_value=height_value):
with pytest.raises(StreamlitAPIException) as exc:
st.metric("label_test", "123", height=height_value)
assert str(exc.value) == expected_error_message
def test_width_types(self):
"""Test that metric can be called with different width types."""
test_cases = [
(500, WidthConfigFields.PIXEL_WIDTH.value, "pixel_width", 500),
("stretch", WidthConfigFields.USE_STRETCH.value, "use_stretch", True),
("content", WidthConfigFields.USE_CONTENT.value, "use_content", True),
(None, WidthConfigFields.USE_STRETCH.value, "use_stretch", True),
]
for width_value, expected_width_spec, field_name, field_value in test_cases:
with self.subTest(width_value=width_value):
if width_value is None:
st.metric("label_test", "123")
else:
st.metric("label_test", "123", width=width_value)
c = self.get_delta_from_queue().new_element
assert c.metric.label == "label_test"
assert c.metric.body == "123"
assert c.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(c.width_config, field_name) == field_value
def test_invalid_width(self):
"""Test that metric raises an error with invalid width."""
test_cases = [
(
"invalid",
"Invalid width value: 'invalid'. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
-100,
"Invalid width value: -100. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
0,
"Invalid width value: 0. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
100.5,
"Invalid width value: 100.5. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
]
for width_value, expected_error_message in test_cases:
with self.subTest(width_value=width_value):
with pytest.raises(StreamlitAPIException) as exc:
st.metric("label_test", "123", width=width_value)
assert str(exc.value) == expected_error_message
def test_chart_data_none(self):
"""Test that metric works with default chart_data=None."""
st.metric("label_test", "123")
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert len(c.chart_data) == 0
def test_chart_data_valid_list(self):
"""Test that metric can be called with valid chart_data list."""
chart_data = [1, 2, 3, 4, 5]
st.metric("label_test", "123", chart_data=chart_data)
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert list(c.chart_data) == [1.0, 2.0, 3.0, 4.0, 5.0]
def test_chart_data_valid_mixed_numeric(self):
"""Test that metric can be called with mixed numeric types in chart_data."""
chart_data = [1, 2.5, -3, 0, 10.7]
st.metric("label_test", "123", chart_data=chart_data)
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert list(c.chart_data) == [1.0, 2.5, -3.0, 0.0, 10.7]
def test_chart_data_string_numbers(self):
"""Test that metric can convert string numbers in chart_data."""
chart_data = ["1", "2.5", "-3", "0"]
st.metric("label_test", "123", chart_data=chart_data)
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert list(c.chart_data) == [1.0, 2.5, -3.0, 0.0]
def test_chart_data_empty_list(self):
"""Test that metric works with empty chart_data list."""
st.metric("label_test", "123", chart_data=[])
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert len(c.chart_data) == 0
def test_chart_data_invalid_values(self):
"""Test that metric raises error with invalid chart_data values."""
chart_data = [1, 2, "invalid", 4]
with pytest.raises(StreamlitAPIException) as exc:
st.metric("label_test", "123", chart_data=chart_data)
assert "Only numeric values are supported for chart data sequence" in str(
exc.value
)
assert "'invalid' is of type <class 'str'>" in str(exc.value)
assert "cannot be converted to float" in str(exc.value)
def test_chart_data_invalid_non_sequence(self):
"""Test that metric raises error with invalid chart_data non-sequence values."""
chart_data = [1, 2, {"invalid": "dict"}, 4]
with pytest.raises(StreamlitAPIException) as exc:
st.metric("label_test", "123", chart_data=chart_data)
assert "Only numeric values are supported for chart data sequence" in str(
exc.value
)
@parameterized.expand(
[
("line", MetricProto.ChartType.LINE),
("bar", MetricProto.ChartType.BAR),
("area", MetricProto.ChartType.AREA),
]
)
def test_chart_type_valid_values(self, chart_type_value, expected_proto_value):
"""Test that metric can be called with valid chart_type values."""
st.metric("label_test", "123", chart_type=chart_type_value)
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert c.chart_type == expected_proto_value
def test_chart_type_default(self):
"""Test that chart_type defaults to line."""
st.metric("label_test", "123")
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert c.chart_type == MetricProto.ChartType.LINE
def test_chart_data_and_chart_type_together(self):
"""Test that metric can be called with both chart_data and chart_type."""
chart_data = [10, 20, 15, 25, 30]
st.metric("label_test", "123", chart_data=chart_data, chart_type="bar")
c = self.get_delta_from_queue().new_element.metric
assert c.label == "label_test"
assert c.body == "123"
assert list(c.chart_data) == [10.0, 20.0, 15.0, 25.0, 30.0]
assert c.chart_type == MetricProto.ChartType.BAR
|
MetricTest
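The tests above drive st.metric indirectly through the delta queue; for reference, a typical direct call looks like the sketch below (label and values are illustrative, not taken from the test suite):
import streamlit as st

# Renders a metric card; a positive delta shows an upward green arrow by default.
st.metric(label="Temperature", value="70 °F", delta="1.2 °F", delta_color="normal")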
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
|
{
"start": 157312,
"end": 165831
}
|
class ____(GeneratedAirbyteSource):
class Disable:
@public
def __init__(
self,
):
self.mode = "disable"
class Allow:
@public
def __init__(
self,
):
self.mode = "allow"
class Prefer:
@public
def __init__(
self,
):
self.mode = "prefer"
class Require:
@public
def __init__(
self,
):
self.mode = "require"
class VerifyCa:
@public
def __init__(
self,
ca_certificate: str,
client_certificate: Optional[str] = None,
client_key: Optional[str] = None,
client_key_password: Optional[str] = None,
):
self.mode = "verify-ca"
self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")
self.client_certificate = check.opt_str_param(client_certificate, "client_certificate")
self.client_key = check.opt_str_param(client_key, "client_key")
self.client_key_password = check.opt_str_param(
client_key_password, "client_key_password"
)
class VerifyFull:
@public
def __init__(
self,
ca_certificate: str,
client_certificate: Optional[str] = None,
client_key: Optional[str] = None,
client_key_password: Optional[str] = None,
):
self.mode = "verify-full"
self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")
self.client_certificate = check.opt_str_param(client_certificate, "client_certificate")
self.client_key = check.opt_str_param(client_key, "client_key")
self.client_key_password = check.opt_str_param(
client_key_password, "client_key_password"
)
class Standard:
@public
def __init__(
self,
):
self.method = "Standard"
class LogicalReplicationCDC:
@public
def __init__(
self,
replication_slot: str,
publication: str,
plugin: Optional[str] = None,
initial_waiting_seconds: Optional[int] = None,
):
self.method = "CDC"
self.plugin = check.opt_str_param(plugin, "plugin")
self.replication_slot = check.str_param(replication_slot, "replication_slot")
self.publication = check.str_param(publication, "publication")
self.initial_waiting_seconds = check.opt_int_param(
initial_waiting_seconds, "initial_waiting_seconds"
)
class NoTunnel:
@public
def __init__(
self,
):
self.tunnel_method = "NO_TUNNEL"
class SSHKeyAuthentication:
@public
def __init__(self, tunnel_host: str, tunnel_port: int, tunnel_user: str, ssh_key: str):
self.tunnel_method = "SSH_KEY_AUTH"
self.tunnel_host = check.str_param(tunnel_host, "tunnel_host")
self.tunnel_port = check.int_param(tunnel_port, "tunnel_port")
self.tunnel_user = check.str_param(tunnel_user, "tunnel_user")
self.ssh_key = check.str_param(ssh_key, "ssh_key")
class PasswordAuthentication:
@public
def __init__(
self, tunnel_host: str, tunnel_port: int, tunnel_user: str, tunnel_user_password: str
):
self.tunnel_method = "SSH_PASSWORD_AUTH"
self.tunnel_host = check.str_param(tunnel_host, "tunnel_host")
self.tunnel_port = check.int_param(tunnel_port, "tunnel_port")
self.tunnel_user = check.str_param(tunnel_user, "tunnel_user")
self.tunnel_user_password = check.str_param(
tunnel_user_password, "tunnel_user_password"
)
@public
def __init__(
self,
name: str,
host: str,
port: int,
database: str,
username: str,
ssl_mode: Union[
"PostgresSource.Disable",
"PostgresSource.Allow",
"PostgresSource.Prefer",
"PostgresSource.Require",
"PostgresSource.VerifyCa",
"PostgresSource.VerifyFull",
],
replication_method: Union[
"PostgresSource.Standard", "PostgresSource.LogicalReplicationCDC"
],
tunnel_method: Union[
"PostgresSource.NoTunnel",
"PostgresSource.SSHKeyAuthentication",
"PostgresSource.PasswordAuthentication",
],
schemas: Optional[list[str]] = None,
password: Optional[str] = None,
jdbc_url_params: Optional[str] = None,
ssl: Optional[bool] = None,
):
"""Airbyte Source for Postgres.
Documentation can be found at https://docs.airbyte.com/integrations/sources/postgres
Args:
name (str): The name of the destination.
host (str): Hostname of the database.
port (int): Port of the database.
database (str): Name of the database.
schemas (Optional[List[str]]): The list of schemas (case sensitive) to sync from. Defaults to public.
username (str): Username to access the database.
password (Optional[str]): Password associated with the username.
jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.
ssl (Optional[bool]): Encrypt data using SSL. When activating SSL, please select one of the connection modes.
ssl_mode (Union[PostgresSource.Disable, PostgresSource.Allow, PostgresSource.Prefer, PostgresSource.Require, PostgresSource.VerifyCa, PostgresSource.VerifyFull]): SSL connection modes. disable - Disables encryption of communication between Airbyte and source database allow - Enables encryption only when required by the source database prefer - allows unencrypted connection only if the source database does not support encryption require - Always require encryption. If the source database server does not support encryption, connection will fail verify-ca - Always require encryption and verifies that the source database server has a valid SSL certificate verify-full - This is the most secure mode. Always require encryption and verifies the identity of the source database server Read more in the docs.
replication_method (Union[PostgresSource.Standard, PostgresSource.LogicalReplicationCDC]): Replication method for extracting data from the database.
tunnel_method (Union[PostgresSource.NoTunnel, PostgresSource.SSHKeyAuthentication, PostgresSource.PasswordAuthentication]): Whether to initiate an SSH tunnel before connecting to the database, and if so, which kind of authentication to use.
"""
self.host = check.str_param(host, "host")
self.port = check.int_param(port, "port")
self.database = check.str_param(database, "database")
self.schemas = check.opt_nullable_list_param(schemas, "schemas", str)
self.username = check.str_param(username, "username")
self.password = check.opt_str_param(password, "password")
self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
self.ssl = check.opt_bool_param(ssl, "ssl")
self.ssl_mode = check.inst_param(
ssl_mode,
"ssl_mode",
(
PostgresSource.Disable,
PostgresSource.Allow,
PostgresSource.Prefer,
PostgresSource.Require,
PostgresSource.VerifyCa,
PostgresSource.VerifyFull,
),
)
self.replication_method = check.inst_param(
replication_method,
"replication_method",
(PostgresSource.Standard, PostgresSource.LogicalReplicationCDC),
)
self.tunnel_method = check.inst_param(
tunnel_method,
"tunnel_method",
(
PostgresSource.NoTunnel,
PostgresSource.SSHKeyAuthentication,
PostgresSource.PasswordAuthentication,
),
)
super().__init__("Postgres", name)
|
PostgresSource
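Because the constructor takes nested option classes for ssl_mode, replication_method and tunnel_method, a hedged instantiation sketch may help; the host, database and credential values are placeholders, not real configuration:
from dagster_airbyte.managed.generated.sources import PostgresSource

source = PostgresSource(
    name="my_postgres_source",
    host="db.example.com",      # placeholder
    port=5432,
    database="app_db",          # placeholder
    username="airbyte",         # placeholder
    password="REPLACE_ME",      # placeholder
    ssl_mode=PostgresSource.Require(),
    replication_method=PostgresSource.Standard(),
    tunnel_method=PostgresSource.NoTunnel(),
)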
|
python
|
ipython__ipython
|
IPython/core/magic.py
|
{
"start": 1996,
"end": 10898
}
|
class ____:
pass
def on_off(tag):
"""Return an ON/OFF string for a 1/0 input. Simple utility function."""
return ["OFF", "ON"][tag]
def compress_dhist(dh):
"""Compress a directory history into a new one with at most 20 entries.
Return a new list made from the first and last 10 elements of dhist after
removal of duplicates.
"""
head, tail = dh[:-10], dh[-10:]
newhead = []
done = set()
for h in head:
if h in done:
continue
newhead.append(h)
done.add(h)
return newhead + tail
def needs_local_scope(func):
"""Decorator to mark magic functions which need to local scope to run."""
func.needs_local_scope = True
return func
# -----------------------------------------------------------------------------
# Class and method decorators for registering magics
# -----------------------------------------------------------------------------
def magics_class(cls):
"""Class decorator for all subclasses of the main Magics class.
Any class that subclasses Magics *must* also apply this decorator, to
ensure that all the methods that have been decorated as line/cell magics
get correctly registered in the class instance. This is necessary because
when method decorators run, the class does not exist yet, so they
temporarily store their information into a module global. Application of
this class decorator copies that global data to the class instance and
clears the global.
Obviously, this mechanism is not thread-safe, which means that the
*creation* of subclasses of Magic should only be done in a single-thread
context. Instantiation of the classes has no restrictions. Given that
these classes are typically created at IPython startup time and before user
application code becomes active, in practice this should not pose any
problems.
"""
cls.registered = True
cls.magics = dict(line=magics["line"], cell=magics["cell"])
magics["line"] = {}
magics["cell"] = {}
return cls
def record_magic(dct, magic_kind, magic_name, func):
"""Utility function to store a function as a magic of a specific kind.
Parameters
----------
dct : dict
A dictionary with 'line' and 'cell' subdicts.
magic_kind : str
Kind of magic to be stored.
magic_name : str
Key to store the magic as.
func : function
Callable object to store.
"""
if magic_kind == "line_cell":
dct["line"][magic_name] = dct["cell"][magic_name] = func
else:
dct[magic_kind][magic_name] = func
def validate_type(magic_kind):
"""Ensure that the given magic_kind is valid.
Check that the given magic_kind is one of the accepted spec types (stored
in the global `magic_spec`), raise ValueError otherwise.
"""
if magic_kind not in magic_spec:
raise ValueError(
"magic_kind must be one of %s, %s given" % magic_kinds, magic_kind
)
# The docstrings for the decorator below will be fairly similar for the two
# types (method and function), so we generate them here once and reuse the
# templates below.
_docstring_template = """Decorate the given {0} as {1} magic.
The decorator can be used with or without arguments, as follows.
i) without arguments: it will create a {1} magic named as the {0} being
decorated::
@deco
def foo(...)
will create a {1} magic named `foo`.
ii) with one string argument: which will be used as the actual name of the
resulting magic::
@deco('bar')
def foo(...)
will create a {1} magic named `bar`.
    To register a class magic use ``InteractiveShell.register_magic(class or instance)``.
"""
# These two are decorator factories. While they are conceptually very similar,
# there are enough differences in the details that it's simpler to have them
# written as completely standalone functions rather than trying to share code
# and make a single one with convoluted logic.
def _method_magic_marker(magic_kind):
"""Decorator factory for methods in Magics subclasses."""
validate_type(magic_kind)
# This is a closure to capture the magic_kind. We could also use a class,
# but it's overkill for just that one bit of state.
def magic_deco(arg):
if callable(arg):
# "Naked" decorator call (just @foo, no args)
func = arg
name = func.__name__
retval = arg
record_magic(magics, magic_kind, name, name)
elif isinstance(arg, str):
# Decorator called with arguments (@foo('bar'))
name = arg
def mark(func, *a, **kw):
record_magic(magics, magic_kind, name, func.__name__)
return func
retval = mark
else:
raise TypeError("Decorator can only be called with string or function")
return retval
# Ensure the resulting decorator has a usable docstring
magic_deco.__doc__ = _docstring_template.format("method", magic_kind)
return magic_deco
def _function_magic_marker(magic_kind):
"""Decorator factory for standalone functions."""
validate_type(magic_kind)
# This is a closure to capture the magic_kind. We could also use a class,
# but it's overkill for just that one bit of state.
def magic_deco(arg):
# Find get_ipython() in the caller's namespace
caller = sys._getframe(1)
for ns in ["f_locals", "f_globals", "f_builtins"]:
get_ipython = getattr(caller, ns).get("get_ipython")
if get_ipython is not None:
break
else:
raise NameError(
"Decorator can only run in context where `get_ipython` exists"
)
ip = get_ipython()
if callable(arg):
# "Naked" decorator call (just @foo, no args)
func = arg
name = func.__name__
ip.register_magic_function(func, magic_kind, name)
retval = arg
elif isinstance(arg, str):
# Decorator called with arguments (@foo('bar'))
name = arg
def mark(func, *a, **kw):
ip.register_magic_function(func, magic_kind, name)
return func
retval = mark
else:
raise TypeError("Decorator can only be called with string or function")
return retval
# Ensure the resulting decorator has a usable docstring
ds = _docstring_template.format("function", magic_kind)
ds += dedent(
"""
Note: this decorator can only be used in a context where IPython is already
active, so that the `get_ipython()` call succeeds. You can therefore use
it in your startup files loaded after IPython initializes, but *not* in the
IPython configuration file itself, which is executed before IPython is
fully up and running. Any file located in the `startup` subdirectory of
your configuration profile will be OK in this sense.
"""
)
magic_deco.__doc__ = ds
return magic_deco
MAGIC_NO_VAR_EXPAND_ATTR = "_ipython_magic_no_var_expand"
MAGIC_OUTPUT_CAN_BE_SILENCED = "_ipython_magic_output_can_be_silenced"
def no_var_expand(magic_func):
"""Mark a magic function as not needing variable expansion
By default, IPython interprets `{a}` or `$a` in the line passed to magics
as variables that should be interpolated from the interactive namespace
before passing the line to the magic function.
This is not always desirable, e.g. when the magic executes Python code
(%timeit, %time, etc.).
Decorate magics with `@no_var_expand` to opt-out of variable expansion.
.. versionadded:: 7.3
"""
setattr(magic_func, MAGIC_NO_VAR_EXPAND_ATTR, True)
return magic_func
def output_can_be_silenced(magic_func):
"""Mark a magic function so its output may be silenced.
The output is silenced if the Python code used as a parameter of
the magic ends in a semicolon, not counting a Python comment that can
follow it.
"""
setattr(magic_func, MAGIC_OUTPUT_CAN_BE_SILENCED, True)
return magic_func
# Create the actual decorators for public use
# These three are used to decorate methods in class definitions
line_magic = _method_magic_marker("line")
cell_magic = _method_magic_marker("cell")
line_cell_magic = _method_magic_marker("line_cell")
# These three decorate standalone functions and perform the decoration
# immediately. They can only run where get_ipython() works
register_line_magic = _function_magic_marker("line")
register_cell_magic = _function_magic_marker("cell")
register_line_cell_magic = _function_magic_marker("line_cell")
# -----------------------------------------------------------------------------
# Core Magic classes
# -----------------------------------------------------------------------------
|
Bunch
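The decorators defined above are what user code imports to declare custom magics; a minimal sketch, assuming a running IPython session for the registration step:
from IPython.core.magic import Magics, line_magic, magics_class

@magics_class
class GreetingMagics(Magics):
    @line_magic
    def greet(self, line):
        # Usage in IPython: %greet world  ->  'Hello, world'
        return f"Hello, {line}"

# Registration only works inside a live IPython session:
# get_ipython().register_magics(GreetingMagics)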
|
python
|
PyCQA__pyflakes
|
pyflakes/messages.py
|
{
"start": 9242,
"end": 9487
}
|
class ____(Message):
message = "'...' %% ... has unsupported format character %r"
def __init__(self, filename, loc, c):
Message.__init__(self, filename, loc)
self.message_args = (c,)
|
PercentFormatUnsupportedFormatCharacter
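As an illustration of when this message is emitted, percent-formatting with a conversion character Python does not support should be flagged by pyflakes roughly as in the comment below (the exact file/line prefix depends on where the snippet lives):
# pyflakes should flag the next line with:
#   '...' % ... has unsupported format character 'z'
print("value: %z" % 42)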
|
python
|
realpython__materials
|
python-unittest/test_age.py
|
{
"start": 53,
"end": 1605
}
|
class ____(unittest.TestCase):
def test_child(self):
"""Test for 'Child'"""
self.assertEqual(categorize_by_age(5), "Child")
def test_adolescent(self):
"""Test for 'Adolescent'"""
self.assertEqual(categorize_by_age(15), "Adolescent")
def test_adult(self):
"""Test for 'Adult'"""
self.assertEqual(categorize_by_age(30), "Adult")
def test_golden_age(self):
"""Test for 'Golden age'"""
self.assertEqual(categorize_by_age(70), "Golden age")
def test_negative_age(self):
"""Test for negative age"""
self.assertEqual(categorize_by_age(-1), "Invalid age: -1")
def test_too_old(self):
"""Test for too old"""
self.assertEqual(categorize_by_age(151), "Invalid age: 151")
def test_boundary_child_adolescent(self):
"""Test for boundary between 'Child' and 'Adolescent'"""
self.assertEqual(categorize_by_age(9), "Child")
self.assertEqual(categorize_by_age(10), "Adolescent")
def test_boundary_adolescent_adult(self):
"""Test for boundary between 'Adolescent' and 'Adult'"""
self.assertEqual(categorize_by_age(18), "Adolescent")
self.assertEqual(categorize_by_age(19), "Adult")
def test_boundary_adult_golden_age(self):
"""Test for boundary between 'Adult' and 'Golden age'"""
self.assertEqual(categorize_by_age(65), "Adult")
self.assertEqual(categorize_by_age(66), "Golden age")
if __name__ == "__main__":
unittest.main(verbosity=2)
|
TestCategorizeByAge
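The categorize_by_age function under test is not included in this excerpt; one implementation consistent with the boundaries asserted above could look like this sketch (not necessarily the repository's version):
def categorize_by_age(age):
    # Boundaries mirror the tests: 0-9 child, 10-18 adolescent,
    # 19-65 adult, 66-150 golden age, anything else invalid.
    if 0 <= age <= 9:
        return "Child"
    if 10 <= age <= 18:
        return "Adolescent"
    if 19 <= age <= 65:
        return "Adult"
    if 66 <= age <= 150:
        return "Golden age"
    return f"Invalid age: {age}"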
|
python
|
google__python-fire
|
fire/console/console_attr.py
|
{
"start": 3238,
"end": 3623
}
|
class ____(BoxLineCharacters):
"""unicode Box/line drawing characters (cp437 compatible unicode)."""
dl = '┐'
dr = '┌'
h = '─'
hd = '┬'
hu = '┴'
ul = '┘'
ur = '└'
v = '│'
vh = '┼'
vl = '┤'
vr = '├'
d_dl = '╗'
d_dr = '╔'
d_h = '═'
d_hd = '╦'
d_hu = '╩'
d_ul = '╝'
d_ur = '╚'
d_v = '║'
d_vh = '╬'
d_vl = '╣'
d_vr = '╠'
|
BoxLineCharactersUnicode
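A small sketch of how these drawing characters compose into a box; the import path follows the file path shown above, and the attribute names encode the directions each glyph connects (dr = down/right, i.e. the top-left corner):
from fire.console.console_attr import BoxLineCharactersUnicode

b = BoxLineCharactersUnicode()
print(b.dr + b.h * 3 + b.dl)   # top edge
print(b.v + " " * 3 + b.v)     # sides
print(b.ur + b.h * 3 + b.ul)   # bottom edge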
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py
|
{
"start": 1442,
"end": 6853
}
|
class ____(test.TestCase):
"""Tests correctness of addition with combinations of a few Adders.
Tests here are done with the _DEFAULT_ADDITION_TIERS, which means
add_operators should reduce all operators resulting in one single operator.
This shows that we are able to correctly combine adders using the tiered
system. All Adders should be tested separately, and there is no need to test
every Adder within this class.
"""
def test_one_operator_is_returned_unchanged(self):
op_a = linalg.LinearOperatorDiag([1., 1.])
op_sum = add_operators([op_a])
self.assertEqual(1, len(op_sum))
self.assertIs(op_sum[0], op_a)
def test_at_least_one_operators_required(self):
with self.assertRaisesRegex(ValueError, "must contain at least one"):
add_operators([])
def test_attempting_to_add_numbers_raises(self):
with self.assertRaisesRegex(TypeError, "contain only LinearOperator"):
add_operators([1, 2])
@test_util.run_deprecated_v1
def test_two_diag_operators(self):
op_a = linalg.LinearOperatorDiag(
[1., 1.], is_positive_definite=True, name="A")
op_b = linalg.LinearOperatorDiag(
[2., 2.], is_positive_definite=True, name="B")
with self.cached_session():
op_sum = add_operators([op_a, op_b])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorDiag)
self.assertAllClose([[3., 0.], [0., 3.]], op.to_dense())
# Adding positive definite operators produces positive def.
self.assertTrue(op.is_positive_definite)
# Real diagonal ==> self-adjoint.
self.assertTrue(op.is_self_adjoint)
# Positive definite ==> non-singular
self.assertTrue(op.is_non_singular)
# Enforce particular name for this simple case
self.assertEqual("Add/B__A/", op.name)
@test_util.run_deprecated_v1
def test_three_diag_operators(self):
op1 = linalg.LinearOperatorDiag(
[1., 1.], is_positive_definite=True, name="op1")
op2 = linalg.LinearOperatorDiag(
[2., 2.], is_positive_definite=True, name="op2")
op3 = linalg.LinearOperatorDiag(
[3., 3.], is_positive_definite=True, name="op3")
with self.cached_session():
op_sum = add_operators([op1, op2, op3])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertTrue(isinstance(op, linalg_lib.LinearOperatorDiag))
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense())
# Adding positive definite operators produces positive def.
self.assertTrue(op.is_positive_definite)
# Real diagonal ==> self-adjoint.
self.assertTrue(op.is_self_adjoint)
# Positive definite ==> non-singular
self.assertTrue(op.is_non_singular)
@test_util.run_deprecated_v1
def test_diag_tril_diag(self):
op1 = linalg.LinearOperatorDiag(
[1., 1.], is_non_singular=True, name="diag_a")
op2 = linalg.LinearOperatorLowerTriangular(
[[2., 0.], [0., 2.]],
is_self_adjoint=True,
is_non_singular=True,
name="tril")
op3 = linalg.LinearOperatorDiag(
[3., 3.], is_non_singular=True, name="diag_b")
with self.cached_session():
op_sum = add_operators([op1, op2, op3])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorLowerTriangular)
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense())
# The diag operators will be self-adjoint (because real and diagonal).
# The TriL operator has the self-adjoint hint set.
self.assertTrue(op.is_self_adjoint)
# Even though op1/2/3 are non-singular, this does not imply op is.
# Since no custom hint was provided, we default to None (unknown).
self.assertEqual(None, op.is_non_singular)
@test_util.run_deprecated_v1
def test_matrix_diag_tril_diag_uses_custom_name(self):
op0 = linalg.LinearOperatorFullMatrix(
[[-1., -1.], [-1., -1.]], name="matrix")
op1 = linalg.LinearOperatorDiag([1., 1.], name="diag_a")
op2 = linalg.LinearOperatorLowerTriangular(
[[2., 0.], [1.5, 2.]], name="tril")
op3 = linalg.LinearOperatorDiag([3., 3.], name="diag_b")
with self.cached_session():
op_sum = add_operators([op0, op1, op2, op3], operator_name="my_operator")
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorFullMatrix)
self.assertAllClose([[5., -1.], [0.5, 5.]], op.to_dense())
self.assertEqual("my_operator", op.name)
def test_incompatible_domain_dimensions_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(2, 4))
with self.assertRaisesRegex(ValueError, "must.*same `domain_dimension`"):
add_operators([op1, op2])
def test_incompatible_range_dimensions_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(3, 3))
with self.assertRaisesRegex(ValueError, "must.*same `range_dimension`"):
add_operators([op1, op2])
def test_non_broadcastable_batch_shape_raises(self):
op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3))
op2 = linalg.LinearOperatorDiag(rng.rand(4, 3, 3))
with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
add_operators([op1, op2])
|
LinearOperatorAdditionCorrectnessTest
|
python
|
numba__numba
|
numba/cuda/cudadrv/driver.py
|
{
"start": 81144,
"end": 81662
}
|
class ____(Module):
def get_function(self, name):
handle = driver.cuModuleGetFunction(self.handle, name.encode('utf8'))
return CudaPythonFunction(weakref.proxy(self), handle, name)
def get_global_symbol(self, name):
ptr, size = driver.cuModuleGetGlobal(self.handle, name.encode('utf8'))
return MemoryPointer(self.context, ptr, size), size
FuncAttr = namedtuple("FuncAttr", ["regs", "shared", "local", "const",
"maxthreads"])
|
CudaPythonModule
|
python
|
walkccc__LeetCode
|
solutions/841. Keys and Rooms/841.py
|
{
"start": 0,
"end": 281
}
|
class ____:
def canVisitAllRooms(self, rooms: list[list[int]]) -> bool:
seen = [False] * len(rooms)
def dfs(node: int) -> None:
seen[node] = True
for child in rooms[node]:
if not seen[child]:
dfs(child)
dfs(0)
return all(seen)
|
Solution
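Filled in with the class name, the DFS above can be exercised with the two standard examples for this problem:
class Solution:
    def canVisitAllRooms(self, rooms: list[list[int]]) -> bool:
        seen = [False] * len(rooms)
        def dfs(node: int) -> None:
            seen[node] = True
            for child in rooms[node]:
                if not seen[child]:
                    dfs(child)
        dfs(0)
        return all(seen)

print(Solution().canVisitAllRooms([[1], [2], [3], []]))            # True
print(Solution().canVisitAllRooms([[1, 3], [3, 0, 1], [2], [0]]))  # False (room 2 unreachable)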
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-orb/components.py
|
{
"start": 2394,
"end": 5235
}
|
class ____(StreamSlicer):
plans_stream: Stream
subscriptions_stream: Stream
config: Config
def stream_slices(self) -> Iterable[StreamSlice]:
"""
This stream is sliced per `subscription_id` and day, as well as `billable_metric_id`
if a grouping key is provided. This is because the API only supports a
single billable_metric_id per API call when using a group_by param.
"""
slice_yielded = False
subscriptions_stream = self.subscriptions_stream
plans_stream = self.plans_stream
# if using a group_by key, populate prices_by_plan_id so that each
# billable metric will get its own slice
if self.config.get("subscription_usage_grouping_key"):
metric_ids_by_plan_id = {}
for plan in plans_stream.read_records(sync_mode=SyncMode.full_refresh):
# if a plan_id filter is specified, skip any plan that doesn't match
if self.config.get("plan_id") and plan["id"] != self.config.get("plan_id"):
continue
prices = plan.get("prices", [])
metric_ids_by_plan_id[plan["id"]] = [(price.get("billable_metric") or {}).get("id") for price in prices]
for subscription in subscriptions_stream.read_records(sync_mode=SyncMode.full_refresh):
subscription_id = subscription["id"]
subscription_plan_id = subscription["plan_id"]
# if filtering subscription usage by plan ID, skip any subscription that doesn't match the plan_id
if self.config.get("plan_id") and subscription_plan_id != self.config.get("plan_id"):
continue
slice = {
"subscription_id": subscription_id,
}
# if using a group_by key, yield one slice per billable_metric_id.
# otherwise, yield slices without a billable_metric_id because
# each API call will return usage broken down by billable metric
# when grouping isn't used.
if self.config.get("subscription_usage_grouping_key"):
metric_ids = metric_ids_by_plan_id.get(subscription_plan_id)
if metric_ids is not None:
for metric_id in metric_ids:
# self.logger.warning("stream_slices is about to yield the following slice: %s", slice)
yield {**slice, "billable_metric_id": metric_id}
slice_yielded = True
else:
# self.logger.warning("stream_slices is about to yield the following slice: %s", slice)
yield slice
slice_yielded = True
if not slice_yielded:
# yield an empty slice to checkpoint state later
yield {}
|
SubscriptionUsagePartitionRouter
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_athena.py
|
{
"start": 2561,
"end": 17537
}
|
class ____:
@pytest.fixture(autouse=True)
def _setup_test_cases(self):
args = {
"owner": "airflow",
"start_date": DEFAULT_DATE,
}
self.dag = DAG(TEST_DAG_ID, default_args=args, schedule="@once")
self.default_op_kwargs = dict(
task_id="test_athena_operator",
query="SELECT * FROM TEST_TABLE",
database="TEST_DATABASE",
client_request_token="eac427d0-1c6d-4dfb-96aa-2835d3ac6595",
sleep_time=0,
max_polling_attempts=3,
)
self.athena = AthenaOperator(
**self.default_op_kwargs, output_location="s3://test_s3_bucket/", aws_conn_id=None, dag=self.dag
)
with mock.patch("airflow.providers.amazon.aws.links.athena.AthenaQueryResultsLink.persist") as m:
self.mocked_athena_result_link = m
yield
def test_base_aws_op_attributes(self):
op = AthenaOperator(**self.default_op_kwargs)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
assert op.hook.log_query is True
op = AthenaOperator(
**self.default_op_kwargs,
aws_conn_id="aws-test-custom-conn",
region_name="eu-west-1",
verify=False,
botocore_config={"read_timeout": 42},
log_query=False,
)
assert op.hook.aws_conn_id == "aws-test-custom-conn"
assert op.hook._region_name == "eu-west-1"
assert op.hook._verify is False
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
assert op.hook.log_query is False
def test_init(self):
assert self.athena.task_id == MOCK_DATA["task_id"]
assert self.athena.query == MOCK_DATA["query"]
assert self.athena.database == MOCK_DATA["database"]
assert self.athena.catalog == MOCK_DATA["catalog"]
assert self.athena.client_request_token == MOCK_DATA["client_request_token"]
assert self.athena.sleep_time == 0
@mock.patch.object(AthenaHook, "check_query_status", side_effect=("SUCCEEDED",))
@mock.patch.object(AthenaHook, "run_query", return_value=ATHENA_QUERY_ID)
@mock.patch.object(AthenaHook, "get_conn")
def test_hook_run_override_catalog(self, mock_conn, mock_run_query, mock_check_query_status):
query_context_catalog = {"Database": MOCK_DATA["database"], "Catalog": "MyCatalog"}
self.athena.catalog = "MyCatalog"
self.athena.execute({})
mock_run_query.assert_called_once_with(
MOCK_DATA["query"],
query_context_catalog,
result_configuration,
MOCK_DATA["client_request_token"],
MOCK_DATA["workgroup"],
)
assert mock_check_query_status.call_count == 1
@mock.patch.object(AthenaHook, "check_query_status", side_effect=("SUCCEEDED",))
@mock.patch.object(AthenaHook, "run_query", return_value=ATHENA_QUERY_ID)
@mock.patch.object(AthenaHook, "get_conn")
def test_hook_run_small_success_query(self, mock_conn, mock_run_query, mock_check_query_status):
self.athena.execute({})
mock_run_query.assert_called_once_with(
MOCK_DATA["query"],
query_context,
result_configuration,
MOCK_DATA["client_request_token"],
MOCK_DATA["workgroup"],
)
assert mock_check_query_status.call_count == 1
# Validate call persist Athena Query result link
self.mocked_athena_result_link.assert_called_once_with(
aws_partition=mock.ANY,
context=mock.ANY,
operator=mock.ANY,
region_name=mock.ANY,
query_execution_id=ATHENA_QUERY_ID,
)
@mock.patch.object(
AthenaHook,
"check_query_status",
side_effect="SUCCEEDED",
)
@mock.patch.object(AthenaHook, "run_query", return_value=ATHENA_QUERY_ID)
@mock.patch.object(AthenaHook, "get_conn")
def test_hook_run_big_success_query(self, mock_conn, mock_run_query, mock_check_query_status):
self.athena.execute({})
mock_run_query.assert_called_once_with(
MOCK_DATA["query"],
query_context,
result_configuration,
MOCK_DATA["client_request_token"],
MOCK_DATA["workgroup"],
)
@mock.patch.object(AthenaHook, "get_state_change_reason")
@mock.patch.object(AthenaHook, "check_query_status", return_value="FAILED")
@mock.patch.object(AthenaHook, "run_query", return_value=ATHENA_QUERY_ID)
@mock.patch.object(AthenaHook, "get_conn")
def test_hook_run_failure_query(
self,
mock_conn,
mock_run_query,
mock_check_query_status,
mock_get_state_change_reason,
):
with pytest.raises(AirflowException):
self.athena.execute({})
mock_run_query.assert_called_once_with(
MOCK_DATA["query"],
query_context,
result_configuration,
MOCK_DATA["client_request_token"],
MOCK_DATA["workgroup"],
)
assert mock_get_state_change_reason.call_count == 1
@mock.patch.object(AthenaHook, "check_query_status", return_value="CANCELLED")
@mock.patch.object(AthenaHook, "run_query", return_value=ATHENA_QUERY_ID)
@mock.patch.object(AthenaHook, "get_conn")
def test_hook_run_cancelled_query(self, mock_conn, mock_run_query, mock_check_query_status):
with pytest.raises(AirflowException):
self.athena.execute({})
mock_run_query.assert_called_once_with(
MOCK_DATA["query"],
query_context,
result_configuration,
MOCK_DATA["client_request_token"],
MOCK_DATA["workgroup"],
)
@mock.patch.object(AthenaHook, "check_query_status", return_value="RUNNING")
@mock.patch.object(AthenaHook, "run_query", return_value=ATHENA_QUERY_ID)
@mock.patch.object(AthenaHook, "get_conn")
def test_hook_run_failed_query_with_max_tries(self, mock_conn, mock_run_query, mock_check_query_status):
with pytest.raises(AirflowException):
self.athena.execute({})
mock_run_query.assert_called_once_with(
MOCK_DATA["query"],
query_context,
result_configuration,
MOCK_DATA["client_request_token"],
MOCK_DATA["workgroup"],
)
@pytest.mark.db_test
@mock.patch.object(AthenaHook, "check_query_status", side_effect=("SUCCEEDED",))
@mock.patch.object(AthenaHook, "run_query", return_value=ATHENA_QUERY_ID)
@mock.patch.object(AthenaHook, "get_conn")
def test_return_value(
self,
mock_conn,
mock_run_query,
mock_check_query_status,
session,
clean_dags_dagruns_and_dagbundles,
testing_dag_bundle,
):
"""Test we return the right value -- that will get put in to XCom by the execution engine"""
if AIRFLOW_V_3_0_PLUS:
from airflow.models.dag_version import DagVersion
sync_dag_to_db(self.dag)
dag_version = DagVersion.get_latest_version(self.dag.dag_id)
ti = TaskInstance(task=self.athena, dag_version_id=dag_version.id)
dag_run = DagRun(
dag_id=self.dag.dag_id,
logical_date=timezone.utcnow(),
run_id="test",
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
)
else:
dag_run = DagRun(
dag_id=self.dag.dag_id,
execution_date=timezone.utcnow(),
run_id="test",
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
)
ti = TaskInstance(task=self.athena)
ti.dag_run = dag_run
session.add(ti)
session.commit()
assert self.athena.execute(ti.get_template_context()) == ATHENA_QUERY_ID
@mock.patch.object(AthenaHook, "check_query_status", side_effect=("SUCCEEDED",))
@mock.patch.object(AthenaHook, "run_query", return_value=ATHENA_QUERY_ID)
@mock.patch.object(AthenaHook, "get_conn")
def test_optional_output_location(self, mock_conn, mock_run_query, mock_check_query_status):
op = AthenaOperator(**self.default_op_kwargs, aws_conn_id=None)
op.execute({})
mock_run_query.assert_called_once_with(
MOCK_DATA["query"],
query_context,
{}, # Should be an empty dict since we do not provide output_location
MOCK_DATA["client_request_token"],
MOCK_DATA["workgroup"],
)
@mock.patch.object(AthenaHook, "run_query", return_value=ATHENA_QUERY_ID)
def test_is_deferred(self, mock_run_query):
self.athena.deferrable = True
with pytest.raises(TaskDeferred) as deferred:
self.athena.execute(None)
assert isinstance(deferred.value.trigger, AthenaTrigger)
# Validate call persist Athena Query result link
self.mocked_athena_result_link.assert_called_once_with(
aws_partition=mock.ANY,
context=mock.ANY,
operator=mock.ANY,
region_name=mock.ANY,
query_execution_id=ATHENA_QUERY_ID,
)
def test_execute_complete_reassigns_query_execution_id_after_deferring(self):
"""Assert that we use query_execution_id from event after deferral."""
operator = AthenaOperator(
task_id="test_athena_operator",
query="SELECT * FROM TEST_TABLE",
database="TEST_DATABASE",
deferrable=True,
)
assert operator.query_execution_id is None
query_execution_id = "123456"
operator.execute_complete(
context=None,
event={"status": "success", "value": query_execution_id},
)
assert operator.query_execution_id == query_execution_id
@mock.patch.object(AthenaHook, "region_name", new_callable=mock.PropertyMock)
@mock.patch.object(AthenaHook, "get_conn")
def test_operator_openlineage_data(self, mock_conn, mock_region_name):
mock_region_name.return_value = "eu-west-1"
def mock_get_table_metadata(CatalogName, DatabaseName, TableName):
with open(os.path.dirname(__file__) + "/athena_metadata.json") as f:
return json.load(f)[TableName]
mock_conn.return_value.get_table_metadata = mock_get_table_metadata
op = AthenaOperator(
task_id="test_athena_openlineage",
query="INSERT INTO TEST_TABLE SELECT CUSTOMER_EMAIL FROM DISCOUNTS",
database="TEST_DATABASE",
output_location="s3://test_s3_bucket",
client_request_token="eac427d0-1c6d-4dfb-96aa-2835d3ac6595",
sleep_time=0,
max_polling_attempts=3,
dag=self.dag,
)
op.query_execution_id = "12345" # Mocking what will be available after execution
expected_lineage = OperatorLineage(
inputs=[
Dataset(
namespace="awsathena://athena.eu-west-1.amazonaws.com",
name="AwsDataCatalog.TEST_DATABASE.DISCOUNTS",
facets={
"symlinks": SymlinksDatasetFacet(
identifiers=[
Identifier(
namespace="s3://bucket",
name="/discount/data/path/",
type="TABLE",
)
],
),
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(
name="ID",
type="int",
description="from deserializer",
),
SchemaDatasetFacetFields(
name="AMOUNT_OFF",
type="int",
description="from deserializer",
),
SchemaDatasetFacetFields(
name="CUSTOMER_EMAIL",
type="varchar",
description="from deserializer",
),
SchemaDatasetFacetFields(
name="STARTS_ON",
type="timestamp",
description="from deserializer",
),
SchemaDatasetFacetFields(
name="ENDS_ON",
type="timestamp",
description=None,
),
],
),
},
)
],
outputs=[
Dataset(
namespace="awsathena://athena.eu-west-1.amazonaws.com",
name="AwsDataCatalog.TEST_DATABASE.TEST_TABLE",
facets={
"symlinks": SymlinksDatasetFacet(
identifiers=[
Identifier(
namespace="s3://bucket",
name="/data/test_table/data/path",
type="TABLE",
)
],
),
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(
name="column",
type="string",
description="from deserializer",
)
],
),
},
),
Dataset(namespace="s3://test_s3_bucket", name="/"),
],
job_facets={
"sql": SQLJobFacet(
query="INSERT INTO TEST_TABLE SELECT CUSTOMER_EMAIL FROM DISCOUNTS",
)
},
run_facets={"externalQuery": ExternalQueryRunFacet(externalQueryId="12345", source="awsathena")},
)
assert op.get_openlineage_facets_on_complete(None) == expected_lineage
def test_template_fields(self):
validate_template_fields(self.athena)
|
TestAthenaOperator
|
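For orientation, a minimal sketch of how the operator exercised by these tests is usually declared; the import path and connection id follow the Amazon provider's conventions and the values are placeholders, not taken from the test module:

```python
# Illustrative only: assumes the Amazon provider is installed and an AWS connection exists.
from airflow.providers.amazon.aws.operators.athena import AthenaOperator

run_query = AthenaOperator(
    task_id="run_athena_query",
    query="SELECT * FROM TEST_TABLE",
    database="TEST_DATABASE",
    output_location="s3://my-athena-results/",  # hypothetical results bucket
    aws_conn_id="aws_default",
    sleep_time=30,
    max_polling_attempts=10,
    deferrable=True,  # hand polling off to AthenaTrigger instead of blocking a worker
)
```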
python
|
keras-team__keras
|
keras/src/ops/numpy.py
|
{
"start": 103810,
"end": 104543
}
|
class ____(Operation):
def call(self, x):
return backend.numpy.expm1(x)
def compute_output_spec(self, x):
dtype = backend.standardize_dtype(x.dtype)
if "int" in dtype or dtype == "bool":
dtype = backend.floatx()
sparse = getattr(x, "sparse", False)
return KerasTensor(x.shape, dtype=dtype, sparse=sparse)
@keras_export(["keras.ops.expm1", "keras.ops.numpy.expm1"])
def expm1(x):
"""Calculate `exp(x) - 1` for all elements in the tensor.
Args:
x: Input values.
Returns:
Output tensor, element-wise exponential minus one.
"""
if any_symbolic_tensors((x,)):
return Expm1().symbolic_call(x)
return backend.numpy.expm1(x)
|
Expm1
|
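A brief usage sketch for the `expm1` op above; this assumes Keras 3 with any backend installed, and the input values are arbitrary:

```python
import keras

x = keras.ops.convert_to_tensor([0.0, 0.5, 1.0])
y = keras.ops.expm1(x)  # element-wise exp(x) - 1, more accurate than exp(x) - 1 for small x
print(keras.ops.convert_to_numpy(y))  # approximately [0.0, 0.6487, 1.7183]
```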
python
|
apache__airflow
|
providers/standard/src/airflow/providers/standard/operators/latest_only.py
|
{
"start": 1386,
"end": 5017
}
|
class ____(BaseBranchOperator):
"""
Skip tasks that are not running during the most recent schedule interval.
If the task is run outside the latest schedule interval (i.e. run_type == DagRunType.MANUAL),
all directly downstream tasks will be skipped.
Note that downstream tasks are never skipped if the given DAG_Run is
marked as externally triggered.
Note that when used with timetables that produce zero-length or point-in-time data intervals
(e.g., ``DeltaTriggerTimetable``), this operator assumes each run is the latest
and does not skip downstream tasks.
"""
ui_color = "#e9ffdb" # nyanza
def choose_branch(self, context: Context) -> str | Iterable[str]:
# If the DAG Run is externally triggered, then return without
# skipping downstream tasks
dag_run: DagRun = context["dag_run"] # type: ignore[assignment]
if dag_run.run_type == DagRunType.MANUAL:
self.log.info("Manually triggered DAG_Run: allowing execution to proceed.")
return list(self.get_direct_relative_ids(upstream=False))
dates = self._get_compare_dates(dag_run)
if dates is None:
self.log.info("Last scheduled execution: allowing execution to proceed.")
return list(self.get_direct_relative_ids(upstream=False))
now = pendulum.now("UTC")
left_window, right_window = dates
self.log.info(
"Checking latest only with left_window: %s right_window: %s now: %s",
left_window,
right_window,
now,
)
if not left_window < now <= right_window:
self.log.info("Not latest execution, skipping downstream.")
# we return an empty list, so the parent BaseBranchOperator
# excludes no downstream tasks from skipping, i.e. all of them are skipped.
return []
self.log.info("Latest, allowing execution to proceed.")
return list(self.get_direct_relative_ids(upstream=False))
def _get_compare_dates(self, dag_run: DagRun) -> tuple[DateTime, DateTime] | None:
dagrun_date: DateTime
if AIRFLOW_V_3_0_PLUS:
dagrun_date = dag_run.logical_date or dag_run.run_after # type: ignore[assignment]
else:
dagrun_date = dag_run.logical_date # type: ignore[assignment]
from airflow.timetables.base import DataInterval, TimeRestriction
if dag_run.data_interval_start:
start = pendulum.instance(dag_run.data_interval_start)
else:
start = dagrun_date
if dag_run.data_interval_end:
end = pendulum.instance(dag_run.data_interval_end)
else:
end = dagrun_date
current_interval = DataInterval(
start=start,
end=end,
)
time_restriction = TimeRestriction(
earliest=None, latest=current_interval.end - timedelta(microseconds=1), catchup=True
)
if prev_info := self.dag.timetable.next_dagrun_info(
last_automated_data_interval=current_interval,
restriction=time_restriction,
):
left = prev_info.data_interval.end
else:
left = current_interval.start
time_restriction = TimeRestriction(earliest=current_interval.end, latest=None, catchup=True)
next_info = self.dag.timetable.next_dagrun_info(
last_automated_data_interval=current_interval,
restriction=time_restriction,
)
if not next_info:
return None
return (left, next_info.data_interval.end)
|
LatestOnlyOperator
|
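A minimal wiring sketch for this operator. The `LatestOnlyOperator` import follows the provider path shown above; the `DAG` and `EmptyOperator` imports and the schedule are assumptions that vary between Airflow versions:

```python
# Illustrative only: on catch-up runs, downstream of latest_only is skipped;
# only the most recent schedule interval lets expensive_task run.
import datetime

from airflow import DAG
from airflow.providers.standard.operators.empty import EmptyOperator
from airflow.providers.standard.operators.latest_only import LatestOnlyOperator

with DAG(
    dag_id="latest_only_example",
    schedule="@daily",
    start_date=datetime.datetime(2024, 1, 1),
    catchup=True,
) as dag:
    latest_only = LatestOnlyOperator(task_id="latest_only")
    expensive_task = EmptyOperator(task_id="expensive_task")
    latest_only >> expensive_task
```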
python
|
tensorflow__tensorflow
|
tensorflow/python/debug/lib/debug_service_pb2_grpc.py
|
{
"start": 1267,
"end": 2466
}
|
class ____(object):
"""EventListener: Receives Event protos, e.g., from debugged TensorFlow
runtime(s).
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SendEvents = channel.stream_stream(
'/tensorflow.EventListener/SendEvents',
request_serializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString,
)
self.SendTracebacks = channel.unary_unary(
'/tensorflow.EventListener/SendTracebacks',
request_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.CallTraceback.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString,
)
self.SendSourceFiles = channel.unary_unary(
'/tensorflow.EventListener/SendSourceFiles',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DebuggedSourceFiles.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString,
)
|
EventListenerStub
|
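A short sketch of how a generated stub like this is typically constructed; the address is a placeholder, and the actual request/response protos come from the TensorFlow event and debug proto modules referenced above:

```python
# Illustrative only: connect to a (hypothetical) debug server and build the stub.
import grpc

channel = grpc.insecure_channel("localhost:6064")  # placeholder address
stub = EventListenerStub(channel)
# stub.SendEvents(iterator_of_Event_protos) is a streaming call;
# stub.SendTracebacks(...) and stub.SendSourceFiles(...) are unary calls,
# all returning EventReply protos as declared in the constructor above.
```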
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 289945,
"end": 290434
}
|
class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("enterprise_resource_path", "enterprise_slug", "enterprise_url")
enterprise_resource_path = sgqlc.types.Field(
URI, graphql_name="enterpriseResourcePath"
)
enterprise_slug = sgqlc.types.Field(String, graphql_name="enterpriseSlug")
enterprise_url = sgqlc.types.Field(URI, graphql_name="enterpriseUrl")
|
EnterpriseAuditEntryData
|
python
|
pypa__pip
|
src/pip/_vendor/pkg_resources/__init__.py
|
{
"start": 66274,
"end": 66968
}
|
class ____(ZipManifests):
"""
Memoized zipfile manifests.
"""
class manifest_mod(NamedTuple):
manifest: dict[str, zipfile.ZipInfo]
mtime: float
def load(self, path: str) -> dict[str, zipfile.ZipInfo]: # type: ignore[override] # ZipManifests.load is a classmethod
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
|
MemoizedZipManifests
|
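A behavioral sketch of the memoization above; pkg_resources uses this class internally, so treat this as an illustration rather than a public API, and note the wheel path is a placeholder:

```python
# Illustrative only: the second load of an unchanged zip reuses the cached manifest.
manifests = MemoizedZipManifests()
first = manifests.load("example-1.0-py3-none-any.whl")   # builds and caches the manifest
second = manifests.load("example-1.0-py3-none-any.whl")  # same mtime, cached dict returned
assert first is second
```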
python
|
django__django
|
tests/template_tests/filter_tests/test_linenumbers.py
|
{
"start": 169,
"end": 1243
}
|
class ____(SimpleTestCase):
"""
The contents of "linenumbers" is escaped according to the current
autoescape setting.
"""
@setup({"linenumbers01": "{{ a|linenumbers }} {{ b|linenumbers }}"})
def test_linenumbers01(self):
output = self.engine.render_to_string(
"linenumbers01",
{"a": "one\n<two>\nthree", "b": mark_safe("one\n<two>\nthree")},
)
self.assertEqual(
output, "1. one\n2. <two>\n3. three 1. one\n2. <two>\n3. three"
)
@setup(
{
"linenumbers02": (
"{% autoescape off %}{{ a|linenumbers }} {{ b|linenumbers }}"
"{% endautoescape %}"
)
}
)
def test_linenumbers02(self):
output = self.engine.render_to_string(
"linenumbers02",
{"a": "one\n<two>\nthree", "b": mark_safe("one\n<two>\nthree")},
)
self.assertEqual(
output, "1. one\n2. <two>\n3. three 1. one\n2. <two>\n3. three"
)
|
LinenumbersTests
|
python
|
scipy__scipy
|
scipy/odr/_odrpack.py
|
{
"start": 2243,
"end": 5422
}
|
class ____(Exception):
"""
Exception stopping fitting.
You can raise this exception in your objective function to tell
`~scipy.odr.odr` to stop fitting.
"""
pass
# Backwards compatibility
odr_error = OdrError
odr_stop = OdrStop
__odrpack._set_exceptions(OdrError, OdrStop)
def _conv(obj, dtype=None):
""" Convert an object to the preferred form for input to the odr routine.
"""
if obj is None:
return obj
else:
if dtype is None:
obj = np.asarray(obj)
else:
obj = np.asarray(obj, dtype)
if obj.shape == ():
# Scalar.
return obj.dtype.type(obj)
else:
return obj
def _report_error(info):
""" Interprets the return code of the odr routine.
Parameters
----------
info : int
The return code of the odr routine.
Returns
-------
problems : list(str)
A list of messages about why the odr() routine stopped.
"""
stopreason = ('Blank',
'Sum of squares convergence',
'Parameter convergence',
'Both sum of squares and parameter convergence',
'Iteration limit reached')[info % 5]
if info >= 5:
# questionable results or fatal error
I = (info//10000 % 10,
info//1000 % 10,
info//100 % 10,
info//10 % 10,
info % 10)
problems = []
if I[0] == 0:
if I[1] != 0:
problems.append('Derivatives possibly not correct')
if I[2] != 0:
problems.append('Error occurred in callback')
if I[3] != 0:
problems.append('Problem is not full rank at solution')
problems.append(stopreason)
elif I[0] == 1:
if I[1] != 0:
problems.append('N < 1')
if I[2] != 0:
problems.append('M < 1')
if I[3] != 0:
problems.append('NP < 1 or NP > N')
if I[4] != 0:
problems.append('NQ < 1')
elif I[0] == 2:
if I[1] != 0:
problems.append('LDY and/or LDX incorrect')
if I[2] != 0:
problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')
if I[3] != 0:
problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')
if I[4] != 0:
problems.append('LWORK and/or LIWORK too small')
elif I[0] == 3:
if I[1] != 0:
problems.append('STPB and/or STPD incorrect')
if I[2] != 0:
problems.append('SCLB and/or SCLD incorrect')
if I[3] != 0:
problems.append('WE incorrect')
if I[4] != 0:
problems.append('WD incorrect')
elif I[0] == 4:
problems.append('Error in derivatives')
elif I[0] == 5:
problems.append('Error occurred in callback')
elif I[0] == 6:
problems.append('Numerical error detected')
return problems
else:
return [stopreason]
|
OdrStop
|
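A small sketch of the pattern the `OdrStop` docstring describes: raising it from the model function to abort a fit. The data, bound, and starting values here are arbitrary:

```python
import numpy as np
from scipy.odr import ODR, Data, Model, OdrStop

def linear(beta, x):
    # Abort the fit if the slope estimate wanders outside a plausible range.
    if abs(beta[0]) > 100:
        raise OdrStop("slope out of bounds")
    return beta[0] * x + beta[1]

x = np.linspace(0.0, 10.0, 20)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.shape)
output = ODR(Data(x, y), Model(linear), beta0=[1.0, 0.0]).run()
print(output.beta)  # fitted slope and intercept, close to [2.0, 1.0]
```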
python
|
openai__openai-python
|
src/openai/resources/evals/runs/output_items.py
|
{
"start": 920,
"end": 5978
}
|
class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> OutputItemsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return OutputItemsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> OutputItemsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return OutputItemsWithStreamingResponse(self)
def retrieve(
self,
output_item_id: str,
*,
eval_id: str,
run_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OutputItemRetrieveResponse:
"""
Get an evaluation run output item by ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
if not output_item_id:
raise ValueError(f"Expected a non-empty value for `output_item_id` but received {output_item_id!r}")
return self._get(
f"/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=OutputItemRetrieveResponse,
)
def list(
self,
run_id: str,
*,
eval_id: str,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
status: Literal["fail", "pass"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[OutputItemListResponse]:
"""
Get a list of output items for an evaluation run.
Args:
after: Identifier for the last output item from the previous pagination request.
limit: Number of output items to retrieve.
order: Sort order for output items by timestamp. Use `asc` for ascending order or
`desc` for descending order. Defaults to `asc`.
status: Filter output items by status. Use `fail` to filter by failed output items or
`pass` to filter by passed output items.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._get_api_list(
f"/evals/{eval_id}/runs/{run_id}/output_items",
page=SyncCursorPage[OutputItemListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"status": status,
},
output_item_list_params.OutputItemListParams,
),
),
model=OutputItemListResponse,
)
|
OutputItems
|
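A usage sketch for this resource; the ids are placeholders, and it assumes the top-level client exposes the resource as `client.evals.runs.output_items`, mirroring the module path above:

```python
# Illustrative only: reads OPENAI_API_KEY from the environment.
from openai import OpenAI

client = OpenAI()
page = client.evals.runs.output_items.list(
    "run_123",            # run_id is positional, as in the signature above
    eval_id="eval_456",
    status="fail",
    limit=10,
)
for item in page:
    print(item.id)
```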
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_length.py
|
{
"start": 1006,
"end": 1112
}
|
class ____:
def __len__(self):
print("raise some error")
raise NotImplementedError
|
Length6
|
python
|
pytorch__pytorch
|
test/test_dataloader.py
|
{
"start": 123247,
"end": 125053
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
self.dataset = TensorDataset(inps, tgts)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(
collate_into_packed_sequence_batch_first,
torch.nn.utils.rnn.PackedSequence,
),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(
self.dataset, batch_size=2, collate_fn=collate_fn, pin_memory=True
)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin_worker(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(
collate_into_packed_sequence_batch_first,
torch.nn.utils.rnn.PackedSequence,
),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(
self.dataset,
batch_size=2,
collate_fn=collate_fn,
pin_memory=True,
num_workers=1,
)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
|
TestCustomPinFn
|
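For reference, a sketch of the pattern these tests exercise: a custom batch type exposing `pin_memory()` so that `DataLoader(pin_memory=True)` can pin it. The class and names below are illustrative, not the helpers used by the test module, and pinning only takes effect when an accelerator is available:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

class PinnableBatch:
    def __init__(self, samples):
        inps, tgts = zip(*samples)
        self.inp = torch.stack(inps)
        self.tgt = torch.stack(tgts)

    def pin_memory(self):
        # DataLoader calls this on each batch when pin_memory=True.
        self.inp = self.inp.pin_memory()
        self.tgt = self.tgt.pin_memory()
        return self

    def is_pinned(self):
        return self.inp.is_pinned() and self.tgt.is_pinned()

dataset = TensorDataset(torch.arange(50.0).view(10, 5), torch.arange(50.0).view(10, 5))
loader = DataLoader(dataset, batch_size=2, collate_fn=PinnableBatch, pin_memory=True)
```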
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-sambanovasystems/llama_index/llms/sambanovasystems/base.py
|
{
"start": 23309,
"end": 61986
}
|
class ____(LLM):
"""
SambaStudio model.
Setup:
To use, you should have the environment variables:
``SAMBASTUDIO_URL`` set with your SambaStudio deployed endpoint URL.
``SAMBASTUDIO_API_KEY`` set with your SambaStudio deployed endpoint Key.
https://docs.sambanova.ai/sambastudio/latest/index.html
Examples:
```python
SambaStudio(
sambastudio_url = set with your SambaStudio deployed endpoint URL,
sambastudio_api_key = set with your SambaStudio deployed endpoint Key.
model = model or expert name (set for CoE endpoints),
max_tokens = max number of tokens to generate,
temperature = model temperature,
context_window = model context window,
top_p = model top p,
top_k = model top k,
do_sample = whether to do sample
process_prompt = whether to process prompt
(set for CoE generic v1 and v2 endpoints)
stream_options = include usage to get generation metrics
special_tokens = start, start_role, end_role, end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt
set to false or for StandAlone v1 and v2 endpoints)
model_kwargs: Optional = Extra Key word arguments to pass to the model.
)
```
Key init args — completion params:
model: str
The name of the model to use, e.g., Meta-Llama-3-70B-Instruct-4096
(set for CoE endpoints).
streaming: bool
Whether to use streaming handler when using non streaming methods
max_tokens: int
max tokens to generate
context_window: int
model context window
temperature: float
model temperature
top_p: float
model top p
top_k: int
model top k
do_sample: bool
whether to do sample
process_prompt:
whether to process prompt (set for CoE generic v1 and v2 endpoints)
stream_options: dict
stream options, include usage to get generation metrics
special_tokens: dict
start, start_role, end_role and end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt set to false
or for StandAlone v1 and v2 endpoints) default to llama3 special tokens
model_kwargs: dict
Extra Key word arguments to pass to the model.
Key init args — client params:
sambastudio_url: str
SambaStudio endpoint URL
sambastudio_api_key: str
SambaStudio endpoint api key
Instantiate:
```python
from llama_index.llms.sambanova import SambaStudio
llm = SambaStudio(
sambastudio_url = set with your SambaStudio deployed endpoint URL,
sambastudio_api_key = set with your SambaStudio deployed endpoint Key.
model = model or expert name (set for CoE endpoints),
max_tokens = max number of tokens to generate,
temperature = model temperature,
context_window = model context window,
top_p = model top p,
top_k = model top k,
do_sample = whether to do sample
process_prompt = whether to process prompt
(set for CoE generic v1 and v2 endpoints)
stream_options = include usage to get generation metrics
special_tokens = start, start_role, end_role, and end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt
set to false or for StandAlone v1 and v2 endpoints)
model_kwargs: Optional = Extra Key word arguments to pass to the model.
)
```
Complete:
```python
prompt = "Tell me about Naruto Uzumaki in one sentence"
response = llm.complete(prompt)
```
Chat:
```python
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=("You're a helpful assistant")),
ChatMessage(role=MessageRole.USER, content="Tell me about Naruto Uzumaki in one sentence")
]
response = llm.chat(messages)
```
Stream:
```python
prompt = "Tell me about Naruto Uzumaki in one sentence"
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=("You're a helpful assistant")),
ChatMessage(role=MessageRole.USER, content="Tell me about Naruto Uzumaki in one sentence")
]
for chunk in llm.stream_complete(prompt):
print(chunk.text)
for chunk in llm.stream_chat(messages):
print(chunk.message.content)
```
Async:
```python
prompt = "Tell me about Naruto Uzumaki in one sentence"
asyncio.run(llm.acomplete(prompt))
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=("You're a helpful assistant")),
ChatMessage(role=MessageRole.USER, content="Tell me about Naruto Uzumaki in one sentence")
]
asyncio.run(llm.achat(chat_text_msgs))
```
Response metadata and usage
```python
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=("You're a helpful assistant")),
ChatMessage(role=MessageRole.USER, content="Tell me about Naruto Uzumaki in one sentence")
]
metadata_and_usage = llm.chat(messages).message.additional_kwargs
print(metadata_and_usage)
```
"""
model_config = ConfigDict(
protected_namespaces=("pydantic_model_",), arbitrary_types_allowed=True
)
sambastudio_url: str = Field(description="SambaStudio Url")
sambastudio_api_key: SecretStr = Field(description="SambaStudio api key")
base_url: str = Field(
default="", exclude=True, description="SambaStudio non streaming Url"
)
streaming_url: str = Field(
default="", exclude=True, description="SambaStudio streaming Url"
)
model: Optional[str] = Field(
default=None,
description="The name of the model or expert to use (for CoE endpoints)",
)
streaming: bool = Field(
default=False,
description="Whether to use streaming handler when using non streaming methods",
)
context_window: int = Field(default=4096, description="context window")
max_tokens: int = Field(default=1024, description="max tokens to generate")
temperature: Optional[float] = Field(default=0.7, description="model temperature")
top_p: Optional[float] = Field(default=None, description="model top p")
top_k: Optional[int] = Field(default=None, description="model top k")
do_sample: Optional[bool] = Field(
default=None, description="whether to do sampling"
)
process_prompt: Optional[bool] = Field(
default=True,
description="whether process prompt (for CoE generic v1 and v2 endpoints)",
)
stream_options: dict = Field(
default_factory=lambda: {"include_usage": True},
description="stream options, include usage to get generation metrics",
)
special_tokens: dict = Field(
default={
"start": "<|begin_of_text|>",
"start_role": "<|begin_of_text|><|start_header_id|>{role}<|end_header_id|>",
"end_role": "<|eot_id|>",
"end": "<|start_header_id|>assistant<|end_header_id|>\n",
},
description="start, start_role, end_role and end special tokens (set for CoE generic v1 and v2 endpoints when process prompt set to false or for StandAlone v1 and v2 endpoints) default to llama3 special tokens",
)
model_kwargs: Optional[Dict[str, Any]] = Field(
default=None, description="Key word arguments to pass to the model."
)
@classmethod
def class_name(cls) -> str:
return "SambaStudio"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
)
def __init__(self, **kwargs: Any) -> None:
"""Init and validate environment variables."""
kwargs["sambastudio_url"] = get_from_param_or_env(
"sambastudio_url", kwargs.get("sambastudio_url"), "SAMBASTUDIO_URL"
)
kwargs["sambastudio_api_key"] = get_from_param_or_env(
"sambastudio_api_key",
kwargs.get("sambastudio_api_key"),
"SAMBASTUDIO_API_KEY",
)
kwargs["sambastudio_url"], kwargs["streaming_url"] = self._get_sambastudio_urls(
kwargs["sambastudio_url"]
)
super().__init__(**kwargs)
def _messages_to_string(self, messages: Sequence[ChatMessage]) -> str:
"""
Convert a sequence of ChatMessages to:
- dumped json string with Role / content dict structure when process_prompt is true,
- string with special tokens if process_prompt is false for generic V1 and V2 endpoints.
Args:
messages: sequence of ChatMessages
Returns:
str: string to send as model input depending on process_prompt param
"""
if self.process_prompt:
messages_dict: Dict[str, Any] = {
"conversation_id": "sambaverse-conversation-id",
"messages": [],
}
for message in messages:
messages_dict["messages"].append(
{
"role": message.role,
"content": message.content,
}
)
messages_string = json.dumps(messages_dict)
else:
messages_string = self.special_tokens["start"]
for message in messages:
messages_string += self.special_tokens["start_role"].format(
role=self._get_role(message)
)
messages_string += f" {message.content} "
messages_string += self.special_tokens["end_role"]
messages_string += self.special_tokens["end"]
return messages_string
def _get_sambastudio_urls(self, url: str) -> Tuple[str, str]:
"""
Get streaming and non streaming URLs from the given URL.
Args:
url: string with sambastudio base or streaming endpoint url
Returns:
base_url: string with url to do non streaming calls
streaming_url: string with url to do streaming calls
"""
if "chat/completions" in url:
base_url = url
stream_url = url
else:
if "stream" in url:
base_url = url.replace("stream/", "")
stream_url = url
else:
base_url = url
if "generic" in url:
stream_url = "generic/stream".join(url.split("generic"))
else:
raise ValueError("Unsupported URL")
return base_url, stream_url
def _handle_request(
self,
messages: Sequence[ChatMessage],
stop: Optional[List[str]] = None,
streaming: Optional[bool] = False,
) -> Response:
"""
Performs a post request to the LLM API.
Args:
messages_dicts: List of role / content dicts to use as input.
stop: list of stop tokens
streaming: whether to do a streaming call
Returns:
A request Response object
"""
# create request payload for openai compatible API
if "chat/completions" in self.sambastudio_url:
messages_dicts = _create_message_dicts(messages)
data = {
"messages": messages_dicts,
"max_tokens": self.max_tokens,
"stop": stop,
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"stream": streaming,
"stream_options": self.stream_options,
}
data = {key: value for key, value in data.items() if value is not None}
headers = {
"Authorization": f"Bearer "
f"{self.sambastudio_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
# create request payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
items = [{"id": "item0", "value": self._messages_to_string(messages)}]
params: Dict[str, Any] = {
"select_expert": self.model,
"process_prompt": self.process_prompt,
"max_tokens_to_generate": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
}
if self.model_kwargs is not None:
params = {**params, **self.model_kwargs}
params = {key: value for key, value in params.items() if value is not None}
data = {"items": items, "params": params}
headers = {"key": self.sambastudio_api_key.get_secret_value()}
# create request payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
params = {
"select_expert": self.model,
"process_prompt": self.process_prompt,
"max_tokens_to_generate": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
}
if self.model_kwargs is not None:
params = {**params, **self.model_kwargs}
params = {
key: {"type": type(value).__name__, "value": str(value)}
for key, value in params.items()
if value is not None
}
if streaming:
data = {
"instance": self._messages_to_string(messages),
"params": params,
}
else:
data = {
"instances": [self._messages_to_string(messages)],
"params": params,
}
headers = {"key": self.sambastudio_api_key.get_secret_value()}
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
http_session = requests.Session()
if streaming:
response = http_session.post(
self.streaming_url, headers=headers, json=data, stream=True
)
else:
response = http_session.post(
self.base_url, headers=headers, json=data, stream=False
)
if response.status_code != 200:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{response.text}."
)
return response
async def _handle_request_async(
self,
messages: Sequence[ChatMessage],
stop: Optional[List[str]] = None,
streaming: Optional[bool] = False,
) -> Response:
"""
Performs an async post request to the LLM API.
Args:
messages_dicts: List of role / content dicts to use as input.
stop: list of stop tokens
streaming: whether to do a streaming call
Returns:
A request Response object
"""
# create request payload for openai compatible API
if "chat/completions" in self.sambastudio_url:
messages_dicts = _create_message_dicts(messages)
data = {
"messages": messages_dicts,
"max_tokens": self.max_tokens,
"stop": stop,
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"stream": streaming,
"stream_options": self.stream_options,
}
data = {key: value for key, value in data.items() if value is not None}
headers = {
"Authorization": f"Bearer "
f"{self.sambastudio_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
# create request payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
items = [{"id": "item0", "value": self._messages_to_string(messages)}]
params: Dict[str, Any] = {
"select_expert": self.model,
"process_prompt": self.process_prompt,
"max_tokens_to_generate": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
}
if self.model_kwargs is not None:
params = {**params, **self.model_kwargs}
params = {key: value for key, value in params.items() if value is not None}
data = {"items": items, "params": params}
headers = {"key": self.sambastudio_api_key.get_secret_value()}
# create request payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
params = {
"select_expert": self.model,
"process_prompt": self.process_prompt,
"max_tokens_to_generate": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
}
if self.model_kwargs is not None:
params = {**params, **self.model_kwargs}
params = {
key: {"type": type(value).__name__, "value": str(value)}
for key, value in params.items()
if value is not None
}
if streaming:
data = {
"instance": self._messages_to_string(messages),
"params": params,
}
else:
data = {
"instances": [self._messages_to_string(messages)],
"params": params,
}
headers = {"key": self.sambastudio_api_key.get_secret_value()}
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
async with aiohttp.ClientSession() as session:
if streaming:
url = self.streaming_url
else:
url = self.base_url
async with session.post(
url,
headers=headers,
json=data,
) as response:
if response.status != 200:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status}."
f"{response.text}."
)
response_dict = await response.json()
if response_dict.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed with status code {response.status}.",
f"{response_dict}.",
)
return response_dict
def _process_response(self, response: Response) -> ChatMessage:
"""
Process a non streaming response from the api.
Args:
response: A request Response object
Returns:
generation: a ChatMessage with model generation
"""
# Extract json payload form response
try:
response_dict = response.json()
except Exception as e:
raise RuntimeError(
f"Sambanova /complete call failed couldn't get JSON response {e}"
f"response: {response.text}"
)
# process response payload for openai compatible API
if "chat/completions" in self.sambastudio_url:
content = response_dict["choices"][0]["message"]["content"]
response_metadata = {
"finish_reason": response_dict["choices"][0]["finish_reason"],
"usage": response_dict.get("usage"),
"model_name": response_dict["model"],
"system_fingerprint": response_dict["system_fingerprint"],
"created": response_dict["created"],
}
# process response payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
content = response_dict["items"][0]["value"]["completion"]
response_metadata = response_dict["items"][0]
# process response payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
content = response_dict["predictions"][0]["completion"]
response_metadata = response_dict
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
return ChatMessage(
content=content,
additional_kwargs=response_metadata,
role=MessageRole.ASSISTANT,
)
def _process_stream_response(self, response: Response) -> Iterator[ChatMessage]:
"""
Process a streaming response from the api.
Args:
response: An iterable request Response object
Yields:
generation: an Iterator[ChatMessage] with model partial generation
"""
try:
import sseclient
except ImportError:
raise ImportError(
"could not import sseclient library"
"Please install it with `pip install sseclient-py`."
)
# process response payload for openai compatible API
if "chat/completions" in self.sambastudio_url:
finish_reason = ""
content = ""
client = sseclient.SSEClient(response)
for event in client.events():
if event.event == "error_event":
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
try:
# check if the response is not a final event ("[DONE]")
if event.data != "[DONE]":
if isinstance(event.data, str):
data = json.loads(event.data)
else:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if data.get("error"):
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response.status_code}."
f"{event.data}."
)
if len(data["choices"]) > 0:
finish_reason = data["choices"][0].get("finish_reason")
content += data["choices"][0]["delta"]["content"]
id = data["id"]
metadata = {}
else:
content += ""
id = data["id"]
metadata = {
"finish_reason": finish_reason,
"usage": data.get("usage"),
"model_name": data["model"],
"system_fingerprint": data["system_fingerprint"],
"created": data["created"],
}
if data.get("usage") is not None:
content += ""
id = data["id"]
metadata = {
"finish_reason": finish_reason,
"usage": data.get("usage"),
"model_name": data["model"],
"system_fingerprint": data["system_fingerprint"],
"created": data["created"],
}
yield ChatMessage(
role=MessageRole.ASSISTANT,
content=content,
additional_kwargs=metadata,
)
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"data: {event.data}"
)
# process response payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
content = ""
for line in response.iter_lines():
try:
data = json.loads(line)
content += data["result"]["items"][0]["value"]["stream_token"]
id = data["result"]["items"][0]["id"]
if data["result"]["items"][0]["value"]["is_last_response"]:
metadata = {
"finish_reason": data["result"]["items"][0]["value"].get(
"stop_reason"
),
"prompt": data["result"]["items"][0]["value"].get("prompt"),
"usage": {
"prompt_tokens_count": data["result"]["items"][0][
"value"
].get("prompt_tokens_count"),
"completion_tokens_count": data["result"]["items"][0][
"value"
].get("completion_tokens_count"),
"total_tokens_count": data["result"]["items"][0][
"value"
].get("total_tokens_count"),
"start_time": data["result"]["items"][0]["value"].get(
"start_time"
),
"end_time": data["result"]["items"][0]["value"].get(
"end_time"
),
"model_execution_time": data["result"]["items"][0][
"value"
].get("model_execution_time"),
"time_to_first_token": data["result"]["items"][0][
"value"
].get("time_to_first_token"),
"throughput_after_first_token": data["result"]["items"][
0
]["value"].get("throughput_after_first_token"),
"batch_size_used": data["result"]["items"][0][
"value"
].get("batch_size_used"),
},
}
else:
metadata = {}
yield ChatMessage(
role=MessageRole.ASSISTANT,
content=content,
additional_kwargs=metadata,
)
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"line: {line}"
)
# process response payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
content = ""
for line in response.iter_lines():
try:
data = json.loads(line)
content += data["result"]["responses"][0]["stream_token"]
id = None
if data["result"]["responses"][0]["is_last_response"]:
metadata = {
"finish_reason": data["result"]["responses"][0].get(
"stop_reason"
),
"prompt": data["result"]["responses"][0].get("prompt"),
"usage": {
"prompt_tokens_count": data["result"]["responses"][
0
].get("prompt_tokens_count"),
"completion_tokens_count": data["result"]["responses"][
0
].get("completion_tokens_count"),
"total_tokens_count": data["result"]["responses"][
0
].get("total_tokens_count"),
"start_time": data["result"]["responses"][0].get(
"start_time"
),
"end_time": data["result"]["responses"][0].get(
"end_time"
),
"model_execution_time": data["result"]["responses"][
0
].get("model_execution_time"),
"time_to_first_token": data["result"]["responses"][
0
].get("time_to_first_token"),
"throughput_after_first_token": data["result"][
"responses"
][0].get("throughput_after_first_token"),
"batch_size_used": data["result"]["responses"][0].get(
"batch_size_used"
),
},
}
else:
metadata = {}
yield ChatMessage(
role=MessageRole.ASSISTANT,
content=content,
additional_kwargs=metadata,
)
except Exception as e:
raise RuntimeError(
f"Error getting content chunk raw streamed response: {e}"
f"line: {line}"
)
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
async def _process_response_async(
self, response_dict: Dict[str, Any]
) -> ChatMessage:
"""
Process a non streaming response from the api.
Args:
response: A request Response object
Returns:
generation: a ChatMessage with model generation
"""
# process response payload for openai compatible API
if "chat/completions" in self.sambastudio_url:
content = response_dict["choices"][0]["message"]["content"]
response_metadata = {
"finish_reason": response_dict["choices"][0]["finish_reason"],
"usage": response_dict.get("usage"),
"model_name": response_dict["model"],
"system_fingerprint": response_dict["system_fingerprint"],
"created": response_dict["created"],
}
# process response payload for generic v2 API
elif "api/v2/predict/generic" in self.sambastudio_url:
content = response_dict["items"][0]["value"]["completion"]
response_metadata = response_dict["items"][0]
# process response payload for generic v1 API
elif "api/predict/generic" in self.sambastudio_url:
content = response_dict["predictions"][0]["completion"]
response_metadata = response_dict
else:
raise ValueError(
f"Unsupported URL{self.sambastudio_url}"
"only openai, generic v1 and generic v2 APIs are supported"
)
return ChatMessage(
content=content,
additional_kwargs=response_metadata,
role=MessageRole.ASSISTANT,
)
@llm_chat_callback()
def chat(
self,
messages: Sequence[ChatMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> ChatResponse:
"""
Calls the chat implementation of the SambaStudio model.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
Returns:
ChatResponse with model generation
"""
# if self.streaming:
# stream_iter = self._stream(
# messages, stop=stop, **kwargs
# )
# if stream_iter:
# return generate_from_stream(stream_iter)
response = self._handle_request(messages, stop, streaming=False)
message = self._process_response(response)
return ChatResponse(message=message)
@llm_chat_callback()
def stream_chat(
self,
messages: Sequence[ChatMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> ChatResponseGen:
"""
Stream the output of the SambaStudio model.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
Yields:
chunk: ChatResponseGen with model partial generation
"""
response = self._handle_request(messages, stop, streaming=True)
for ai_message_chunk in self._process_stream_response(response):
chunk = ChatResponse(message=ai_message_chunk)
yield chunk
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
complete_fn = chat_to_completion_decorator(self.chat)
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat)
return stream_complete_fn(prompt, **kwargs)
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> ChatResponse:
"""
Calls the chat implementation of the SambaStudio model.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
Returns:
ChatResponse with model generation
"""
response_dict = await self._handle_request_async(
messages, stop, streaming=False
)
message = await self._process_response_async(response_dict)
return ChatResponse(message=message)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> ChatResponseAsyncGen:
raise NotImplementedError(
"SambaStudio does not currently support async streaming."
)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
acomplete_fn = achat_to_completion_decorator(self.achat)
return await acomplete_fn(prompt, **kwargs)
@llm_completion_callback()
def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError(
"SambaStudio does not currently support async streaming."
)
|
SambaStudio
|
python
|
doocs__leetcode
|
lcp/LCP 01. 猜数字/Solution.py
|
{
"start": 0,
"end": 138
}
|
class ____:
def game(self, guess: List[int], answer: List[int]) -> int:
return sum(a == b for a, b in zip(guess, answer))
|
Solution
|
python
|
sanic-org__sanic
|
guide/webapp/display/page/renderer.py
|
{
"start": 265,
"end": 2007
}
|
class ____(BaseRenderer):
def render(self, request: Request, language: str, path: str) -> Builder:
self._setup_request(request, language, path)
builder = self.get_builder(
full=request.headers.get("HX-Request") is None,
language=language,
)
self._body(request, builder, language, path)
return builder
def title(self) -> str:
request = Request.get_current()
title: str | None = None
if request and (
current_page := getattr(request.ctx, "current_page", None)
):
title = f"{self.base_title} - {current_page.meta.title}"
return title or self.base_title
def _setup_request(self, request: Request, language: str, path: str):
prev_page, current_page, next_page = Page.get(language, path)
request.ctx.language = (
Page.DEFAULT_LANGUAGE if language == "api" else language
)
request.ctx.current_page = current_page
request.ctx.previous_page = prev_page
request.ctx.next_page = next_page
def _body(
self, request: Request, builder: Builder, language: str, path: str
):
current_page = request.ctx.current_page
with self._base(request, builder, current_page):
if current_page is None:
builder.h1("Not found")
return
builder(HTML(current_page.content))
@contextmanager
def _base(self, request: Request, builder: Builder, page: Page | None):
layout_type: type[BaseLayout] = (
page.get_layout() if page else BaseLayout
)
layout = layout_type(builder)
with layout(request, builder.full):
yield
|
PageRenderer
|
python
|
apache__airflow
|
providers/dingding/src/airflow/providers/dingding/operators/dingding.py
|
{
"start": 1103,
"end": 2844
}
|
class ____(BaseOperator):
"""
This operator allows sending DingTalk messages using the Custom Robot API.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DingdingOperator`
:param dingding_conn_id: Dingding connection id that has access token in the password field,
and an optional host name in the host field; if the host is not set, the default
``https://oapi.dingtalk.com`` is used.
:param message_type: Message type you want to send to Dingding; five types are supported so far,
including ``text``, ``link``, ``markdown``, ``actionCard``, ``feedCard``.
:param message: The message to send to the chat group
:param at_mobiles: Remind specific users with this message
:param at_all: Remind all people in the group or not. If True, this overrides ``at_mobiles``
"""
template_fields: Sequence[str] = ("message",)
ui_color = "#4ea4d4" # Operator icon color
def __init__(
self,
*,
dingding_conn_id: str = "dingding_default",
message_type: str = "text",
message: str | dict | None = None,
at_mobiles: list[str] | None = None,
at_all: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dingding_conn_id = dingding_conn_id
self.message_type = message_type
self.message = message
self.at_mobiles = at_mobiles
self.at_all = at_all
def execute(self, context: Context) -> None:
self.log.info("Sending Dingding message.")
hook = DingdingHook(
self.dingding_conn_id, self.message_type, self.message, self.at_mobiles, self.at_all
)
hook.send()
|
DingdingOperator
|
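A minimal usage sketch for this operator; the connection id, message, and phone number are placeholders, and the import path matches the provider module above:

```python
# Illustrative only: sends a plain text message to the robot's chat group.
from airflow.providers.dingding.operators.dingding import DingdingOperator

notify = DingdingOperator(
    task_id="notify_dingding",
    dingding_conn_id="dingding_default",
    message_type="text",
    message="Nightly pipeline finished successfully.",
    at_mobiles=["13800000000"],  # hypothetical phone number
    at_all=False,
)
```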