| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
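For orientation, a minimal sketch of consuming rows with this schema via the `datasets` library; the dataset identifier below is hypothetical, and the field access assumes rows are plain dicts shaped as previewed here:

```python
from datasets import load_dataset  # pip install datasets

# Hypothetical dataset id; substitute the real identifier when known.
ds = load_dataset("example-org/masked-class-names", split="train")

row = ds[0]
# `class_span` holds character offsets of the class within the original file.
span = row["class_span"]
# Recover the unmasked snippet by substituting the target class name back in.
unmasked = row["source"].replace("____", row["target"], 1)
print(row["repo"], row["path"], span["start"], span["end"])
```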
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/dataclassHash1.py
|
{
"start": 150,
"end": 264
}
|
class ____:
a: int
# This should generate an error.
v1: Hashable = DC1(0)
@dataclass(eq=True, frozen=True)
|
DC1
|
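This pyright sample tests dataclass hashability: with `eq=True` and `frozen=False`, `@dataclass` sets `__hash__` to `None`, so assigning an instance to `Hashable` is an error, while `eq=True, frozen=True` synthesizes a `__hash__` (the trailing decorator in the span belongs to the next class in the file). A self-contained illustration of the rule, with illustrative class names:

```python
from dataclasses import dataclass
from typing import Hashable

@dataclass  # eq=True by default; with frozen=False, __hash__ is set to None
class Unfrozen:
    a: int

@dataclass(eq=True, frozen=True)  # eq + frozen synthesizes a __hash__
class Frozen:
    a: int

ok: Hashable = Frozen(0)
# err: Hashable = Unfrozen(0)  # a checker such as pyright reports an error here
print(Unfrozen.__hash__ is None, isinstance(Frozen(0), Hashable))  # True True
```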
python
|
huggingface__transformers
|
src/transformers/models/squeezebert/modeling_squeezebert.py
|
{
"start": 25807,
"end": 30544
}
|
class ____(SqueezeBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = SqueezeBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, MultipleChoiceModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see
*input_ids* above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
|
SqueezeBertForMultipleChoice
|
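The forward pass above implements the standard multiple-choice trick: flatten `(batch_size, num_choices, seq_len)` inputs into the batch dimension, score each choice with a one-unit head, then reshape so `CrossEntropyLoss` picks the correct choice per example. A shape-only sketch of that logic, with random tensors standing in for the model:

```python
import torch
from torch import nn

batch_size, num_choices, seq_len, hidden = 2, 4, 8, 16
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))

# Flatten choices into the batch dimension, as forward() does above.
flat_ids = input_ids.view(-1, input_ids.size(-1))   # (8, 8)
pooled = torch.randn(flat_ids.size(0), hidden)      # stand-in for pooled_output
logits = nn.Linear(hidden, 1)(pooled)               # one score per (example, choice)
reshaped_logits = logits.view(-1, num_choices)      # (2, 4)

labels = torch.tensor([1, 3])                       # index of the correct choice
loss = nn.CrossEntropyLoss()(reshaped_logits, labels)
print(reshaped_logits.shape, loss.item())
```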
python
|
astropy__astropy
|
astropy/io/fits/header.py
|
{
"start": 68488,
"end": 69404
}
|
class ____:
"""
This class allows access to cards through the _BasicHeader.cards attribute.
This is needed because, during HDU class detection, some HDUs use
the .cards interface. Cards cannot be modified here, as the _BasicHeader
object will be deleted once the HDU object is created.
"""
def __init__(self, header):
self.header = header
def __getitem__(self, key):
# .cards is a list of cards, so key here is an integer.
# get the keyword name from its index.
key = self.header._keys[key]
# then we get the card from the _BasicHeader._cards list, or parse it
# if needed.
try:
return self.header._cards[key]
except KeyError:
cardstr = self.header._raw_cards[key]
card = Card.fromstring(cardstr)
self.header._cards[key] = card
return card
|
_BasicHeaderCards
|
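The class above parses raw card strings lazily and memoizes the result. The same parse-on-first-access pattern in isolation (names and the stand-in parser are illustrative, not astropy API):

```python
class LazyCards:
    """Parse raw card strings on first access and memoize the result."""

    def __init__(self, raw_cards):
        self._raw = list(raw_cards)
        self._parsed = {}  # index -> parsed card

    def __getitem__(self, index):
        try:
            return self._parsed[index]
        except KeyError:
            # Stand-in for Card.fromstring(cardstr) in the astropy code.
            card = tuple(part.strip() for part in self._raw[index].split("=", 1))
            self._parsed[index] = card
            return card

cards = LazyCards(["SIMPLE  = T", "NAXIS   = 0"])
assert cards[0] is cards[0]  # parsed once, then served from the cache
print(cards[1])              # ('NAXIS', '0')
```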
python
|
encode__django-rest-framework
|
rest_framework/authentication.py
|
{
"start": 3639,
"end": 4896
}
|
class ____(BaseAuthentication):
"""
Use Django's session framework for authentication.
"""
def authenticate(self, request):
"""
Returns a `User` if the request session currently has a logged in user.
Otherwise returns `None`.
"""
# Get the session-based user from the underlying HttpRequest object
user = getattr(request._request, 'user', None)
# Unauthenticated, CSRF validation not required
if not user or not user.is_active:
return None
self.enforce_csrf(request)
# CSRF passed with authenticated user
return (user, None)
def enforce_csrf(self, request):
"""
Enforce CSRF validation for session based authentication.
"""
def dummy_get_response(request): # pragma: no cover
return None
check = CSRFCheck(dummy_get_response)
# populates request.META['CSRF_COOKIE'], which is used in process_view()
check.process_request(request)
reason = check.process_view(request, None, (), {})
if reason:
# CSRF failed, bail with explicit error message
raise exceptions.PermissionDenied('CSRF Failed: %s' % reason)
|
SessionAuthentication
|
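To enable this authenticator, list it in the standard DRF setting; `authenticate` then returns `(user, None)` for active session users and enforces CSRF. A typical configuration:

```python
# settings.py
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": [
        # Session auth for the browsable API / same-origin browser clients.
        "rest_framework.authentication.SessionAuthentication",
        # Token auth for programmatic clients (optional).
        "rest_framework.authentication.TokenAuthentication",
    ],
}
```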
python
|
huggingface__transformers
|
src/transformers/models/kosmos2_5/modeling_kosmos2_5.py
|
{
"start": 75303,
"end": 83188
}
|
class ____(Kosmos2_5PreTrainedModel, GenerationMixin):
config_class = Kosmos2_5Config
def __init__(self, config: Kosmos2_5Config):
super().__init__(config)
self.text_model = Kosmos2_5TextForCausalLM(config.text_config)
self.vision_model = Kosmos2_5VisionModel(config.vision_config)
self.image_to_text_projection = Kosmos2_5ImageToTextProjection(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.text_model.model.embed_tokens
def set_input_embeddings(self, value):
self.text_model.model.embed_tokens = value
def get_output_embeddings(self) -> nn.Module:
return self.text_model.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
self.text_model.set_output_embeddings(new_embeddings)
@can_return_tuple
@add_start_docstrings_to_model_forward(KOSMOS2_5_INPUTS_DOCSTRING)
@replace_return_docstrings(
output_type=Kosmos2_5ForConditionalGenerationModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
flattened_patches: Optional[torch.Tensor] = None,
width: Optional[torch.Tensor] = None,
height: Optional[torch.Tensor] = None,
image_embeds_position_mask: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
image_embeds: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Kosmos2_5ForConditionalGenerationModelOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Returns:
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> import torch
>>> from transformers import AutoProcessor, Kosmos2_5ForConditionalGeneration
>>> repo = "microsoft/kosmos-2.5"
>>> device = "cuda:0"
>>> dtype = torch.bfloat16 # torch.float16
>>> model = Kosmos2_5ForConditionalGeneration.from_pretrained(repo, device_map=device, dtype=dtype)
>>> processor = AutoProcessor.from_pretrained(repo)
>>> url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> prompt = "<ocr>" # <md>
>>> inputs = processor(text=prompt, images=image, return_tensors="pt")
>>> height, width = inputs.pop("height"), inputs.pop("width")
>>> inputs = {k: v.to(device) if v is not None else None for k, v in inputs.items()}
>>> inputs["flattened_patches"] = inputs["flattened_patches"].to(dtype)
>>> generated_ids = model.generate(**inputs, max_new_tokens=1024)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> generated_text
'<ocr><bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_612></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_650></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_644></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_687></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n'
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
vision_model_output = None
projection_attentions = None
if image_embeds is None:
if flattened_patches is not None:
vision_model_output = self.vision_model(
flattened_patches=flattened_patches,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
**kwargs,
)
image_embeds = nn.functional.normalize(vision_model_output.last_hidden_state, dim=-1)
image_embeds, projection_attentions = self.image_to_text_projection(image_embeds)
lm_outputs: CausalLMOutputWithCrossAttentions = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
image_embeds=image_embeds,
image_embeds_position_mask=image_embeds_position_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
position_ids=position_ids,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
logits_to_keep=logits_to_keep,
**kwargs,
)
return Kosmos2_5ForConditionalGenerationModelOutput(
loss=lm_outputs.loss,
logits=lm_outputs.logits,
past_key_values=lm_outputs.past_key_values,
hidden_states=lm_outputs.hidden_states,
attentions=lm_outputs.attentions,
width=width,
height=height,
image_embeds=image_embeds,
projection_attentions=projection_attentions,
vision_model_output=vision_model_output,
)
def prepare_inputs_for_generation(
self,
input_ids,
flattened_patches=None,
image_embeds=None,
image_embeds_position_mask=None,
past_key_values=None,
attention_mask=None,
use_cache=None,
cache_position=None,
position_ids=None,
**model_kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = self.text_model.prepare_inputs_for_generation(
input_ids,
image_embeds=image_embeds,
image_embeds_position_mask=image_embeds_position_mask,
past_key_values=past_key_values,
attention_mask=attention_mask,
use_cache=use_cache,
cache_position=cache_position,
position_ids=position_ids,
**model_kwargs,
)
if cache_position[0] == 0:
# If we're in the cached decoding stage, `flattened_patches` should be `None` because `input_ids` no longer contain the special image tokens
# Otherwise we need `flattened_patches` to be passed to the model
model_inputs["flattened_patches"] = flattened_patches
return model_inputs
__all__ = [
"Kosmos2_5ForConditionalGeneration",
"Kosmos2_5Model",
"Kosmos2_5PreTrainedModel",
]
|
Kosmos2_5ForConditionalGeneration
|
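`prepare_inputs_for_generation` forwards `flattened_patches` only on the prefill step (`cache_position[0] == 0`); on cached decode steps the image content already lives in the KV cache. The gate in isolation, as a sketch rather than the model's full input preparation:

```python
import torch

def prepare_inputs(input_ids, cache_position, flattened_patches=None):
    # Sketch of the prefill-vs-decode gate used above (not the full method).
    model_inputs = {"input_ids": input_ids}
    if cache_position[0] == 0:  # prefill: vision inputs are still needed
        model_inputs["flattened_patches"] = flattened_patches
    return model_inputs         # cached decode steps omit the image

prefill = prepare_inputs(torch.tensor([[1, 2, 3]]), torch.arange(3),
                         flattened_patches=torch.zeros(1, 4, 8))
decode = prepare_inputs(torch.tensor([[4]]), torch.tensor([3]))
print("flattened_patches" in prefill, "flattened_patches" in decode)  # True False
```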
python
|
fluentpython__example-code-2e
|
21-async/mojifinder/bottle.py
|
{
"start": 116932,
"end": 117161
}
|
class ____(ServerAdapter):
def run(self, handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
|
GeventSocketIOServer
|
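Bottle server adapters all share this shape: subclass `ServerAdapter` and implement `run(handler)`, where `handler` is the WSGI application. A minimal custom adapter using only the standard library, assuming the `bottle` package is importable:

```python
from bottle import Bottle, ServerAdapter

class WSGIRefAdapter(ServerAdapter):
    def run(self, handler):
        from wsgiref.simple_server import make_server
        # self.host / self.port / self.options are set by ServerAdapter.__init__
        make_server(self.host, self.port, handler, **self.options).serve_forever()

app = Bottle()

@app.route("/")
def index():
    return "hello"

# app.run(server=WSGIRefAdapter, host="localhost", port=8080)  # blocks
```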
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/workspace/context.py
|
{
"start": 33425,
"end": 49129
}
|
class ____(IWorkspaceProcessContext[WorkspaceRequestContext]):
"""Process-scoped object that tracks the state of a workspace.
1. Maintains an up-to-date dictionary of repository locations
2. Creates a `WorkspaceRequestContext` to be the workspace for each request
3. Runs watch thread processes that monitor repository locations
To access a CodeLocation, you should create a `WorkspaceRequestContext`
using `create_request_context`.
"""
def __init__(
self,
instance: DagsterInstance,
workspace_load_target: Optional[WorkspaceLoadTarget],
version: str = "",
read_only: bool = False,
grpc_server_registry: Optional[GrpcServerRegistry] = None,
code_server_log_level: str = "INFO",
server_command: GrpcServerCommand = GrpcServerCommand.API_GRPC,
):
self._stack = ExitStack()
check.opt_str_param(version, "version")
check.bool_param(read_only, "read_only")
self._instance = check.inst_param(instance, "instance", DagsterInstance)
self._workspace_load_target = check.opt_inst_param(
workspace_load_target, "workspace_load_target", WorkspaceLoadTarget
)
self._read_only = read_only
self._version = version
# Guards changes to _current_workspace, _watch_thread_shutdown_events and _watch_threads
self._lock = threading.Lock()
self._watch_thread_shutdown_events: dict[str, threading.Event] = {}
self._watch_threads: dict[str, threading.Thread] = {}
self._state_subscribers_lock = threading.Lock()
self._state_subscriber_id_iter = count()
self._state_subscribers: dict[int, LocationStateSubscriber] = {}
self.add_state_subscriber(LocationStateSubscriber(self._location_state_events_handler))
if grpc_server_registry:
self._grpc_server_registry: GrpcServerRegistry = check.inst_param(
grpc_server_registry, "grpc_server_registry", GrpcServerRegistry
)
else:
self._grpc_server_registry = self._stack.enter_context(
GrpcServerRegistry(
instance_ref=self._instance.get_ref(),
server_command=server_command,
heartbeat_ttl=WEBSERVER_GRPC_SERVER_HEARTBEAT_TTL,
startup_timeout=instance.code_server_process_startup_timeout,
log_level=code_server_log_level,
wait_for_processes_on_shutdown=instance.wait_for_local_code_server_processes_on_shutdown,
additional_timeout_msg=INCREASE_TIMEOUT_DAGSTER_YAML_MSG,
)
)
self._current_workspace: CurrentWorkspace = CurrentWorkspace(code_location_entries={})
self._update_workspace(
{
origin.location_name: self._load_location(origin, reload=False)
for origin in self._origins
}
)
@property
def workspace_load_target(self) -> Optional[WorkspaceLoadTarget]:
return self._workspace_load_target
@property
def _origins(self) -> Sequence[CodeLocationOrigin]:
return self._workspace_load_target.create_origins() if self._workspace_load_target else []
def get_code_server_specs(self) -> Sequence[Mapping[str, Mapping[str, Any]]]:
result = []
for origin in self._origins:
if isinstance(origin, ManagedGrpcPythonEnvCodeLocationOrigin):
grpc_endpoint = self._grpc_server_registry.get_grpc_endpoint(origin)
server_spec = {
"location_name": origin.location_name,
"socket": grpc_endpoint.socket,
"port": grpc_endpoint.port,
"host": grpc_endpoint.host,
"additional_metadata": origin.loadable_target_origin.as_dict,
}
elif isinstance(origin, GrpcServerCodeLocationOrigin):
server_spec = {
"location_name": origin.location_name,
"host": origin.host,
"port": origin.port,
"socket": origin.socket,
"additional_metadata": origin.additional_metadata,
}
else:
check.failed(f"Unexpected origin type {origin}")
result.append({"grpc_server": {k: v for k, v in server_spec.items() if v is not None}})
return result
def add_state_subscriber(self, subscriber: LocationStateSubscriber) -> int:
token = next(self._state_subscriber_id_iter)
with self._state_subscribers_lock:
self._state_subscribers[token] = subscriber
return token
def rm_state_subscriber(self, token: int) -> None:
with self._state_subscribers_lock:
if token in self._state_subscribers:
del self._state_subscribers[token]
@property
def instance(self) -> DagsterInstance:
return self._instance
@property
def read_only(self) -> bool:
return self._read_only
@property
def permissions(self) -> Mapping[str, PermissionResult]:
return get_user_permissions(True)
def permissions_for_location(self, *, location_name: str) -> Mapping[str, PermissionResult]:
return get_location_scoped_user_permissions(True)
def permissions_for_owner(self, *, owner: str) -> Mapping[str, PermissionResult]:
return {}
@property
def version(self) -> str:
return self._version
def _send_state_event_to_subscribers(self, event: LocationStateChangeEvent) -> None:
check.inst_param(event, "event", LocationStateChangeEvent)
with self._state_subscribers_lock:
for subscriber in self._state_subscribers.values():
subscriber.handle_event(event)
def _start_watch_thread(self, origin: GrpcServerCodeLocationOrigin) -> None:
from dagster._grpc.server_watcher import create_grpc_watch_thread
location_name = origin.location_name
check.invariant(location_name not in self._watch_thread_shutdown_events)
client = origin.create_client()
shutdown_event, watch_thread = create_grpc_watch_thread(
location_name,
client,
on_updated=lambda location_name, new_server_id: self._send_state_event_to_subscribers(
LocationStateChangeEvent(
LocationStateChangeEventType.LOCATION_UPDATED,
location_name=location_name,
message="Server has been updated.",
server_id=new_server_id,
)
),
on_error=lambda location_name: self._send_state_event_to_subscribers(
LocationStateChangeEvent(
LocationStateChangeEventType.LOCATION_ERROR,
location_name=location_name,
message=(
"Unable to reconnect to server. You can reload the server once it is "
"reachable again"
),
)
),
)
self._watch_thread_shutdown_events[location_name] = shutdown_event
self._watch_threads[location_name] = watch_thread
watch_thread.start()
def _load_location(self, origin: CodeLocationOrigin, reload: bool) -> CodeLocationEntry:
location_name = origin.location_name
location = None
error = None
try:
if isinstance(origin, ManagedGrpcPythonEnvCodeLocationOrigin):
endpoint = (
self._grpc_server_registry.reload_grpc_endpoint(origin)
if reload
else self._grpc_server_registry.get_grpc_endpoint(origin)
)
location = GrpcServerCodeLocation(
origin=origin,
port=endpoint.port,
socket=endpoint.socket,
host=endpoint.host,
heartbeat=True,
watch_server=False,
grpc_server_registry=self._grpc_server_registry,
instance=self._instance,
)
else:
location = (
origin.reload_location(self.instance)
if reload
else origin.create_location(self.instance)
)
except Exception:
error = serializable_error_info_from_exc_info(sys.exc_info())
# In dagster dev, the code server process already logs the error, so we don't need to log it again from
# the workspace process context
if using_dagster_dev():
warnings.warn(f"Error loading repository location {location_name}")
else:
warnings.warn(
f"Error loading repository location {location_name}:{error.to_string()}"
)
load_time = get_current_timestamp()
if isinstance(location, GrpcServerCodeLocation):
version_key = location.server_id
else:
version_key = str(load_time)
return CodeLocationEntry(
origin=origin,
code_location=location,
load_error=error,
load_status=CodeLocationLoadStatus.LOADED,
display_metadata=(
location.get_display_metadata() if location else origin.get_display_metadata()
),
update_timestamp=load_time,
version_key=version_key,
definitions_source=DefinitionsSource.CODE_SERVER,
)
def get_current_workspace(self) -> CurrentWorkspace:
with self._lock:
return self._current_workspace
@property
def code_locations_count(self) -> int:
with self._lock:
return len(self._current_workspace.code_location_entries)
@property
def code_location_names(self) -> Sequence[str]:
with self._lock:
return list(self._current_workspace.code_location_entries)
def has_code_location(self, location_name: str) -> bool:
check.str_param(location_name, "location_name")
with self._lock:
return (
location_name in self._current_workspace.code_location_entries
and self._current_workspace.code_location_entries[location_name].code_location
is not None
)
def has_code_location_error(self, location_name: str) -> bool:
check.str_param(location_name, "location_name")
with self._lock:
return (
location_name in self._current_workspace.code_location_entries
and self._current_workspace.code_location_entries[location_name].load_error
is not None
)
def reload_code_location(self, name: str) -> None:
new_entry = self._load_location(
self._current_workspace.code_location_entries[name].origin, reload=True
)
with self._lock:
# Relying on GC to clean up the old location once nothing else
# is referencing it
self._current_workspace = self._current_workspace.with_code_location(name, new_entry)
def shutdown_code_location(self, name: str) -> None:
with self._lock:
self._current_workspace.code_location_entries[name].origin.shutdown_server()
def refresh_workspace(self) -> None:
updated_locations = {
origin.location_name: self._load_location(origin, reload=False)
for origin in self._origins
}
self._update_workspace(updated_locations)
def reload_workspace(self) -> None:
updated_locations = {
origin.location_name: self._load_location(origin, reload=True)
for origin in self._origins
}
self._update_workspace(updated_locations)
def _update_workspace(self, new_locations: dict[str, CodeLocationEntry]):
# minimize lock time by only holding while swapping data old to new
with self._lock:
previous_events = self._watch_thread_shutdown_events
self._watch_thread_shutdown_events = {}
previous_threads = self._watch_threads
self._watch_threads = {}
previous_locations = self._current_workspace.code_location_entries
self._current_workspace = CurrentWorkspace(code_location_entries=new_locations)
# start monitoring for new locations
for entry in new_locations.values():
if isinstance(entry.origin, GrpcServerCodeLocationOrigin):
self._start_watch_thread(entry.origin)
# clean up previous locations
for event in previous_events.values():
event.set()
for watch_thread in previous_threads.values():
watch_thread.join()
for entry in previous_locations.values():
if entry.code_location:
entry.code_location.cleanup()
def create_request_context(self, source: Optional[object] = None) -> WorkspaceRequestContext:
return WorkspaceRequestContext(
instance=self._instance,
current_workspace=self.get_current_workspace(),
process_context=self,
version=self.version,
source=source,
read_only=self._read_only,
)
def _location_state_events_handler(self, event: LocationStateChangeEvent) -> None:
# If the server was updated or we were not able to reconnect, we immediately reload the
# location handle
if event.event_type in (
LocationStateChangeEventType.LOCATION_UPDATED,
LocationStateChangeEventType.LOCATION_ERROR,
):
# In case of an updated location, reload the handle to get updated repository data and
# re-attach a subscriber
# In case of a location error, just reload the handle in order to update the workspace
# with the correct error messages
logging.getLogger("dagster-webserver").info(
f"Received {event.event_type} event for location {event.location_name}, refreshing"
)
self.refresh_code_location(event.location_name)
def refresh_code_location(self, name: str) -> None:
# This method reloads the webserver's copy of the code from the remote gRPC server without
# restarting it, and returns a new request context created from the updated process context
new_entry = self._load_location(
self._current_workspace.code_location_entries[name].origin, reload=False
)
with self._lock:
# Relying on GC to clean up the old location once nothing else
# is referencing it
self._current_workspace = self._current_workspace.with_code_location(name, new_entry)
def __enter__(self) -> Self:
return self
def __exit__(self, exception_type, exception_value, traceback) -> None:
self._update_workspace({}) # update to empty to close all current locations
self._stack.close()
def copy_for_test_instance(self, instance: DagsterInstance) -> "WorkspaceProcessContext":
"""Make a copy with a different instance, created for tests."""
return WorkspaceProcessContext(
instance=instance,
workspace_load_target=self.workspace_load_target,
version=self.version,
read_only=self.read_only,
grpc_server_registry=self._grpc_server_registry,
)
|
WorkspaceProcessContext
|
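The context above guards `_current_workspace` with a lock and swaps in a freshly built mapping on reload, so readers always see one consistent snapshot. The swap pattern in miniature (illustrative names, not dagster API):

```python
import threading

class SnapshotHolder:
    """Replace a mapping wholesale under a lock; readers get one snapshot."""

    def __init__(self):
        self._lock = threading.Lock()
        self._snapshot = {}

    def get(self):
        with self._lock:      # readers briefly lock to grab the current dict
            return self._snapshot

    def replace(self, new_snapshot):
        with self._lock:      # hold the lock only for the pointer swap
            old, self._snapshot = self._snapshot, new_snapshot
        # Clean up `old` outside the lock (join watch threads, close clients...)
        return old

holder = SnapshotHolder()
holder.replace({"location_a": object()})
print(list(holder.get()))  # ['location_a']
```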
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/datasets.py
|
{
"start": 3496,
"end": 8171
}
|
class ____(NonStrictDataModel):
"""
:param id: ROI id
:type id: str
:param label: ROI labels
:type label: Sequence[str]
:param poly: ROI polygon (x0, y0, ..., xn, yn)
:type poly: Sequence[float]
:param confidence: ROI confidence
:type confidence: float
:param meta: Additional metadata dictionary for the roi
:type meta: dict
:param sources: Source ID
:type sources: Sequence[str]
:param mask: Mask info for this ROI
:type mask: RoiMask
"""
_schema = {
"properties": {
"confidence": {"description": "ROI confidence", "type": "number"},
"id": {"description": "ROI id", "type": ["string", "null"]},
"label": {
"description": "ROI labels",
"items": {"type": "string"},
"type": "array",
},
"mask": {
"$ref": "#/definitions/roi_mask",
"description": "Mask info for this ROI",
},
"meta": {
"additionalProperties": True,
"description": "Additional metadata dictionary for the roi",
"type": "object",
},
"poly": {
"description": "ROI polygon (x0, y0, ..., xn, yn)",
"items": {"type": "number"},
"type": "array",
},
"sources": {
"description": "Source ID",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["label"],
"type": "object",
}
def __init__(
self,
label,
id=None,
poly=None,
confidence=None,
meta=None,
sources=None,
mask=None,
**kwargs
):
super(Roi, self).__init__(**kwargs)
self.id = id
self.label = label
self.poly = poly
self.confidence = confidence
self.meta = meta
self.sources = sources
self.mask = mask
@schema_property("id")
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("label")
def label(self):
return self._property_label
@label.setter
def label(self, value):
if value is None:
self._property_label = None
return
self.assert_isinstance(value, "label", (list, tuple))
self.assert_isinstance(value, "label", six.string_types, is_array=True)
self._property_label = value
@schema_property("poly")
def poly(self):
return self._property_poly
@poly.setter
def poly(self, value):
if value is None:
self._property_poly = None
return
self.assert_isinstance(value, "poly", (list, tuple))
self.assert_isinstance(
value, "poly", six.integer_types + (float,), is_array=True
)
self._property_poly = value
@schema_property("confidence")
def confidence(self):
return self._property_confidence
@confidence.setter
def confidence(self, value):
if value is None:
self._property_confidence = None
return
self.assert_isinstance(value, "confidence", six.integer_types + (float,))
self._property_confidence = value
@schema_property("meta")
def meta(self):
return self._property_meta
@meta.setter
def meta(self, value):
if value is None:
self._property_meta = None
return
self.assert_isinstance(value, "meta", (dict,))
self._property_meta = value
@schema_property("sources")
def sources(self):
return self._property_sources
@sources.setter
def sources(self, value):
if value is None:
self._property_sources = None
return
self.assert_isinstance(value, "sources", (list, tuple))
self.assert_isinstance(value, "sources", six.string_types, is_array=True)
self._property_sources = value
@schema_property("mask")
def mask(self):
return self._property_mask
@mask.setter
def mask(self, value):
if value is None:
self._property_mask = None
return
if isinstance(value, dict):
value = RoiMask.from_dict(value)
else:
self.assert_isinstance(value, "mask", RoiMask)
self._property_mask = value
|
Roi
|
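Given the schema above (`label` is the only required field), constructing an ROI might look like this; values are illustrative, and the import assumes the generated module is importable at the row's path:

```python
# Assumes the generated client module is importable as in the row's path.
from clearml.backend_api.services.v2_23.datasets import Roi

roi = Roi(
    label=["person"],                                    # required by the schema
    poly=[0.0, 0.0, 10.0, 0.0, 10.0, 20.0, 0.0, 20.0],   # x0, y0, ..., xn, yn
    confidence=0.92,
    meta={"source_model": "detector-v2"},                # free-form dict
)
print(roi.confidence)
# Setters validate types: e.g. roi.label = "person" (not a list) raises.
```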
python
|
pytorch__pytorch
|
test/distributed/test_launcher.py
|
{
"start": 690,
"end": 1359
}
|
class ____(TestCase):
def test_launch_user_script(self):
nnodes = 1
nproc_per_node = 4
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
"--monitor-interval=1",
"--start-method=spawn",
"--master-addr=localhost",
f"--master-port={master_port}",
"--node-rank=0",
"--use-env",
path("bin/test_script.py"),
]
launch.main(args)
if __name__ == "__main__":
run_tests()
|
TestDistributedLaunch
|
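The test reserves a free TCP port by binding to port 0 and reading back the OS-assigned port before passing it to the launcher. That idiom on its own (note the small race: the port could be reclaimed between closing the socket and using it):

```python
import socket
from contextlib import closing

def find_free_port() -> int:
    # Bind to port 0: the OS assigns an unused port, which we read back.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.bind(("localhost", 0))
        return sock.getsockname()[1]

print(f"--master-port={find_free_port()}")
```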
python
|
ansible__ansible
|
lib/ansible/modules/user.py
|
{
"start": 87676,
"end": 100643
}
|
class ____(User):
"""
This is a Darwin macOS User manipulation class.
Main differences are that Darwin:-
- Handles accounts in a database managed by dscl(1)
- Has no useradd/groupadd
- Does not create home directories
- User password must be cleartext
- UID must be given
- System users must be under 500
This overrides the following methods from the generic class:-
- user_exists()
- create_user()
- remove_user()
- modify_user()
"""
platform = 'Darwin'
distribution = None
SHADOWFILE = None
dscl_directory = '.'
fields = [
('comment', 'RealName'),
('home', 'NFSHomeDirectory'),
('shell', 'UserShell'),
('uid', 'UniqueID'),
('group', 'PrimaryGroupID'),
('hidden', 'IsHidden'),
]
def __init__(self, module):
super(DarwinUser, self).__init__(module)
# make the user hidden if option is set or defer to system option
if self.hidden is None:
if self.system:
self.hidden = 1
elif self.hidden:
self.hidden = 1
else:
self.hidden = 0
# add hidden to processing if set
if self.hidden is not None:
self.fields.append(('hidden', 'IsHidden'))
def _get_dscl(self):
return [self.module.get_bin_path('dscl', True), self.dscl_directory]
def _list_user_groups(self):
cmd = self._get_dscl()
cmd += ['-search', '/Groups', 'GroupMembership', self.name]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
groups = []
for line in out.splitlines():
if line.startswith(' ') or line.startswith(')'):
continue
groups.append(line.split()[0])
return groups
def _get_user_property(self, property):
"""Return user PROPERTY as given my dscl(1) read or None if not found."""
cmd = self._get_dscl()
cmd += ['-read', '/Users/%s' % self.name, property]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
return None
# from dscl(1)
# if property contains embedded spaces, the list will instead be
# displayed one entry per line, starting on the line after the key.
lines = out.splitlines()
# sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
if len(lines) == 1:
return lines[0].split(': ')[1]
if len(lines) > 2:
return '\n'.join([lines[1].strip()] + lines[2:])
if len(lines) == 2:
return lines[1].strip()
return None
def _get_next_uid(self, system=None):
"""
Return the next available uid. If system=True, then
uid should be below 500, if possible.
"""
cmd = self._get_dscl()
cmd += ['-list', '/Users', 'UniqueID']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
self.module.fail_json(
msg="Unable to get the next available uid",
rc=rc,
out=out,
err=err
)
max_uid = 0
max_system_uid = 0
for line in out.splitlines():
current_uid = int(line.split(' ')[-1])
if max_uid < current_uid:
max_uid = current_uid
if max_system_uid < current_uid and current_uid < 500:
max_system_uid = current_uid
if system and (0 < max_system_uid < 499):
return max_system_uid + 1
return max_uid + 1
def _change_user_password(self):
"""Change password for SELF.NAME against SELF.PASSWORD.
Please note that password must be cleartext.
"""
# some documentation on how is stored passwords on OSX:
# http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
# http://pastebin.com/RYqxi7Ca
# on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
# https://passlib.readthedocs.io/en/stable/lib/passlib.hash.pbkdf2_digest.html
# https://gist.github.com/nueh/8252572
cmd = self._get_dscl()
if self.password:
cmd += ['-passwd', '/Users/%s' % self.name, self.password]
else:
cmd += ['-create', '/Users/%s' % self.name, 'Password', '*']
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc)
return (rc, out, err)
def _make_group_numerical(self):
"""Convert SELF.GROUP to is stringed numerical value suitable for dscl."""
if self.group is None:
self.group = 'nogroup'
try:
self.group = grp.getgrnam(self.group).gr_gid
except KeyError:
self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
# We need to pass a string to dscl
self.group = str(self.group)
def __modify_group(self, group, action):
"""Add or remove SELF.NAME to or from GROUP depending on ACTION.
ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. """
if action == 'add':
option = '-a'
else:
option = '-d'
cmd = ['dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
% (action, self.name, group), err=err, out=out, rc=rc)
return (rc, out, err)
def _modify_group(self):
"""Add or remove SELF.NAME to or from GROUP depending on ACTION.
ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. """
rc = 0
out = ''
err = ''
changed = False
current = set(self._list_user_groups())
if self.groups is not None:
target = self.get_groups_set(names_only=True)
else:
target = set([])
if self.append is False:
for remove in current - target:
(_rc, _out, _err) = self.__modify_group(remove, 'delete')
rc += _rc
out += _out
err += _err
changed = True
for add in target - current:
(_rc, _out, _err) = self.__modify_group(add, 'add')
rc += _rc
out += _out
err += _err
changed = True
return (rc, out, err, changed)
def _update_system_user(self):
"""Hide or show user on login window according SELF.SYSTEM.
Returns 0 if a change has been made, None otherwise."""
plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
# http://support.apple.com/kb/HT5017?viewlocale=en_US
cmd = ['defaults', 'read', plist_file, 'HiddenUsersList']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
# returned value is
# (
# "_userA",
# "_UserB",
# userc
# )
hidden_users = []
for x in out.splitlines()[1:-1]:
try:
x = x.split('"')[1]
except IndexError:
x = x.strip()
hidden_users.append(x)
if self.system:
if self.name not in hidden_users:
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array-add', self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot add user "%s" to hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
else:
if self.name in hidden_users:
del (hidden_users[hidden_users.index(self.name)])
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array'] + hidden_users
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
def user_exists(self):
"""Check is SELF.NAME is a known user on the system."""
cmd = self._get_dscl()
cmd += ['-read', '/Users/%s' % self.name, 'UniqueID']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
return rc == 0
def remove_user(self):
"""Delete SELF.NAME. If SELF.FORCE is true, remove its home directory."""
info = self.user_info()
cmd = self._get_dscl()
cmd += ['-delete', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot delete user "%s".' % self.name, err=err, out=out, rc=rc)
if self.force:
if os.path.exists(info[5]):
shutil.rmtree(info[5])
out += "Removed %s" % info[5]
return (rc, out, err)
def create_user(self, command_name='dscl'):
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc)
# Make the Gecos (a.k.a. display name) default to the username
if self.comment is None:
self.comment = self.name
# Make user group default to 'staff'
if self.group is None:
self.group = 'staff'
self._make_group_numerical()
if self.uid is None:
self.uid = str(self._get_next_uid(self.system))
# Homedir is not created by default
if self.create_home:
if self.home is None:
self.home = '/Users/%s' % self.name
if not self.module.check_mode:
if not os.path.exists(self.home):
os.makedirs(self.home)
self.chown_homedir(int(self.uid), int(self.group), self.home)
# dscl sets shell to /usr/bin/false when UserShell is not specified
# so set the shell to /bin/bash when the user is not a system user
if not self.system and self.shell is None:
self.shell = '/bin/bash'
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
(rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot add property "%s" to user "%s".' % (field[0], self.name), err=err, out=out, rc=rc)
out += _out
err += _err
if rc != 0:
return (rc, _out, _err)
(rc, _out, _err) = self._change_user_password()
out += _out
err += _err
self._update_system_user()
# here we don't care about change status since it is a creation,
# thus changed is always true.
if self.groups:
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
return (rc, out, err)
def modify_user(self):
changed = None
out = ''
err = ''
if self.group:
self._make_group_numerical()
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
current = self._get_user_property(field[1])
if current is None or current != to_text(self.__dict__[field[0]]):
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
(rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot update property "%s" for user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
changed = rc
out += _out
err += _err
if self.update_password == 'always' and self.password is not None:
(rc, _out, _err) = self._change_user_password()
out += _out
err += _err
changed = rc
if self.groups:
(rc, _out, _err, _changed) = self._modify_group()
out += _out
err += _err
if _changed is True:
changed = rc
rc = self._update_system_user()
if rc == 0:
changed = rc
return (changed, out, err)
|
DarwinUser
|
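`_get_user_property` handles the two `dscl -read` output shapes: `Key: value` on one line, or the value on the following line(s) when it contains embedded spaces. A simplified standalone version of that parsing rule:

```python
def parse_dscl_read(out: str) -> str:
    # dscl(1): a value with embedded spaces prints on the line(s) after the key.
    lines = out.splitlines()
    if len(lines) == 1:
        return lines[0].split(": ", 1)[1]
    return "\n".join(line.strip() for line in lines[1:])

print(parse_dscl_read("UserShell: /bin/zsh"))         # /bin/zsh
print(parse_dscl_read("RealName:\n Jane Q. Public"))  # Jane Q. Public
```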
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/index_flat_map_test.py
|
{
"start": 1686,
"end": 7936
}
|
class ____(test_base.DatasetTestBase, parameterized.TestCase):
"""Tests for global shuffling of index flat map datasets."""
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(use_tensors=[True, False])))
def test_split_strings(self, use_tensors: bool):
input_data = ["0 1", "2 3 4 5", "6 7", "8"]
# The metadata is [(0, 2, 0), (2, 6, 1), (6, 8, 2), (8, 9, 3)].
metadata = _get_metadata(input_data)
def _index_map_func(index: _IndexType) -> tuple[_IndexType, _IndexType]:
index = _maybe_convert_to_tensor(index)
element_index, offset = _get_index_map_func(metadata)(index)
return (_maybe_convert_to_tensor(element_index),
_maybe_convert_to_tensor(offset))
def _maybe_convert_to_tensor(value: Any) -> _IndexType:
return math_ops.cast(value, dtypes.int64) if use_tensors else value
dataset = dataset_ops.Dataset.from_tensor_slices(input_data)
dataset = index_flat_map_op.index_flat_map(dataset, _split, _index_map_func)
output = self.getDatasetOutput(dataset)
self.assertEqual(output,
[b"0", b"1", b"2", b"3", b"4", b"5", b"6", b"7", b"8"])
@combinations.generate(test_base.default_test_combinations())
def test_cache(self):
input_data = ["0 1", "2 3 4 5", "6 7", "8"]
metadata = _get_metadata(input_data)
dataset = dataset_ops.Dataset.from_tensor_slices(input_data)
dataset = dataset.cache()
dataset = index_flat_map_op.index_flat_map(
dataset, _split, _get_index_map_func(metadata))
output = self.getDatasetOutput(dataset)
self.assertEqual(output,
[b"0", b"1", b"2", b"3", b"4", b"5", b"6", b"7", b"8"])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
repetitions=[1, 3],
seed=[None, 42],
reshuffle_each_iteration=[True, False])))
def test_global_shuffle(
self,
repetitions: int,
seed: Optional[int],
reshuffle_each_iteration: bool):
input_data = ["0 1", "2 3 4 5", "6 7", "8"]
metadata = _get_metadata(input_data)
dataset = dataset_ops.Dataset.from_tensor_slices(input_data)
dataset = index_flat_map_op.index_flat_map(
dataset, _split, _get_index_map_func(metadata), output_cardinality=9)
self.assertEqual(self.evaluate(dataset.cardinality()), 9)
if repetitions > 1:
dataset = dataset.repeat(repetitions)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration)
dataset_output = self.getDatasetOutput(
dataset, requires_initialization=True)
expected = [
b"0", b"1", b"2", b"3", b"4", b"5", b"6", b"7", b"8"] * repetitions
self.assertCountEqual(dataset_output, expected)
self.assertNotEqual(dataset_output, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(dataset_range=[0, 10])))
def test_identity_map(self, dataset_range: int):
def _map_func(element: Any) -> Any:
return element
def _index_map_func(index: int) -> tuple[int, int]:
return (index, 0)
dataset = dataset_ops.Dataset.range(dataset_range)
dataset = index_flat_map_op.index_flat_map(
dataset, _map_func, _index_map_func)
self.assertDatasetProduces(dataset, list(range(dataset_range)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
input_range=[0, 10],
map_output=[
[[1, 2], [3, 4], [5, 6]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]
],
use_tensors=[True, False])))
def test_nested_list(
self, input_range: int, map_output: list[Any], use_tensors: bool):
def _map_func(_) -> Union[tensor.Tensor, list[list[int]]]:
return (constant_op.constant(map_output, dtype=dtypes.int64)
if use_tensors else map_output)
def _index_map_func(i: int) -> tuple[int, int]:
return (i // len(map_output), i % len(map_output))
dataset = dataset_ops.Dataset.range(input_range)
dataset = index_flat_map_op.index_flat_map(
dataset, _map_func, _index_map_func)
self.assertDatasetProduces(dataset, map_output * input_range)
@combinations.generate(test_base.default_test_combinations())
def test_offset_out_of_range(self):
def _index_map_func(_) -> tuple[int, int]:
return (0, 1000)
input_data = ["0 1", "2 3 4 5", "6 7", "8"]
dataset = dataset_ops.Dataset.from_tensor_slices(input_data)
dataset = index_flat_map_op.index_flat_map(dataset, _split, _index_map_func)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"invalid `index_map_func` which returns offset 1000"):
self.getDatasetOutput(dataset)
@combinations.generate(test_base.default_test_combinations())
def test_invalid_map_fn_type(self):
def _index_map_func(_) -> str:
# Expected to return two integers.
return "Hello"
input_data = ["0 1", "2 3 4 5", "6 7", "8"]
dataset = dataset_ops.Dataset.from_tensor_slices(input_data)
dataset = index_flat_map_op.index_flat_map(dataset, _split, _index_map_func)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"expected to return two int values"):
self.getDatasetOutput(dataset)
@combinations.generate(test_base.default_test_combinations())
def test_unknown_cardinality(self):
input_data = ["0 1", "2 3 4 5", "6 7", "8"]
dataset = dataset_ops.Dataset.from_tensor_slices(input_data)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"`global_shuffle` requires the input dataset to have a non-empty "
"finite cardinality."):
dataset = index_flat_map_op.index_flat_map(
dataset, _split, _get_index_map_func(_get_metadata(input_data)))
dataset = global_shuffle_op._global_shuffle(dataset)
self.getDatasetOutput(dataset, requires_initialization=True)
|
IndexFlatMapTest
|
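`index_flat_map` requires an `index_map_func` that maps a global output index back to `(element_index, offset_within_element)`; the tests build it from cumulative token counts. A pure-Python analogue of `_get_metadata` / `_get_index_map_func` for the fixture used above:

```python
input_data = ["0 1", "2 3 4 5", "6 7", "8"]

# Cumulative (start, end, element_index) ranges over split tokens:
# [(0, 2, 0), (2, 6, 1), (6, 8, 2), (8, 9, 3)] for this fixture.
metadata, start = [], 0
for element_index, line in enumerate(input_data):
    end = start + len(line.split())
    metadata.append((start, end, element_index))
    start = end

def index_map_func(index):
    for start, end, element_index in metadata:
        if start <= index < end:
            return element_index, index - start
    raise IndexError(index)

# Global index 3 falls in element 1 ("2 3 4 5") at offset 1 -> token "3".
element_index, offset = index_map_func(3)
print(input_data[element_index].split()[offset])  # 3
```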
python
|
facebookresearch__faiss
|
tests/test_index_accuracy.py
|
{
"start": 687,
"end": 5410
}
|
class ____(unittest.TestCase):
def test_IndexFlatIP(self):
q = faiss.IndexFlatIP(d) # Ask inner product
res = ev.launch("FLAT / IP", q)
e = ev.evalres(res)
assert e[1] == 1.0
def test_IndexFlatL2(self):
q = faiss.IndexFlatL2(d)
res = ev.launch("FLAT / L2", q)
e = ev.evalres(res)
assert e[1] == 1.0
def test_ivf_kmeans(self):
ivfk = faiss.IndexIVFFlat(faiss.IndexFlatL2(d), d, ncentroids)
ivfk.nprobe = kprobe
res = ev.launch("IndexIVFFlat", ivfk)
e = ev.evalres(res)
# should give 0.260 0.260 0.260
assert e[1] > 0.2
# test parallel mode
Dref, Iref = ivfk.search(ev.xq, 100)
ivfk.parallel_mode = 1
Dnew, Inew = ivfk.search(ev.xq, 100)
assert (Iref != Inew).sum() < Iref.size / 5000.0
assert np.all(Dref == Dnew)
def test_indexLSH(self):
q = faiss.IndexLSH(d, nbits)
res = ev.launch("FLAT / LSH Cosine", q)
e = ev.evalres(res)
# should give 0.070 0.250 0.580
assert e[10] > 0.2
def test_IndexLSH_32_48(self):
# CHECK: the difference between 32 and 48 does not make much sense
for nbits2 in 32, 48:
q = faiss.IndexLSH(d, nbits2)
res = ev.launch("LSH half size", q)
e = ev.evalres(res)
# should give 0.003 0.019 0.108
assert e[10] > 0.018
def test_IndexPQ(self):
q = faiss.IndexPQ(d, M, nbits_per_index)
res = ev.launch("FLAT / PQ L2", q)
e = ev.evalres(res)
# should give 0.070 0.230 0.260
assert e[10] > 0.2
# Approximate search module: PQ with inner product distance
def test_IndexPQ_ip(self):
q = faiss.IndexPQ(d, M, nbits_per_index, faiss.METRIC_INNER_PRODUCT)
res = ev.launch("FLAT / PQ IP", q)
e = ev.evalres(res)
# should give 0.070 0.230 0.260
# (same result as regular PQ on normalized distances)
assert e[10] > 0.2
def test_IndexIVFPQ(self):
ivfpq = faiss.IndexIVFPQ(faiss.IndexFlatL2(d), d, ncentroids, M, 8)
ivfpq.nprobe = kprobe
res = ev.launch("IVF PQ", ivfpq)
e = ev.evalres(res)
# should give 0.070 0.230 0.260
assert e[10] > 0.2
# TODO: translate evaluation of nested
# Approximate search: PQ with full vector refinement
def test_IndexPQ_refined(self):
q = faiss.IndexPQ(d, M, nbits_per_index)
res = ev.launch("PQ non-refined", q)
e = ev.evalres(res)
q.reset()
rq = faiss.IndexRefineFlat(q)
res = ev.launch("PQ refined", rq)
e2 = ev.evalres(res)
assert e2[10] >= e[10]
rq.k_factor = 4
res = ev.launch("PQ refined*4", rq)
e3 = ev.evalres(res)
assert e3[10] >= e2[10]
def test_polysemous(self):
index = faiss.IndexPQ(d, M, nbits_per_index)
index.do_polysemous_training = True
# reduce nb iterations to speed up training for the test
index.polysemous_training.n_iter = 50000
index.polysemous_training.n_redo = 1
res = ev.launch("normal PQ", index)
e_baseline = ev.evalres(res)
index.search_type = faiss.IndexPQ.ST_polysemous
index.polysemous_ht = int(M / 16.0 * 58)
stats = faiss.cvar.indexPQ_stats
stats.reset()
res = ev.launch("Polysemous ht=%d" % index.polysemous_ht, index)
e_polysemous = ev.evalres(res)
# The randu dataset is difficult, so we are not too picky on
# the results. Here we assert that we have < 10 % loss when
# computing full PQ on fewer than 20% of the data.
assert stats.n_hamming_pass < stats.ncode / 5
# Test disabled because difference is 0.17 on aarch64
# TODO check why???
# assert e_polysemous[10] > e_baseline[10] - 0.1
def test_ScalarQuantizer(self):
quantizer = faiss.IndexFlatL2(d)
ivfpq = faiss.IndexIVFScalarQuantizer(
quantizer, d, ncentroids, faiss.ScalarQuantizer.QT_8bit
)
ivfpq.nprobe = kprobe
res = ev.launch("IVF SQ", ivfpq)
e = ev.evalres(res)
# should give 0.234 0.236 0.236
assert e[10] > 0.235
def test_polysemous_OOM(self):
"""this used to cause OOM when training polysemous with large
nb bits"""
d = 32
xt, xb, xq = get_dataset_2(d, 10000, 0, 0)
index = faiss.IndexPQ(d, M, 13)
index.do_polysemous_training = True
index.pq.cp.niter = 0
index.polysemous_training.max_memory = 128 * 1024 * 1024
self.assertRaises(RuntimeError, index.train, xt)
|
IndexAccuracy
|
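These tests exercise the usual faiss build-train-search flow. A compact version of that flow for `IndexIVFFlat` on synthetic data (recall values will differ from the test fixture):

```python
import numpy as np
import faiss

d, nb, nq, ncentroids = 32, 1000, 5, 16
rng = np.random.default_rng(0)
xb = rng.random((nb, d), dtype=np.float32)
xq = rng.random((nq, d), dtype=np.float32)

index = faiss.IndexIVFFlat(faiss.IndexFlatL2(d), d, ncentroids)
index.train(xb)    # IVF indexes must be trained before add()
index.add(xb)
index.nprobe = 4   # visit 4 of the 16 inverted lists at query time
D, I = index.search(xq, 5)
print(I.shape)     # (5, 5)
```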
python
|
pytest-dev__pytest-xdist
|
testing/test_looponfail.py
|
{
"start": 10525,
"end": 12214
}
|
class ____:
def test_fail_to_ok(self, pytester: pytest.Pytester) -> None:
p = pytester.makepyfile(
textwrap.dedent(
"""
def test_one():
x = 0
assert x == 1
"""
)
)
# p = pytester.mkdir("sub").join(p1.basename)
# p1.move(p)
child = pytester.spawn_pytest("-f %s --traceconfig" % p, expect_timeout=30.0)
child.expect("def test_one")
child.expect("x == 1")
child.expect("1 failed")
child.expect("### LOOPONFAILING ####")
child.expect("waiting for changes")
p.write_text(
textwrap.dedent(
"""
def test_one():
x = 1
assert x == 1
"""
),
)
child.expect(".*1 passed.*")
child.kill(15)
def test_xfail_passes(self, pytester: pytest.Pytester) -> None:
p = pytester.makepyfile(
textwrap.dedent(
"""
import pytest
@pytest.mark.xfail
def test_one():
pass
"""
)
)
child = pytester.spawn_pytest("-f %s" % p, expect_timeout=30.0)
child.expect("1 xpass")
# child.expect("### LOOPONFAILING ####")
child.expect("waiting for changes")
child.kill(15)
def removepyc(path: Path) -> None:
# XXX damn those pyc files
pyc = path.with_suffix(".pyc")
if pyc.exists():
pyc.unlink()
c = path.parent / "__pycache__"
if c.exists():
shutil.rmtree(c)
|
TestFunctional
|
python
|
pydantic__pydantic
|
tests/mypy/modules/plugin_success.py
|
{
"start": 992,
"end": 1088
}
|
class ____(Model):
z: int = 1
InheritingModel.model_validate(model.__dict__)
|
InheritingModel
|
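`model_validate` accepts any mapping, so validating another instance's `__dict__` re-checks the data under the subclass's schema and fills `z` from its default. A self-contained version; the parent's field names are assumed for illustration, since `Model` is defined earlier in the test module:

```python
from pydantic import BaseModel

class Model(BaseModel):
    x: float
    y: str

class InheritingModel(Model):
    z: int = 1

model = Model(x=1.5, y="a")
# Re-validate the parent's data under the subclass; z falls back to its default.
child = InheritingModel.model_validate(model.__dict__)
print(child.z)  # 1
```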
python
|
huggingface__transformers
|
tests/models/gemma3n/test_modeling_gemma3n.py
|
{
"start": 12394,
"end": 29645
}
|
class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = Gemma3nTextModelTester
_is_stateful = True
model_split_percents = [0.5, 0.6]
def _check_hidden_states_for_generate(
self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False
):
"Gemma3n has special hidden states shape with 1 additional dim (which is then reduced with projections)"
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
[True] * len(hidden_states),
)
self.assertEqual(len(hidden_states), (output_length - prompt_length))
# When `output_hidden_states=True`, each iteration of generate appends the hidden states corresponding to the
# new token(s)
for generated_length, iter_hidden_states in enumerate(hidden_states):
# regardless of using cache, the first forward pass will have the full prompt as input
if use_cache and generated_length > 0:
model_input_length = 1
else:
model_input_length = prompt_length + generated_length
expected_shape = (config.altup_num_inputs, batch_size, model_input_length, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
[expected_shape] * len(iter_hidden_states),
)
@parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
def test_eager_matches_sdpa_inference(
self,
name,
dtype,
padding_side,
use_attention_mask,
output_attentions,
enable_kernels,
):
"We need to relax a bit the `atols` and `rtols` for fp32 here due to the altup projections"
atols = {
("cpu", False, torch.float32): 5e-2, # this was relaxed
("cpu", False, torch.float16): 5e-3,
("cpu", False, torch.bfloat16): 1e-2,
("cpu", True, torch.float32): 5e-2, # this was relaxed
("cpu", True, torch.float16): 5e-3,
("cpu", True, torch.bfloat16): 1e-2,
("cuda", False, torch.float32): 5e-2, # this was relaxed
("cuda", False, torch.bfloat16): 1e-2,
("cuda", False, torch.float16): 5e-3,
("cuda", True, torch.float32): 5e-2, # this was relaxed
("cuda", True, torch.bfloat16): 1e-2,
("cuda", True, torch.float16): 5e-3,
}
rtols = {
("cpu", False, torch.float32): 1e-2, # this was relaxed
("cpu", False, torch.float16): 5e-3,
("cpu", False, torch.bfloat16): 1e-2,
("cpu", True, torch.float32): 1e-2, # this was relaxed
("cpu", True, torch.float16): 5e-3,
("cpu", True, torch.bfloat16): 1e-2,
("cuda", False, torch.float32): 1e-2, # this was relaxed
("cuda", False, torch.bfloat16): 1e-2,
("cuda", False, torch.float16): 5e-3,
("cuda", True, torch.float32): 1e-2, # this was relaxed
("cuda", True, torch.bfloat16): 3e-2,
("cuda", True, torch.float16): 5e-3,
}
_test_eager_matches_sdpa_inference(
self,
name,
dtype,
padding_side,
use_attention_mask,
output_attentions,
enable_kernels,
atols=atols,
rtols=rtols,
)
@pytest.mark.generate
@unittest.skip("Gemma3n does not support QuantizedCache as it performs cache manipulation in the forward pass")
def test_generate_with_quant_cache(self):
pass
@unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
def test_eager_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Gemma3n only support fp16 and bf16 data type")
def test_flash_attn_2_fp32_ln(self):
pass
@pytest.mark.generate
def test_generate_from_inputs_embeds_with_static_cache(self):
"""
Test that StaticCache can generate from inputs_embeds and calculates max_cache_length
correctly in `generate()`. We force the model to not stop generation until max-length is reached
to verify that the cache length is indeed set correctly and we don't run out of index when slicing the cache.
"""
for model_class in self.all_generative_model_classes:
# Here, we should ideally not skip any model, and test them all. However, some old models cannot correctly
# use a static cache because they don't create the causal masks correctly.
# TODO: cyril -> relax this by adding a `_support_static_cache` attribute
if not model_class._can_compile_fullgraph:
self.skipTest(reason="This model does not support the static cache format")
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
if config.is_encoder_decoder:
self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache")
model = model_class(config).to(torch_device).eval()
if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters:
self.skipTest(reason="This model does not support `inputs_embeds` in generation")
input_ids = inputs_dict.pop("input_ids")
model.config.use_cache = True
model.config.is_decoder = True
batch_size = input_ids.shape[0]
max_new_tokens = 10
# here we force to not stop at eos and go until max-length
model.generation_config.eos_token_id = model.config.get_text_config().eos_token_id = -1
generation_kwargs = {
"max_new_tokens": max_new_tokens,
"cache_implementation": "static",
"return_dict_in_generate": True, # Required to return `past_key_values`
}
text_config = model.config.get_text_config()
head_dim = (
getattr(text_config, "head_dim", None) or text_config.hidden_size // text_config.num_attention_heads
)
num_key_value_heads = (
text_config.num_attention_heads
if getattr(text_config, "num_key_value_heads", None) is None
else text_config.num_key_value_heads
)
num_hidden_layers = text_config.num_hidden_layers
inputs_embeds = model.get_input_embeddings()(input_ids)
outputs = model.generate(inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict)
# we should get `max_length - 1` in shape, not `max_length - embeds_length`.
# -1 because the last generated token isn't yet in the cache.
max_length = max_new_tokens + inputs_embeds.shape[1] - 1
cache_shape = [batch_size, num_key_value_heads, max_length, head_dim]
self.assertIsInstance(outputs.past_key_values, StaticCache)
self.assertEqual(len(outputs.past_key_values), num_hidden_layers - text_config.num_kv_shared_layers)
self.assertListEqual(list(outputs.past_key_values.layers[0].keys.shape), cache_shape)
@pytest.mark.generate
def test_generate_with_static_cache(self):
"""
Tests that generating with static cache give almost same results as with dynamic cache, and the output cache
has the expected shapes
"""
for model_class in self.all_generative_model_classes:
# Here, we should ideally not skip any model, and test them all. However, some old models cannot correctly
# use a static cache because they don't create the causal masks correctly.
# TODO: cyril -> relax this by adding a `_support_static_cache` attribute
if not model_class._can_compile_fullgraph:
self.skipTest(reason="This model does not support the static cache format")
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
set_config_for_less_flaky_test(config)
main_input = inputs_dict[model_class.main_input_name]
if config.is_encoder_decoder:
self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache")
config.is_decoder = True
batch_size = main_input.shape[0]
seq_length = self.model_tester.seq_length
max_new_tokens = 20
for dtype in (torch.float32, torch.float16):
model = model_class(copy.deepcopy(config)).to(torch_device).to(dtype).eval()
inputs_dict = {
k: v.to(dtype) if isinstance(v, torch.Tensor) and torch.is_floating_point(v) else v
for k, v in inputs_dict.items()
}
set_model_for_less_flaky_test(model)
generation_kwargs = {
"max_new_tokens": max_new_tokens,
"return_dict_in_generate": True, # Required to return `past_key_values`
"output_scores": True,
"use_cache": True,
}
static_cache_generation = model.generate(
**generation_kwargs, **inputs_dict, cache_implementation="static"
)
# Check 1: The cache shapes must match the expected shapes
max_cache_len = seq_length + max_new_tokens - 1 # cache len = gen len - 1, the last token has no cache
text_config = config.text_config if hasattr(config, "text_config") else config
head_dim = (
getattr(text_config, "head_dim", None)
or text_config.hidden_size // text_config.num_attention_heads
)
num_key_value_heads = (
text_config.num_attention_heads
if getattr(text_config, "num_key_value_heads", None) is None
else text_config.num_key_value_heads
)
num_hidden_layers = text_config.num_hidden_layers
cache_shape = (batch_size, num_key_value_heads, max_cache_len, head_dim)
self.assertTrue(isinstance(static_cache_generation.past_key_values, StaticCache))
self.assertTrue(
len(static_cache_generation.past_key_values)
== num_hidden_layers - text_config.num_kv_shared_layers
)
self.assertTrue(static_cache_generation.past_key_values.layers[0].keys.shape == cache_shape)
# Check 2: The outputs must be similar to the case with dynamic cache
dynamic_cache_generation = model.generate(**generation_kwargs, **inputs_dict)
self.assertTrue(has_similar_generate_outputs(dynamic_cache_generation, static_cache_generation))
def test_model_rope_scaling_frequencies(self):
"""Tests the frequency properties of the different RoPE scaling types on the model RoPE layer."""
# Gemma3n has different RoPE configs per layer type
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
# Retrieves the RoPE layer class from the base model class. Uses `.named_modules()` to avoid hardcoding the
# named location of the RoPE layer class.
base_model = self.model_tester.base_model_class(config)
possible_rope_attributes = [
"pos_emb",
"rotary_emb", # most common case
"global_rotary_emb",
"local_rotary_emb",
]
for name, module in base_model.named_modules():
if any(potential_name in name for potential_name in possible_rope_attributes):
rope_class = type(module)
break
scaling_factor = 10
short_input_length = 10
long_input_length = int(config.max_position_embeddings * 1.5)
# Inputs
x = torch.randn(
1, dtype=torch.float32, device=torch_device
) # used exclusively to get the dtype and the device
position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
position_ids_short = position_ids_short.unsqueeze(0)
position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
position_ids_long = position_ids_long.unsqueeze(0)
# Sanity check original RoPE
rope_params = {"rope_type": "default", "rope_theta": 10_000.0}
config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
original_rope = rope_class(config=config).to(torch_device)
original_cos_short, original_sin_short = original_rope(x, position_ids_short, layer_type="sliding_attention")
original_cos_long, original_sin_long = original_rope(x, position_ids_long, layer_type="sliding_attention")
torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :])
torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :])
# Sanity check linear RoPE scaling
# New position "x" should match original position with index "x/scaling_factor"
rope_params = {"rope_type": "linear", "factor": scaling_factor, "rope_theta": 10_000.0}
config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
linear_scaling_rope = rope_class(config=config).to(torch_device)
linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :])
torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :])
for new_position in range(0, long_input_length, scaling_factor):
original_position = int(new_position // scaling_factor)
torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :])
torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :])
# Sanity check Dynamic NTK RoPE scaling
        # Scaling should only be observed after a long input is fed. We can observe that the wavelengths
        # increase with scaling_factor (i.e. that `inv_freq` decreases)
rope_params = {"rope_type": "dynamic", "factor": scaling_factor, "rope_theta": 10_000.0}
config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
ntk_scaling_rope = rope_class(config=config).to(torch_device)
ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
torch.testing.assert_close(ntk_cos_short, original_cos_short)
torch.testing.assert_close(ntk_sin_short, original_sin_short)
with self.assertRaises(AssertionError):
torch.testing.assert_close(ntk_cos_long, original_cos_long)
with self.assertRaises(AssertionError):
torch.testing.assert_close(ntk_sin_long, original_sin_long)
self.assertTrue(
(ntk_scaling_rope.sliding_attention_inv_freq <= original_rope.sliding_attention_inv_freq).all()
)
# Sanity check Yarn RoPE scaling
# Scaling should be over the entire input
rope_params = {"rope_type": "yarn", "factor": scaling_factor, "rope_theta": 10_000.0}
config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
yarn_scaling_rope = rope_class(config=config).to(torch_device)
yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :])
torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :])
with self.assertRaises(AssertionError):
torch.testing.assert_close(yarn_cos_short, original_cos_short)
with self.assertRaises(AssertionError):
torch.testing.assert_close(yarn_sin_short, original_sin_short)
with self.assertRaises(AssertionError):
torch.testing.assert_close(yarn_cos_long, original_cos_long)
with self.assertRaises(AssertionError):
torch.testing.assert_close(yarn_sin_long, original_sin_long)
|
Gemma3nTextModelTest
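Both static-cache tests above hinge on the same shape arithmetic. Below is a minimal pure-Python sketch of that arithmetic with illustrative config numbers (none taken from a real checkpoint).

# Hypothetical config values, chosen only to make the arithmetic concrete.
batch_size, seq_length, max_new_tokens = 2, 7, 20
hidden_size, num_attention_heads, num_key_value_heads = 64, 8, 4

# Mirrors the tests: use config.head_dim if present, else derive it.
head_dim = hidden_size // num_attention_heads  # 8

# The last generated token is never written back, so the cache is one shorter.
max_cache_len = seq_length + max_new_tokens - 1  # 26
cache_shape = (batch_size, num_key_value_heads, max_cache_len, head_dim)
assert cache_shape == (2, 4, 26, 8)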
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/bmm_test.py
|
{
"start": 799,
"end": 2348
}
|
class ____(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device, dtype, op_func):
self.inputs = {
"batch1": torch.rand(
(B, M, N), device=device, dtype=dtype, requires_grad=self.auto_set()
),
"batch2": torch.rand(
(B, N, K), device=device, dtype=dtype, requires_grad=self.auto_set()
),
}
self.op_func = op_func
def forward(self, batch1, batch2):
return self.op_func(batch1, batch2)
def get_memory_traffic_bytes(self):
"""Override for bmm: (B, M, N) @ (B, N, K) -> (B, M, K)
Memory traffic: read(B*M*N + B*N*K) + write(B*M*K)
"""
batch1 = self.inputs["batch1"]
batch2 = self.inputs["batch2"]
B, M, N = batch1.shape
B_check, N_check, K = batch2.shape
assert B == B_check and N == N_check, "Batch dimensions must match for bmm"
bytes_per_element = batch1.element_size()
total_elements = B * (M * N + N * K + M * K)
return total_elements * bytes_per_element
op_bench.generate_pt_tests_from_op_list(
batched_binary_ops,
batched_binary_configs_short + batched_binary_configs_long,
BatchedBinaryOpBenchmark,
)
op_bench.generate_pt_gradient_tests_from_op_list(
batched_binary_ops,
batched_binary_configs_long,
BatchedBinaryOpBenchmark,
)
# batched ternary ops
batched_ternary_ops = op_bench.op_list(
attr_names=["op_name", "op_func"],
attrs=[["baddbmm", torch.baddbmm]],
)
|
BatchedBinaryOpBenchmark
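As a quick cross-check of get_memory_traffic_bytes above, here is a standalone sketch of the same formula with illustrative shapes (requires PyTorch).

import torch

B, M, N, K = 4, 16, 32, 8  # illustrative shapes
batch1 = torch.rand(B, M, N)
batch2 = torch.rand(B, N, K)
# bmm reads B*M*N + B*N*K elements and writes B*M*K elements.
total_elements = B * (M * N + N * K + M * K)
traffic_bytes = total_elements * batch1.element_size()
print(traffic_bytes)  # 4 * (512 + 256 + 128) * 4 bytes = 14336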
|
python
|
sympy__sympy
|
sympy/polys/domains/field.py
|
{
"start": 357,
"end": 3172
}
|
class ____(Ring[Ef]):
"""Represents a field domain. """
is_Field = True
is_PID = True
def get_ring(self):
"""Returns a ring associated with ``self``. """
raise DomainError('there is no ring associated with %s' % self)
def get_field(self) -> Self:
"""Returns a field associated with ``self``. """
return self
def exquo(self, a: Ef, b: Ef) -> Ef:
"""Exact quotient of ``a`` and ``b``, implies ``__truediv__``. """
return a / b
def quo(self, a, b):
"""Quotient of ``a`` and ``b``, implies ``__truediv__``. """
return a / b
def rem(self, a, b):
"""Remainder of ``a`` and ``b``, implies nothing. """
return self.zero
def div(self, a, b):
"""Division of ``a`` and ``b``, implies ``__truediv__``. """
return a / b, self.zero
def gcd(self, a, b) -> Ef:
"""
Returns GCD of ``a`` and ``b``.
        This definition of GCD over fields allows clearing denominators
        in `primitive()`.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy import S, gcd, primitive
>>> from sympy.abc import x
>>> QQ.gcd(QQ(2, 3), QQ(4, 9))
2/9
>>> gcd(S(2)/3, S(4)/9)
2/9
>>> primitive(2*x/3 + S(4)/9)
(2/9, 3*x + 2)
"""
try:
ring = self.get_ring()
except DomainError:
return self.one
p = ring.gcd(self.numer(a), self.numer(b))
q = ring.lcm(self.denom(a), self.denom(b))
return self.convert(p, ring)/q
def gcdex(self, a, b) -> tuple[Ef, Ef, Ef]:
"""
Returns x, y, g such that a * x + b * y == g == gcd(a, b)
"""
d = self.gcd(a, b)
if a == self.zero:
if b == self.zero:
return self.zero, self.one, self.zero
else:
return self.zero, d/b, d
else:
return d/a, self.zero, d
def lcm(self, a, b):
"""
Returns LCM of ``a`` and ``b``.
>>> from sympy.polys.domains import QQ
>>> from sympy import S, lcm
>>> QQ.lcm(QQ(2, 3), QQ(4, 9))
4/3
>>> lcm(S(2)/3, S(4)/9)
4/3
"""
try:
ring = self.get_ring()
except DomainError:
return a*b
p = ring.lcm(self.numer(a), self.numer(b))
q = ring.gcd(self.denom(a), self.denom(b))
return self.convert(p, ring)/q
def revert(self, a):
"""Returns ``a**(-1)`` if possible. """
if a:
return 1/a
else:
raise NotReversible('zero is not reversible')
def is_unit(self, a):
"""Return true if ``a`` is a invertible"""
return bool(a)
|
Field
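The gcd/lcm definitions above are easiest to see on QQ, where numerators and denominators come from the associated ring ZZ. A short usage sketch (requires sympy):

from sympy.polys.domains import QQ

a, b = QQ(2, 3), QQ(4, 9)
# gcd = gcd(numerators) / lcm(denominators); lcm = lcm(numerators) / gcd(denominators)
assert QQ.gcd(a, b) == QQ(2, 9)
assert QQ.lcm(a, b) == QQ(4, 3)
# Division in a field is exact: the remainder is always zero.
assert QQ.div(a, b) == (QQ(3, 2), QQ(0))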
|
python
|
django__django
|
tests/template_tests/syntax_tests/test_named_endblock.py
|
{
"start": 116,
"end": 2475
}
|
class ____(SimpleTestCase):
@setup(
{
"namedendblocks01": "1{% block first %}_{% block second %}"
"2{% endblock second %}_{% endblock first %}3"
}
)
def test_namedendblocks01(self):
output = self.engine.render_to_string("namedendblocks01")
self.assertEqual(output, "1_2_3")
# Unbalanced blocks
@setup(
{
"namedendblocks02": "1{% block first %}_{% block second %}"
"2{% endblock first %}_{% endblock second %}3"
}
)
def test_namedendblocks02(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("namedendblocks02")
@setup(
{
"namedendblocks03": "1{% block first %}_{% block second %}"
"2{% endblock %}_{% endblock second %}3"
}
)
def test_namedendblocks03(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("namedendblocks03")
@setup(
{
"namedendblocks04": "1{% block first %}_{% block second %}"
"2{% endblock second %}_{% endblock third %}3"
}
)
def test_namedendblocks04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("namedendblocks04")
@setup(
{
"namedendblocks05": (
"1{% block first %}_{% block second %}2{% endblock first %}"
)
}
)
def test_namedendblocks05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("namedendblocks05")
# Mixed named and unnamed endblocks
@setup(
{
"namedendblocks06": "1{% block first %}_{% block second %}"
"2{% endblock %}_{% endblock first %}3"
}
)
def test_namedendblocks06(self):
"""
Mixed named and unnamed endblocks
"""
output = self.engine.render_to_string("namedendblocks06")
self.assertEqual(output, "1_2_3")
@setup(
{
"namedendblocks07": "1{% block first %}_{% block second %}"
"2{% endblock second %}_{% endblock %}3"
}
)
def test_namedendblocks07(self):
output = self.engine.render_to_string("namedendblocks07")
self.assertEqual(output, "1_2_3")
|
NamedEndblockTests
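The same rule can be checked outside the test harness with a standalone Engine. A minimal sketch, assuming Django is installed and that Engine() works without project settings (it is designed for standalone use):

from django.template import Context, Engine

engine = Engine()
template = engine.from_string(
    "1{% block first %}_{% block second %}2{% endblock second %}_{% endblock first %}3"
)
print(template.render(Context()))  # 1_2_3
# Swapping the two endblock names would raise TemplateSyntaxError at parse time.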
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/utils/temp_dir.py
|
{
"start": 6612,
"end": 9325
}
|
class ____(TempDirectory):
"""Helper class that creates a temporary directory adjacent to a real one.
Attributes:
original
The original directory to create a temp directory for.
path
After calling create() or entering, contains the full
path to the temporary directory.
delete
Whether the directory should be deleted when exiting
(when used as a contextmanager)
"""
# The characters that may be used to name the temp directory
# We always prepend a ~ and then rotate through these until
# a usable name is found.
# pkg_resources raises a different error for .dist-info folder
# with leading '-' and invalid metadata
LEADING_CHARS = "-~.=%0123456789"
def __init__(self, original: str, delete: Optional[bool] = None) -> None:
self.original = original.rstrip("/\\")
super().__init__(delete=delete)
@classmethod
def _generate_names(cls, name: str) -> Generator[str, None, None]:
"""Generates a series of temporary names.
The algorithm replaces the leading characters in the name
with ones that are valid filesystem characters, but are not
valid package names (for both Python and pip definitions of
package).
"""
for i in range(1, len(name)):
for candidate in itertools.combinations_with_replacement(
cls.LEADING_CHARS, i - 1
):
new_name = "~" + "".join(candidate) + name[i:]
if new_name != name:
yield new_name
# If we make it this far, we will have to make a longer name
for i in range(len(cls.LEADING_CHARS)):
for candidate in itertools.combinations_with_replacement(
cls.LEADING_CHARS, i
):
new_name = "~" + "".join(candidate) + name
if new_name != name:
yield new_name
def _create(self, kind: str) -> str:
root, name = os.path.split(self.original)
for candidate in self._generate_names(name):
path = os.path.join(root, candidate)
try:
os.mkdir(path)
except OSError as ex:
# Continue if the name exists already
if ex.errno != errno.EEXIST:
raise
else:
path = os.path.realpath(path)
break
else:
# Final fallback on the default behavior.
path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))
logger.debug("Created temporary directory: %s", path)
return path
|
AdjacentTempDirectory
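The name-rotation scheme in _generate_names can be exercised on its own. A self-contained sketch of its first branch (the helper name is illustrative):

import itertools

LEADING_CHARS = "-~.=%0123456789"

def generate_names(name):
    # Replace a growing prefix of `name` with characters that are invalid
    # as Python/pip package names, always starting with "~".
    for i in range(1, len(name)):
        for candidate in itertools.combinations_with_replacement(LEADING_CHARS, i - 1):
            new_name = "~" + "".join(candidate) + name[i:]
            if new_name != name:
                yield new_name

print(list(itertools.islice(generate_names("foo.dist-info"), 3)))
# ['~oo.dist-info', '~-o.dist-info', '~~o.dist-info']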
|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_blocks/test_general_blocks.py
|
{
"start": 10447,
"end": 12227
}
|
class ____(util.MdCase):
"""Test Nested blocks and lists."""
extension = ['pymdownx.blocks.tab', 'pymdownx.blocks.html']
extension_configs = {
'pymdownx.blocks.tab': {'alternate_style': True}
}
def test_nested_blocks_in_lists(self):
"""Test a nested blocks case with lists."""
self.check_markdown(
R"""
//// html | div.my-div
- List
- List
/// tab | TEST1
Content
///
/// tab | TEST2
- A list
Paragraph
Code
///
////
""",
"""
<div class="my-div">
<ul>
<li>
<p>List</p>
<ul>
<li>
<p>List</p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:2"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><input id="__tabbed_1_2" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">TEST1</label><label for="__tabbed_1_2">TEST2</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Content</p>
</div>
<div class="tabbed-block">
<ul>
<li>
<p>A list</p>
<p>Paragraph</p>
<pre><code>Code
</code></pre>
</li>
</ul>
</div>
</div>
</div>
</li>
</ul>
</li>
</ul>
</div>
""", # noqa: E501
True
)
|
TestNestedBlocksAndLists
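Outside the test harness, the same alternate-style tabs can be produced directly. A minimal sketch (requires markdown and pymdown-extensions):

import markdown

text = """/// tab | TEST1
Content
///

/// tab | TEST2
More content
///
"""
html = markdown.markdown(
    text,
    extensions=["pymdownx.blocks.tab"],
    extension_configs={"pymdownx.blocks.tab": {"alternate_style": True}},
)
print(html)  # emits the tabbed-set / tabbed-alternate markup asserted above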
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/nn_fused_batchnorm_d9m_test.py
|
{
"start": 1408,
"end": 7073
}
|
class ____(test.TestCase,
parameterized.TestCase):
"""Test determinsitic functionality and exceptions for FusedBatchNorm.
Test that tf.errors.UnimplementedError is thrown, as
appropriate, by the GPU code-path through FusedBatchNormFreezeGrad when
deterministic ops are enabled. This test assumes that
nn_fused_batchnorm_test.py runs equivalent test cases when deterministic ops
are not enabled and will therefore detect erroneous exception throwing in
those cases.
Also test that the other code-paths, running on both CPU and GPU, operate
deterministically.
"""
def _genParams(self, data_format, x_dtype, large_batch):
if large_batch:
batch_size = 5000
height = width = 4
else:
batch_size = 10
height = 5
width = 5000
channel_count = 3
if data_format == 'NHWC':
x_shape = (batch_size, height, width, channel_count)
else: # 'NCHW'
x_shape = (batch_size, channel_count, height, width)
# Using random_ops.random_normal would produce different values on each run
x = constant_op.constant(np.random.normal(size=x_shape), dtype=x_dtype)
scale_shape = (channel_count,)
scale = constant_op.constant(
np.random.normal(size=scale_shape), dtype=dtypes.float32)
offset = constant_op.constant(
np.random.normal(size=scale_shape), dtype=dtypes.float32)
mean = np.random.normal(size=scale_shape)
variance = np.random.normal(size=scale_shape)
y_shape = x_shape
y_dtype = x_dtype
upstream_gradients = constant_op.constant(
np.random.normal(size=y_shape), dtype=y_dtype)
return x, scale, offset, mean, variance, upstream_gradients
@parameterized.parameters('NHWC', 'NCHW')
def testForward(self, data_format):
with self.cached_session():
for large_batch in [False, True]:
for x_dtype in [dtypes.float16, dtypes.float32]: # skipping bfloat16
x, scale, offset, mean, variance, _ = self._genParams(
data_format, x_dtype, large_batch)
for is_training in [False, True]:
op_output = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean,
variance,
data_format=data_format,
is_training=is_training,
exponential_avg_factor=1.01)
y_a, running_mean_a, running_var_a = op_output
y_a = self.evaluate(y_a)
if is_training:
running_mean_a = self.evaluate(running_mean_a)
running_var_a = self.evaluate(running_var_a)
for _ in range(5):
op_output_b = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean,
variance,
data_format=data_format,
is_training=is_training,
exponential_avg_factor=1.01)
y_b, running_mean_b, running_var_b = op_output_b
y_b = self.evaluate(y_b)
self.assertAllEqual(y_a, y_b)
if is_training:
running_mean_b = self.evaluate(running_mean_b)
running_var_b = self.evaluate(running_var_b)
self.assertAllEqual(running_mean_a, running_mean_b)
self.assertAllEqual(running_var_a, running_var_b)
@parameterized.parameters('NHWC', 'NCHW')
@test_util.disable_xla('XLA is deterministic')
def testBackward(self, data_format):
with self.cached_session():
for large_batch in [False, True]:
# Only run with float32, as float16 is very slow on CPUs
params = self._genParams(data_format, dtypes.float32, large_batch)
x, scale, offset, mean, variance, upstream_gradients = params
for is_training in [False, True]:
for backprop_to in [x, scale, offset]:
with backprop.GradientTape(persistent=True) as tape:
tape.watch(backprop_to)
op_output = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean,
variance,
data_format=data_format,
is_training=is_training,
exponential_avg_factor=0.99)
gradient_injector_output = op_output[0] * upstream_gradients
if (len(config.list_physical_devices('GPU')) and
not is_training):
# Only backprop to offset is nondeterministic (on GPU, when
# is_training=False), but backprop to the other parameters is
# calculated using the same kernel.
with self.assertRaisesRegex(
errors_impl.UnimplementedError,
'A deterministic GPU implementation of fused batch-norm' +
' backprop, when training is disabled, is not currently' +
' available.'):
grad = tape.gradient(gradient_injector_output, backprop_to)
self.evaluate(grad)
else:
grad_a = tape.gradient(gradient_injector_output, backprop_to)
grad_a = self.evaluate(grad_a)
for _ in range(3):
grad_b = tape.gradient(gradient_injector_output,
backprop_to)
grad_b = self.evaluate(grad_b)
self.assertAllEqual(grad_a, grad_b)
if __name__ == '__main__':
# TODO(reedwm): Merge this file with nn_fused_batchnorm_test.py
config.enable_op_determinism()
test.main()
|
FusedBatchNormalizationDeterministicTest
|
python
|
huggingface__transformers
|
tests/models/internvl/test_video_processing_internvl.py
|
{
"start": 1043,
"end": 2923
}
|
class ____:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_normalize=True,
image_mean=OPENAI_CLIP_MEAN,
image_std=OPENAI_CLIP_STD,
do_convert_rgb=True,
):
size = size if size is not None else {"height": 384, "width": 384}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_video_shape(self, videos):
return [self.num_frames, self.num_channels, self.size["height"], self.size["width"]]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
|
InternVLVideoProcessingTester
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/parametertree/interactive.py
|
{
"start": 827,
"end": 7351
}
|
class ____:
"""
``interact`` can be used with regular functions. However, when they are connected to
    changed or changing signals, there is no way to access these connections later to,
    e.g., disconnect them temporarily. This utility class wraps a normal function but
    can provide an external scope for accessing the hooked-up parameter signals.
"""
# Attributes below are populated by `update_wrapper` but aren't detected by linters
__name__: str
__qualname__: str
def __init__(self, function, *, closures=None, **extra):
"""
Wraps a callable function in a way that forwards Parameter arguments as keywords
Parameters
----------
function: callable
Function to wrap
closures: dict[str, callable]
Arguments that shouldn't be constant, but can't be represented as a parameter.
See the rst docs for more information.
extra: dict
extra keyword arguments to pass to ``function`` when this wrapper is called
"""
super().__init__()
self.parameters = {}
self.extra = extra
self.function = function
if closures is None:
closures = {}
self.closures = closures
self._disconnected = False
self.parametersNeedRunKwargs = False
self.parameterCache = {}
# No need for wrapper __dict__ to function as function.__dict__, since
# Only __doc__, __name__, etc. attributes are required
functools.update_wrapper(self, function, updated=())
def __call__(self, **kwargs):
"""
Calls ``self.function``. Extra, closures, and parameter keywords as defined on
init and through :func:`InteractiveFunction.setParams` are forwarded during the
call.
"""
if self.parametersNeedRunKwargs:
self._updateParametersFromRunKwargs(**kwargs)
runKwargs = self.extra.copy()
runKwargs.update(self.parameterCache)
for kk, vv in self.closures.items():
runKwargs[kk] = vv()
runKwargs.update(**kwargs)
return self.function(**runKwargs)
def updateCachedParameterValues(self, param, value):
"""
        This function is connected to ``sigValueChanged`` of every parameter associated with
it. This way, those parameters don't have to be queried for their value every
time InteractiveFunction is __call__'ed
"""
self.parameterCache[param.name()] = value
def _updateParametersFromRunKwargs(self, **kwargs):
"""
Updates attached params from __call__ without causing additional function runs
"""
# Ensure updates don't cause firing of self's function
wasDisconnected = self.disconnect()
try:
for kwarg in set(kwargs).intersection(self.parameters):
self.parameters[kwarg].setValue(kwargs[kwarg])
finally:
if not wasDisconnected:
self.reconnect()
for extraKey in set(kwargs) & set(self.extra):
self.extra[extraKey] = kwargs[extraKey]
def _disconnectParameter(self, param):
param.sigValueChanged.disconnect(self.updateCachedParameterValues)
for signal in (param.sigValueChanging, param.sigValueChanged):
fn.disconnect(signal, self.runFromChangedOrChanging)
def hookupParameters(self, params=None, clearOld=True):
"""
Binds a new set of parameters to this function. If ``clearOld`` is *True* (
default), previously bound parameters are disconnected.
Parameters
----------
params: Sequence[Parameter]
New parameters to listen for updates and optionally propagate keywords
passed to :meth:`__call__`
clearOld: bool
If ``True``, previously hooked up parameters will be removed first
"""
if clearOld:
self.removeParameters()
for param in params:
self.parameters[param.name()] = param
param.sigValueChanged.connect(self.updateCachedParameterValues)
# Populate initial values
self.parameterCache[param.name()] = param.value() if param.hasValue() else None
def removeParameters(self, clearCache=True):
"""
Disconnects from all signals of parameters in ``self.parameters``. Also,
optionally clears the old cache of param values
"""
for p in self.parameters.values():
self._disconnectParameter(p)
# Disconnected all old signals, clear out and get ready for new ones
self.parameters.clear()
if clearCache:
self.parameterCache.clear()
def runFromChangedOrChanging(self, param, value):
if self._disconnected:
return None
# Since this request came from a parameter, ensure it's not propagated back
# for efficiency and to avoid ``changing`` signals causing ``changed`` values
oldPropagate = self.parametersNeedRunKwargs
self.parametersNeedRunKwargs = False
try:
ret = self(**{param.name(): value})
finally:
self.parametersNeedRunKwargs = oldPropagate
return ret
def runFromAction(self, **kwargs):
if self._disconnected:
return None
return self(**kwargs)
def disconnect(self):
"""
Simulates disconnecting the runnable by turning ``runFrom*`` functions into no-ops
"""
oldDisconnect = self._disconnected
self._disconnected = True
return oldDisconnect
def setDisconnected(self, disconnected):
"""
Sets the disconnected state of the runnable, see :meth:`disconnect` and
:meth:`reconnect` for more information
"""
oldDisconnect = self._disconnected
self._disconnected = disconnected
return oldDisconnect
def reconnect(self):
"""Simulates reconnecting the runnable by re-enabling ``runFrom*`` functions"""
oldDisconnect = self._disconnected
self._disconnected = False
return oldDisconnect
def __str__(self):
return f"{type(self).__name__}(`<{self.function.__name__}>`) at {hex(id(self))}"
def __repr__(self):
return (
str(self) + " with keys:\n"
f"parameters={list(self.parameters)}, "
f"extra={list(self.extra)}, "
f"closures={list(self.closures)}"
)
|
InteractiveFunction
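The core trick above, an instance that both looks like the wrapped function and injects cached keyword values, can be reduced to a few lines. A toy sketch (class and function names are illustrative):

import functools

class CachedKwargsFunction:
    def __init__(self, function, **extra):
        self.function = function
        self.extra = extra
        self.parameterCache = {}
        # Copy __name__, __doc__, etc. onto the instance, but not __dict__.
        functools.update_wrapper(self, function, updated=())

    def __call__(self, **kwargs):
        # Call-time kwargs win over cached values, which win over extras.
        runKwargs = {**self.extra, **self.parameterCache, **kwargs}
        return self.function(**runKwargs)

def area(width, height, scale=1):
    return width * height * scale

f = CachedKwargsFunction(area, scale=2)
f.parameterCache.update(width=3, height=4)
print(f.__name__, f())  # area 24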
|
python
|
pytorch__pytorch
|
torch/ao/quantization/backend_config/backend_config.py
|
{
"start": 1475,
"end": 2146
}
|
class ____(Enum):
"""An enum that represents different ways of how an operator/operator pattern
should be observed
"""
OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT = 0
"""this means input and output are observed with different observers, based
on qconfig.activation
example: conv, linear, softmax
"""
OUTPUT_SHARE_OBSERVER_WITH_INPUT = 1
"""this means the output will use the same observer instance as input, based
on qconfig.activation
example: torch.cat, maxpool
"""
INPUT_OUTPUT_NOT_OBSERVED = 2
"""this means the input and output are never observed
example: x.shape, x.size
"""
@dataclass
|
ObservationType
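Consumers of such an enum typically branch on the member to decide how many observer instances to create. A toy sketch with illustrative names:

from enum import Enum

class Observation(Enum):
    DIFFERENT_OBSERVER = 0  # e.g. conv, linear, softmax
    SHARE_OBSERVER = 1      # e.g. torch.cat, maxpool
    NOT_OBSERVED = 2        # e.g. x.shape, x.size

def observers_needed(kind: Observation) -> int:
    # Sharing means a single observer instance serves both input and output.
    return {
        Observation.DIFFERENT_OBSERVER: 2,
        Observation.SHARE_OBSERVER: 1,
        Observation.NOT_OBSERVED: 0,
    }[kind]

print(observers_needed(Observation.SHARE_OBSERVER))  # 1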
|
python
|
getsentry__sentry
|
src/sentry/conf/types/role_dict.py
|
{
"start": 80,
"end": 322
}
|
class ____(TypedDict):
id: str
name: str
desc: str
scopes: set[str]
is_retired: NotRequired[bool]
is_global: NotRequired[bool]
is_minimum_role_for: NotRequired[str]
is_team_roles_allowed: NotRequired[bool]
|
RoleDict
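The NotRequired markers above mean those keys may be omitted entirely while the rest stay mandatory. A sketch (typing.NotRequired needs Python 3.11+; older versions can import it from typing_extensions):

from typing import NotRequired, TypedDict

class Role(TypedDict):
    id: str
    name: str
    scopes: set[str]
    is_global: NotRequired[bool]

admin: Role = {"id": "admin", "name": "Admin", "scopes": {"org:admin"}}  # ok without is_global
superuser: Role = {"id": "su", "name": "Superuser", "scopes": set(), "is_global": True}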
|
python
|
spyder-ide__spyder
|
spyder/plugins/pylint/main_widget.py
|
{
"start": 1981,
"end": 2164
}
|
class ____:
ChangeHistory = "change_history_depth_action"
RunCodeAnalysis = "run_analysis_action"
BrowseFile = "browse_action"
ShowLog = "log_action"
|
PylintWidgetActions
|
python
|
pyqtgraph__pyqtgraph
|
doc/source/images/gen_example_gradient_plot.py
|
{
"start": 181,
"end": 2027
}
|
class ____(pg.GraphicsLayoutWidget):
""" example application main window """
def __init__(self):
super().__init__()
self.resize(420,400)
self.show()
# Prepare demonstration data
raw = np.linspace(0.0, 2.0, 400)
y_data1 = ( (raw+0.1)%1 ) ** 4
y_data2 = ( (raw+0.1)%1 ) ** 4 - ( (raw+0.6)%1 ) ** 4
# Example 1: Gradient pen
cm = pg.colormap.get('CET-L17') # prepare a linear color map
cm.reverse() # reverse it to put light colors at the top
pen = cm.getPen( span=(0.0,1.0), width=5 ) # gradient from blue (y=0) to white (y=1)
# plot a curve drawn with a pen colored according to y value:
curve1 = pg.PlotDataItem( y=y_data1, pen=pen )
# Example 2: Gradient brush
cm = pg.colormap.get('CET-D1') # prepare a diverging color map
cm.setMappingMode('diverging') # set mapping mode
brush = cm.getBrush( span=(-1., 1.) ) # gradient from blue at -1 to red at +1
# plot a curve that is filled to zero with the gradient brush:
curve2 = pg.PlotDataItem( y=y_data2, pen='w', brush=brush, fillLevel=0.0 )
for idx, curve in enumerate( (curve1, curve2) ):
plot = self.addPlot(row=idx, col=0)
plot.getAxis('left').setWidth(25)
plot.addItem( curve )
self.timer = pg.QtCore.QTimer( singleShot=True )
self.timer.timeout.connect(self.export)
self.timer.start(100)
def export(self):
print('exporting')
exporter = exp.ImageExporter(self.scene())
exporter.parameters()['width'] = 420
exporter.export('example_gradient_plot.png')
mkQApp("Gradient plotting example")
main_window = MainWindow()
## Start Qt event loop
if __name__ == '__main__':
pg.exec()
|
MainWindow
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/links/emr.py
|
{
"start": 4385,
"end": 5032
}
|
class ____(BaseAwsLink):
"""Helper class for constructing Amazon EMR Serverless link to Spark stdout logs."""
name = "Spark Driver stdout"
key = "emr_serverless_logs"
def format_link(self, application_id: str | None = None, job_run_id: str | None = None, **kwargs) -> str:
if not application_id or not job_run_id:
return ""
url = get_serverless_dashboard_url(
aws_conn_id=kwargs.get("conn_id"), application_id=application_id, job_run_id=job_run_id
)
if url:
return url._replace(path="/logs/SPARK_DRIVER/stdout.gz").geturl()
return ""
|
EmrServerlessLogsLink
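The path swap in format_link works because get_serverless_dashboard_url evidently returns a urllib.parse result, on which _replace() rewrites one component while keeping scheme, host, and query intact. A standalone sketch with an illustrative URL:

from urllib.parse import urlparse

# Illustrative dashboard URL; the real one comes from get_serverless_dashboard_url.
url = urlparse("https://dashboard.example.com/base?token=abc")
stdout_url = url._replace(path="/logs/SPARK_DRIVER/stdout.gz").geturl()
print(stdout_url)  # https://dashboard.example.com/logs/SPARK_DRIVER/stdout.gz?token=abc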
|
python
|
pytorch__pytorch
|
test/cpp_extensions/open_registration_extension/torch_openreg/tests/test_storage.py
|
{
"start": 805,
"end": 8437
}
|
class ____(TestCase):
def test_serialization(self):
storage = torch.UntypedStorage(4, device=torch.device("openreg"))
self.assertEqual(torch.serialization.location_tag(storage), "openreg:0")
storage = torch.UntypedStorage(4, device=torch.device("openreg:0"))
self.assertEqual(torch.serialization.location_tag(storage), "openreg:0")
storage_cpu = torch.empty(4, 4).storage()
storage_openreg = torch.serialization.default_restore_location(
storage_cpu, "openreg:0"
)
self.assertTrue(storage_openreg.is_openreg)
tensor = torch.empty(3, 3, device="openreg")
self.assertEqual(torch._utils.get_tensor_metadata(tensor), {})
metadata = {"version_number": True, "format_number": True}
torch._utils.set_tensor_metadata(tensor, metadata)
self.assertEqual(torch._utils.get_tensor_metadata(tensor), metadata)
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, "data.pt")
torch.save(tensor, path)
tensor_openreg = torch.load(path)
self.assertTrue(tensor_openreg.is_openreg)
self.assertEqual(torch._utils.get_tensor_metadata(tensor_openreg), metadata)
tensor_cpu = torch.load(path, map_location="cpu")
self.assertFalse(tensor_cpu.is_openreg)
self.assertEqual(torch._utils.get_tensor_metadata(tensor_cpu), {})
@skipIfTorchDynamo()
@unittest.skipIf(
numpy.__version__ < "1.25",
"versions < 1.25 serialize dtypes differently from how it's serialized in data_legacy_numpy",
)
def test_open_device_numpy_serialization(self):
"""
This tests the legacy _rebuild_device_tensor_from_numpy serialization path
"""
data_legacy_numpy = (
b"PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x10\x00\x12\x00archive/data.pklFB\x0e\x00ZZZZZZZZZZZZZZ\x80\x02}q\x00X\x01"
b"\x00\x00\x00xq\x01ctorch._utils\n_rebuild_device_tensor_from_numpy\nq\x02(cnumpy.core.m"
b"ultiarray\n_reconstruct\nq\x03cnumpy\nndarray\nq\x04K\x00\x85q\x05c_codecs\nencode\nq\x06"
b"X\x01\x00\x00\x00bq\x07X\x06\x00\x00\x00latin1q\x08\x86q\tRq\n\x87q\x0bRq\x0c(K\x01K\x02K"
b"\x03\x86q\rcnumpy\ndtype\nq\x0eX\x02\x00\x00\x00f4q\x0f\x89\x88\x87q\x10Rq\x11(K\x03X\x01"
b"\x00\x00\x00<q\x12NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00tq\x13b\x89h\x06X\x1c\x00\x00"
b"\x00\x00\x00\xc2\x80?\x00\x00\x00@\x00\x00@@\x00\x00\xc2\x80@\x00\x00\xc2\xa0@\x00\x00\xc3"
b"\x80@q\x14h\x08\x86q\x15Rq\x16tq\x17bctorch\nfloat32\nq\x18X\t\x00\x00\x00openreg:0q\x19\x89"
b"tq\x1aRq\x1bs.PK\x07\x08\xdfE\xd6\xcaS\x01\x00\x00S\x01\x00\x00PK\x03\x04\x00\x00\x08"
b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00.\x00"
b"archive/byteorderFB*\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZlittlePK\x07\x08"
b"\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00=\x00archive/versionFB9\x00"
b"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07\x08\xd1\x9egU\x02\x00\x00"
b"\x00\x02\x00\x00\x00PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x1e\x002\x00archive/.data/serialization_idFB.\x00ZZZZZZZZZZZZZ"
b"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ0636457737946401051300000025273995036293PK\x07\x08\xee(\xcd"
b"\x8d(\x00\x00\x00(\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00"
b"\xdfE\xd6\xcaS\x01\x00\x00S\x01\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00archive/data.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00"
b"\x00\x00\x85=\xe3\x19\x06\x00\x00\x00\x06\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\xa3\x01\x00\x00archive/byteorderPK\x01\x02\x00\x00\x00\x00\x08\x08\x00"
b"\x00\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x16\x02\x00\x00archive/versionPK\x01\x02\x00\x00\x00\x00\x08"
b"\x08\x00\x00\x00\x00\x00\x00\xee(\xcd\x8d(\x00\x00\x00(\x00\x00\x00\x1e\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x92\x02\x00\x00archive/.data/serialization_idPK\x06"
b"\x06,\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00"
b"\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x06\x01\x00\x00\x00\x00\x00\x008\x03\x00"
b"\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00>\x04\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00"
b"PK\x05\x06\x00\x00\x00\x00\x04\x00\x04\x00\x06\x01\x00\x008\x03\x00\x00\x00\x00"
)
buf_data_legacy_numpy = io.BytesIO(data_legacy_numpy)
with safe_globals(
[
(
(
numpy.core.multiarray._reconstruct,
"numpy.core.multiarray._reconstruct",
)
if numpy.__version__ >= "2.1"
else numpy.core.multiarray._reconstruct
),
numpy.ndarray,
numpy.dtype,
_codecs.encode,
numpy.dtypes.Float32DType,
]
):
sd_loaded = torch.load(buf_data_legacy_numpy, weights_only=True)
buf_data_legacy_numpy.seek(0)
# Test map_location
sd_loaded_cpu = torch.load(
buf_data_legacy_numpy, weights_only=True, map_location="cpu"
)
expected = torch.tensor(
[[1, 2, 3], [4, 5, 6]], dtype=torch.float32, device="openreg"
)
self.assertEqual(sd_loaded["x"].cpu(), expected.cpu())
self.assertFalse(sd_loaded["x"].is_cpu)
self.assertTrue(sd_loaded_cpu["x"].is_cpu)
def test_open_device_cpu_serialization(self):
default_protocol = torch.serialization.DEFAULT_PROTOCOL
with unittest.mock.patch.object(torch._C, "_has_storage", return_value=False):
x = torch.randn(2, 3)
x_openreg = x.to("openreg")
sd = {"x": x_openreg}
rebuild_func = x_openreg._reduce_ex_internal(default_protocol)[0]
self.assertTrue(
rebuild_func is torch._utils._rebuild_device_tensor_from_cpu_tensor
)
# Test map_location
with TemporaryFileName() as f:
torch.save(sd, f)
sd_loaded = torch.load(f, weights_only=True)
# Test map_location
sd_loaded_cpu = torch.load(f, weights_only=True, map_location="cpu")
self.assertFalse(sd_loaded["x"].is_cpu)
self.assertEqual(sd_loaded["x"].cpu(), x)
self.assertTrue(sd_loaded_cpu["x"].is_cpu)
# Test metadata_only
with TemporaryFileName() as f:
with self.assertRaisesRegex(
RuntimeError,
"Cannot serialize tensors on backends with no storage under skip_data context manager",
):
with torch.serialization.skip_data():
torch.save(sd, f)
if __name__ == "__main__":
run_tests()
|
TestSerialization
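The map_location round-trip at the heart of these tests does not need the custom openreg backend. A CPU-only sketch of the same save/load pattern (requires PyTorch):

import io
import torch

buf = io.BytesIO()
torch.save({"x": torch.arange(6, dtype=torch.float32).reshape(2, 3)}, buf)
buf.seek(0)
sd = torch.load(buf, weights_only=True, map_location="cpu")
print(sd["x"].is_cpu, sd["x"].shape)  # True torch.Size([2, 3])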
|
python
|
ray-project__ray
|
python/ray/serve/_private/router.py
|
{
"start": 39735,
"end": 44418
}
|
class ____(Router):
"""Wrapper class that runs an AsyncioRouter on a separate thread.
The motivation for this is to avoid user code blocking the event loop and
preventing the router from making progress.
Maintains a singleton event loop running in a daemon thread that is shared by
all AsyncioRouters.
"""
_asyncio_loop: Optional[asyncio.AbstractEventLoop] = None
_asyncio_loop_creation_lock = threading.Lock()
def __init__(self, **passthrough_kwargs):
assert (
"event_loop" not in passthrough_kwargs
), "SingletonThreadRouter manages the router event loop."
self._asyncio_router = AsyncioRouter(
event_loop=self._get_singleton_asyncio_loop(), **passthrough_kwargs
)
@classmethod
def _get_singleton_asyncio_loop(cls) -> asyncio.AbstractEventLoop:
"""Get singleton asyncio loop running in a daemon thread.
This method is thread safe.
"""
with cls._asyncio_loop_creation_lock:
if cls._asyncio_loop is None:
cls._asyncio_loop = asyncio.new_event_loop()
thread = threading.Thread(
daemon=True,
target=cls._asyncio_loop.run_forever,
)
thread.start()
return cls._asyncio_loop
def running_replicas_populated(self) -> bool:
return self._asyncio_router.running_replicas_populated()
def assign_request(
self,
request_meta: RequestMetadata,
*request_args,
**request_kwargs,
) -> concurrent.futures.Future[ReplicaResult]:
"""Routes assign_request call on the internal asyncio loop.
This method uses `run_coroutine_threadsafe` to execute the actual request
assignment logic (`_asyncio_router.assign_request`) on the dedicated
asyncio event loop thread. It returns a `concurrent.futures.Future` that
can be awaited or queried from the calling thread.
Returns:
A concurrent.futures.Future resolving to the ReplicaResult representing
the assigned request.
"""
def asyncio_future_callback(
asyncio_future: asyncio.Future, concurrent_future: concurrent.futures.Future
):
"""Callback attached to the asyncio Task running assign_request.
This runs when the asyncio Task finishes (completes, fails, or is cancelled).
Its primary goal is to propagate cancellation initiated via the
`concurrent_future` back to the `ReplicaResult` in situations where
asyncio_future didn't see the cancellation event in time. Think of it
like a second line of defense for cancellation of replica results.
"""
# Check if the cancellation originated from the concurrent.futures.Future
if (
concurrent_future.cancelled()
and not asyncio_future.cancelled()
and asyncio_future.exception() is None
):
result: ReplicaResult = asyncio_future.result()
logger.info(
"Asyncio task completed despite cancellation attempt. "
"Attempting to cancel the request that was assigned to a replica."
)
result.cancel()
concurrent_future = concurrent.futures.Future()
def create_task_and_setup():
task = self._asyncio_loop.create_task(
self._asyncio_router.assign_request(
request_meta, *request_args, **request_kwargs
)
)
            # Set up the cancellation callback defined above
task.add_done_callback(
lambda _: asyncio_future_callback(_, concurrent_future)
)
try:
                # chain the two futures so completion and cancellation propagate between them
futures._chain_future(
ensure_future(task, loop=self._asyncio_loop), concurrent_future
)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
if concurrent_future.set_running_or_notify_cancel():
concurrent_future.set_exception(exc)
raise
# Schedule on the event loop thread
self._asyncio_loop.call_soon_threadsafe(create_task_and_setup)
return concurrent_future
def shutdown(self) -> concurrent.futures.Future:
return asyncio.run_coroutine_threadsafe(
self._asyncio_router.shutdown(), loop=self._asyncio_loop
)
|
SingletonThreadRouter
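The singleton-loop-in-a-daemon-thread pattern above can be demonstrated in isolation. A bare-bones sketch (names are illustrative):

import asyncio
import concurrent.futures
import threading

# One background event loop shared by all callers, running in a daemon thread.
_loop = asyncio.new_event_loop()
threading.Thread(target=_loop.run_forever, daemon=True).start()

async def assign(n: int) -> int:
    await asyncio.sleep(0)
    return n * 2

# run_coroutine_threadsafe returns a concurrent.futures.Future usable from any thread.
future: concurrent.futures.Future = asyncio.run_coroutine_threadsafe(assign(21), _loop)
print(future.result(timeout=5))  # 42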
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/query_engine/flare/schema.py
|
{
"start": 68,
"end": 163
}
|
class ____:
"""Query task."""
query_str: str
start_idx: int
end_idx: int
|
QueryTask
|
python
|
PrefectHQ__prefect
|
src/prefect/client/collections.py
|
{
"start": 196,
"end": 1050
}
|
class ____(Protocol):
async def read_worker_metadata(self) -> Dict[str, Any]: ...
async def __aenter__(self) -> "CollectionsMetadataClient": ...
async def __aexit__(self, *exc_info: Any) -> Any: ...
def get_collections_metadata_client(
httpx_settings: Optional[Dict[str, Any]] = None,
) -> "CollectionsMetadataClient":
"""
Creates a client that can be used to fetch metadata for
Prefect collections.
    Will return a `CloudClient` if the profile is set to connect
    to Prefect Cloud; otherwise it will return an `OrchestrationClient`.
"""
orchestration_client = get_client(httpx_settings=httpx_settings)
if orchestration_client.server_type == ServerType.CLOUD:
return get_cloud_client(httpx_settings=httpx_settings, infer_cloud_url=True)
else:
return orchestration_client
|
CollectionsMetadataClient
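Because the class above is a Protocol, both client types satisfy it structurally, with no shared base class. A minimal sketch of that structural typing (names are illustrative):

from typing import Any, Dict, Protocol

class MetadataClient(Protocol):
    async def read_worker_metadata(self) -> Dict[str, Any]: ...

class FakeClient:  # never subclasses the Protocol
    async def read_worker_metadata(self) -> Dict[str, Any]:
        return {"workers": []}

client: MetadataClient = FakeClient()  # passes static type checking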
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_stateful_set_status.py
|
{
"start": 383,
"end": 14116
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'available_replicas': 'int',
'collision_count': 'int',
'conditions': 'list[V1StatefulSetCondition]',
'current_replicas': 'int',
'current_revision': 'str',
'observed_generation': 'int',
'ready_replicas': 'int',
'replicas': 'int',
'update_revision': 'str',
'updated_replicas': 'int'
}
attribute_map = {
'available_replicas': 'availableReplicas',
'collision_count': 'collisionCount',
'conditions': 'conditions',
'current_replicas': 'currentReplicas',
'current_revision': 'currentRevision',
'observed_generation': 'observedGeneration',
'ready_replicas': 'readyReplicas',
'replicas': 'replicas',
'update_revision': 'updateRevision',
'updated_replicas': 'updatedReplicas'
}
def __init__(self, available_replicas=None, collision_count=None, conditions=None, current_replicas=None, current_revision=None, observed_generation=None, ready_replicas=None, replicas=None, update_revision=None, updated_replicas=None, local_vars_configuration=None): # noqa: E501
"""V1StatefulSetStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._available_replicas = None
self._collision_count = None
self._conditions = None
self._current_replicas = None
self._current_revision = None
self._observed_generation = None
self._ready_replicas = None
self._replicas = None
self._update_revision = None
self._updated_replicas = None
self.discriminator = None
if available_replicas is not None:
self.available_replicas = available_replicas
if collision_count is not None:
self.collision_count = collision_count
if conditions is not None:
self.conditions = conditions
if current_replicas is not None:
self.current_replicas = current_replicas
if current_revision is not None:
self.current_revision = current_revision
if observed_generation is not None:
self.observed_generation = observed_generation
if ready_replicas is not None:
self.ready_replicas = ready_replicas
self.replicas = replicas
if update_revision is not None:
self.update_revision = update_revision
if updated_replicas is not None:
self.updated_replicas = updated_replicas
@property
def available_replicas(self):
"""Gets the available_replicas of this V1StatefulSetStatus. # noqa: E501
Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. # noqa: E501
:return: The available_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._available_replicas
@available_replicas.setter
def available_replicas(self, available_replicas):
"""Sets the available_replicas of this V1StatefulSetStatus.
Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. # noqa: E501
:param available_replicas: The available_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._available_replicas = available_replicas
@property
def collision_count(self):
"""Gets the collision_count of this V1StatefulSetStatus. # noqa: E501
collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
:return: The collision_count of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._collision_count
@collision_count.setter
def collision_count(self, collision_count):
"""Sets the collision_count of this V1StatefulSetStatus.
collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
:param collision_count: The collision_count of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._collision_count = collision_count
@property
def conditions(self):
"""Gets the conditions of this V1StatefulSetStatus. # noqa: E501
Represents the latest available observations of a statefulset's current state. # noqa: E501
:return: The conditions of this V1StatefulSetStatus. # noqa: E501
:rtype: list[V1StatefulSetCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1StatefulSetStatus.
Represents the latest available observations of a statefulset's current state. # noqa: E501
:param conditions: The conditions of this V1StatefulSetStatus. # noqa: E501
:type: list[V1StatefulSetCondition]
"""
self._conditions = conditions
@property
def current_replicas(self):
"""Gets the current_replicas of this V1StatefulSetStatus. # noqa: E501
currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. # noqa: E501
:return: The current_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._current_replicas
@current_replicas.setter
def current_replicas(self, current_replicas):
"""Sets the current_replicas of this V1StatefulSetStatus.
currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. # noqa: E501
:param current_replicas: The current_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._current_replicas = current_replicas
@property
def current_revision(self):
"""Gets the current_revision of this V1StatefulSetStatus. # noqa: E501
currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). # noqa: E501
:return: The current_revision of this V1StatefulSetStatus. # noqa: E501
:rtype: str
"""
return self._current_revision
@current_revision.setter
def current_revision(self, current_revision):
"""Sets the current_revision of this V1StatefulSetStatus.
currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). # noqa: E501
:param current_revision: The current_revision of this V1StatefulSetStatus. # noqa: E501
:type: str
"""
self._current_revision = current_revision
@property
def observed_generation(self):
"""Gets the observed_generation of this V1StatefulSetStatus. # noqa: E501
observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server. # noqa: E501
:return: The observed_generation of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1StatefulSetStatus.
observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server. # noqa: E501
:param observed_generation: The observed_generation of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def ready_replicas(self):
"""Gets the ready_replicas of this V1StatefulSetStatus. # noqa: E501
readyReplicas is the number of pods created for this StatefulSet with a Ready Condition. # noqa: E501
:return: The ready_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._ready_replicas
@ready_replicas.setter
def ready_replicas(self, ready_replicas):
"""Sets the ready_replicas of this V1StatefulSetStatus.
readyReplicas is the number of pods created for this StatefulSet with a Ready Condition. # noqa: E501
:param ready_replicas: The ready_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._ready_replicas = ready_replicas
@property
def replicas(self):
"""Gets the replicas of this V1StatefulSetStatus. # noqa: E501
replicas is the number of Pods created by the StatefulSet controller. # noqa: E501
:return: The replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1StatefulSetStatus.
replicas is the number of Pods created by the StatefulSet controller. # noqa: E501
:param replicas: The replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and replicas is None: # noqa: E501
raise ValueError("Invalid value for `replicas`, must not be `None`") # noqa: E501
self._replicas = replicas
@property
def update_revision(self):
"""Gets the update_revision of this V1StatefulSetStatus. # noqa: E501
updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) # noqa: E501
:return: The update_revision of this V1StatefulSetStatus. # noqa: E501
:rtype: str
"""
return self._update_revision
@update_revision.setter
def update_revision(self, update_revision):
"""Sets the update_revision of this V1StatefulSetStatus.
updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) # noqa: E501
:param update_revision: The update_revision of this V1StatefulSetStatus. # noqa: E501
:type: str
"""
self._update_revision = update_revision
@property
def updated_replicas(self):
"""Gets the updated_replicas of this V1StatefulSetStatus. # noqa: E501
updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. # noqa: E501
:return: The updated_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._updated_replicas
@updated_replicas.setter
def updated_replicas(self, updated_replicas):
"""Sets the updated_replicas of this V1StatefulSetStatus.
updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. # noqa: E501
:param updated_replicas: The updated_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._updated_replicas = updated_replicas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StatefulSetStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1StatefulSetStatus):
return True
return self.to_dict() != other.to_dict()
|
V1StatefulSetStatus
|
python
|
realpython__materials
|
python-enum/iterate.py
|
{
"start": 24,
"end": 423
}
|
class ____(Enum):
VANILLA = 1
CHOCOLATE = 2
MINT = 3
# Iterating over members
for flavor in Flavor:
print(flavor)
# Iterating over members' names
for flavor in Flavor:
print(flavor.name)
# Iterating over members' value
for flavor in Flavor:
print(flavor.value)
# Iterating over __members__
for name, member in Flavor.__members__.items():
print(name, "->", member)
|
Flavor
|
python
|
huggingface__transformers
|
src/transformers/models/xlm/modeling_xlm.py
|
{
"start": 62985,
"end": 66878
}
|
class ____(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLMModel(config)
self.dropout = nn.Dropout(config.dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[dict[str, torch.Tensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, TokenClassifierOutput]:
r"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
|
XLMForTokenClassification
|
python
|
run-llama__llama_index
|
llama-index-packs/llama-index-packs-koda-retriever/llama_index/packs/koda_retriever/base.py
|
{
"start": 10025,
"end": 11245
}
|
class ____(BaseLlamaPack):
def __init__(
self,
index: VectorStoreIndex,
llm: Optional[LLM] = None, # if I could, I'd default to
reranker: Optional[BaseNodePostprocessor] = None,
default_alpha: float = 0.5,
matrix: Union[dict, AlphaMatrix] = DEFAULT_CATEGORIES,
verbose: bool = False,
**kwargs, # kwargs for VectorIndexRetriever
) -> None:
"""Init params."""
self.retriever = KodaRetriever(
index=index,
llm=llm,
reranker=reranker,
default_alpha=default_alpha,
matrix=matrix,
verbose=verbose,
**kwargs,
)
def get_modules(self) -> dict:
"""Get modules."""
return {
"retriever": self.retriever,
"retriever_cls": KodaRetriever,
}
def run(self, query_str: str, **kwargs: Any) -> RESPONSE_TYPE:
"""Run method."""
return self.retriever.retrieve(query_str, **kwargs)
async def arun(self, query_str: str, **kwargs: Any) -> RESPONSE_TYPE:
"""Asynchronous run method."""
return await self.retriever.aretrieve(query_str, **kwargs)
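# Editor's usage sketch (hedged; `index` stands for a pre-built
# VectorStoreIndex and the query string is illustrative):
# pack = KodaRetrieverPack(index=index, default_alpha=0.5, verbose=True)
# nodes = pack.run("How does alpha tuning affect hybrid retrieval?")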
|
KodaRetrieverPack
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py
|
{
"start": 10833,
"end": 11060
}
|
class ____(object):
def __init__(self, domain_dimension):
self._domain_dimension = ops.convert_to_tensor(domain_dimension)
def domain_dimension_tensor(self):
return self._domain_dimension
|
DomainDimensionStubOperator
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/constructor15.py
|
{
"start": 414,
"end": 602
}
|
class ____(Generic[_M, _N]):
def __new__(cls, m: _M, n: _N) -> "B[_M, _N]": ...
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
b: B[Literal[3], Literal[4]] = B(3, 4)
|
B
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_deferred.py
|
{
"start": 54029,
"end": 72863
}
|
class ____(_Polymorphic):
__dialect__ = "default"
@classmethod
def setup_mappers(cls):
super().setup_mappers()
from sqlalchemy import inspect
inspect(Company).add_property(
"managers", relationship(Manager, viewonly=True)
)
def test_load_only_subclass(self):
s = fixture_session()
q = (
s.query(Manager)
.order_by(Manager.person_id)
.options(load_only(Manager.status, Manager.manager_name))
)
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"ORDER BY managers.person_id",
)
@testing.variation("load", ["contains_eager", "joinedload"])
def test_issue_10125(self, load):
s = fixture_session()
employee_alias = aliased(Manager, flat=True)
company_alias = aliased(Company)
if load.contains_eager:
q = (
s.query(company_alias)
.outerjoin(
employee_alias,
company_alias.employees.of_type(employee_alias),
)
.options(
contains_eager(
company_alias.employees.of_type(employee_alias)
).load_only(
employee_alias.person_id,
)
)
)
elif load.joinedload:
q = s.query(company_alias).options(
joinedload(
company_alias.employees.of_type(employee_alias)
).load_only(
employee_alias.person_id,
)
)
else:
load.fail()
if load.contains_eager:
self.assert_compile(
q,
"SELECT people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.person_id AS managers_1_person_id, "
"companies_1.company_id AS companies_1_company_id, "
"companies_1.name AS companies_1_name "
"FROM companies AS companies_1 LEFT OUTER JOIN "
"(people AS people_1 JOIN managers AS managers_1 "
"ON people_1.person_id = managers_1.person_id) "
"ON companies_1.company_id = people_1.company_id",
)
elif load.joinedload:
self.assert_compile(
q,
"SELECT companies_1.company_id AS companies_1_company_id, "
"companies_1.name AS companies_1_name, "
"people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.person_id AS managers_1_person_id "
"FROM companies AS companies_1 LEFT OUTER JOIN "
"(people AS people_1 JOIN managers AS managers_1 "
"ON people_1.person_id = managers_1.person_id) "
"ON companies_1.company_id = people_1.company_id "
"ORDER BY people_1.person_id",
)
else:
load.fail()
def test_load_only_subclass_bound(self):
s = fixture_session()
q = (
s.query(Manager)
.order_by(Manager.person_id)
.options(
Load(Manager).load_only(Manager.status, Manager.manager_name)
)
)
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"ORDER BY managers.person_id",
)
def test_load_only_subclass_and_superclass(self):
s = fixture_session()
q = (
s.query(Boss)
.order_by(Person.person_id)
.options(load_only(Boss.status, Boss.manager_name))
)
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id JOIN boss "
"ON managers.person_id = boss.boss_id ORDER BY people.person_id",
)
def test_load_only_subclass_and_superclass_bound(self):
s = fixture_session()
q = (
s.query(Boss)
.order_by(Person.person_id)
.options(Load(Boss).load_only(Boss.status, Manager.manager_name))
)
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id JOIN boss "
"ON managers.person_id = boss.boss_id ORDER BY people.person_id",
)
def test_load_only_alias_subclass(self):
s = fixture_session()
m1 = aliased(Manager, flat=True)
q = (
s.query(m1)
.order_by(m1.person_id)
.options(load_only(m1.status, m1.manager_name))
)
self.assert_compile(
q,
"SELECT managers_1.person_id AS managers_1_person_id, "
"people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.status AS managers_1_status, "
"managers_1.manager_name AS managers_1_manager_name "
"FROM people AS people_1 JOIN managers AS "
"managers_1 ON people_1.person_id = managers_1.person_id "
"ORDER BY managers_1.person_id",
)
def test_load_only_alias_subclass_bound(self):
s = fixture_session()
m1 = aliased(Manager, flat=True)
q = (
s.query(m1)
.order_by(m1.person_id)
.options(Load(m1).load_only(m1.status, m1.manager_name))
)
self.assert_compile(
q,
"SELECT managers_1.person_id AS managers_1_person_id, "
"people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.status AS managers_1_status, "
"managers_1.manager_name AS managers_1_manager_name "
"FROM people AS people_1 JOIN managers AS "
"managers_1 ON people_1.person_id = managers_1.person_id "
"ORDER BY managers_1.person_id",
)
def test_load_only_subclass_from_relationship_polymorphic(self):
s = fixture_session()
wp = with_polymorphic(Person, [Manager], flat=True)
q = (
s.query(Company)
.join(Company.employees.of_type(wp))
.options(
contains_eager(Company.employees.of_type(wp)).load_only(
wp.Manager.status, wp.Manager.manager_name
)
)
)
self.assert_compile(
q,
"SELECT people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.person_id AS managers_1_person_id, "
"managers_1.status AS managers_1_status, "
"managers_1.manager_name AS managers_1_manager_name, "
"companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN (people AS people_1 LEFT OUTER JOIN "
"managers AS managers_1 ON people_1.person_id = "
"managers_1.person_id) ON companies.company_id = "
"people_1.company_id",
)
def test_load_only_subclass_from_relationship_polymorphic_bound(self):
s = fixture_session()
wp = with_polymorphic(Person, [Manager], flat=True)
q = (
s.query(Company)
.join(Company.employees.of_type(wp))
.options(
Load(Company)
.contains_eager(Company.employees.of_type(wp))
.load_only(wp.Manager.status, wp.Manager.manager_name)
)
)
self.assert_compile(
q,
"SELECT people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.person_id AS managers_1_person_id, "
"managers_1.status AS managers_1_status, "
"managers_1.manager_name AS managers_1_manager_name, "
"companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN (people AS people_1 LEFT OUTER JOIN "
"managers AS managers_1 ON people_1.person_id = "
"managers_1.person_id) ON companies.company_id = "
"people_1.company_id",
)
def test_load_only_subclass_from_relationship(self):
s = fixture_session()
q = (
s.query(Company)
.join(Company.managers)
.options(
contains_eager(Company.managers).load_only(
Manager.status, Manager.manager_name
)
)
)
self.assert_compile(
q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name, "
"managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM companies JOIN (people JOIN managers ON people.person_id = "
"managers.person_id) ON companies.company_id = people.company_id",
)
def test_load_only_subclass_from_relationship_bound(self):
s = fixture_session()
q = (
s.query(Company)
.join(Company.managers)
.options(
Load(Company)
.contains_eager(Company.managers)
.load_only(Manager.status, Manager.manager_name)
)
)
self.assert_compile(
q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name, "
"managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM companies JOIN (people JOIN managers ON people.person_id = "
"managers.person_id) ON companies.company_id = people.company_id",
)
def test_defer_on_wildcard_subclass(self):
"""test case changed as of #7495"""
s = fixture_session()
q = (
s.query(Manager)
.order_by(Person.person_id)
.options(defer("*"), undefer(Manager.status))
)
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, managers.status AS managers_status "
"FROM people JOIN managers ON "
"people.person_id = managers.person_id ORDER BY people.person_id",
)
def test_load_only_subclass_of_type(self):
s = fixture_session()
q = s.query(Company).options(
joinedload(Company.employees.of_type(Manager)).load_only(
Manager.status
)
)
self.assert_compile(
q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name, "
"anon_1.people_person_id AS anon_1_people_person_id, "
"anon_1.people_type AS anon_1_people_type, "
"anon_1.managers_person_id AS anon_1_managers_person_id, "
"anon_1.managers_status AS anon_1_managers_status "
"FROM companies LEFT OUTER JOIN "
"(SELECT people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.name AS people_name, people.type AS people_type, "
"managers.person_id AS managers_person_id, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people LEFT OUTER JOIN managers "
"ON people.person_id = managers.person_id) AS anon_1 "
"ON companies.company_id = anon_1.people_company_id "
"ORDER BY anon_1.people_person_id",
)
def test_wildcard_subclass_of_type(self):
"""fixed as of #7495"""
s = fixture_session()
q = s.query(Company).options(
joinedload(Company.employees.of_type(Manager)).defer("*")
)
self.assert_compile(
q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name, "
"anon_1.people_person_id AS anon_1_people_person_id, "
"anon_1.people_type AS anon_1_people_type, "
"anon_1.managers_person_id AS anon_1_managers_person_id "
"FROM companies LEFT OUTER JOIN "
"(SELECT people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.name AS people_name, people.type AS people_type, "
"managers.person_id AS managers_person_id, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people LEFT OUTER JOIN managers "
"ON people.person_id = managers.person_id) AS anon_1 "
"ON companies.company_id = anon_1.people_company_id "
"ORDER BY anon_1.people_person_id",
)
def test_defer_super_name_on_subclass(self):
s = fixture_session()
q = (
s.query(Manager)
.order_by(Person.person_id)
.options(defer(Person.name))
)
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.type AS people_type, managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"ORDER BY people.person_id",
)
def test_defer_super_name_on_subclass_bound(self):
s = fixture_session()
q = (
s.query(Manager)
.order_by(Person.person_id)
.options(Load(Manager).defer(Manager.name))
)
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.type AS people_type, managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"ORDER BY people.person_id",
)
def test_load_only_from_with_polymorphic_mismatch(self):
s = fixture_session()
wp = with_polymorphic(Person, [Manager], flat=True)
assert_raises_message(
sa.exc.ArgumentError,
r"Mapped class Mapper\[Manager\(managers\)\] does not apply to "
"any of the root entities in this query, e.g. "
r"with_polymorphic\(Person, \[Manager\]\).",
s.query(wp).options(load_only(Manager.status))._compile_context,
)
def test_load_only_from_with_polymorphic_applied(self):
s = fixture_session()
wp = with_polymorphic(Person, [Manager], flat=True)
q = s.query(wp).options(load_only(wp.Manager.status))
self.assert_compile(
q,
"SELECT people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.person_id AS managers_1_person_id, "
"managers_1.status AS managers_1_status "
"FROM people AS people_1 "
"LEFT OUTER JOIN managers AS managers_1 "
"ON people_1.person_id = managers_1.person_id",
)
def test_load_only_of_type_with_polymorphic(self):
s = fixture_session()
wp = with_polymorphic(Person, [Manager], flat=True)
with expect_raises_message(
sa.exc.ArgumentError,
r'ORM mapped entity or attribute "Manager.status" does not link '
r'from relationship "Company.employees.'
r'of_type\(with_polymorphic\(Person, \[Manager\]\)\)".',
):
s.query(Company).options(
joinedload(Company.employees.of_type(wp)).load_only(
Manager.status
)
)._compile_context()
self.assert_compile(
s.query(Company).options(
joinedload(Company.employees.of_type(wp)).load_only(
wp.Manager.status
)
),
# should at least not have manager_name in it
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name, "
"people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.person_id AS managers_1_person_id, "
"managers_1.status AS managers_1_status "
"FROM companies LEFT OUTER JOIN "
"(people AS people_1 LEFT OUTER JOIN managers AS managers_1 "
"ON people_1.person_id = managers_1.person_id) "
"ON companies.company_id = people_1.company_id "
"ORDER BY people_1.person_id",
)
|
InheritanceTest
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/forLoop1.py
|
{
"start": 1606,
"end": 1962
}
|
class ____:
@overload
def __getitem__(self, i: int) -> str:
...
@overload
def __getitem__(self, i: slice) -> list[str]:
...
def __getitem__(self, i: int | slice) -> str | list[str]:
...
c = C()
for c1 in iter(c):
reveal_type(c1, expected_text="str")
for c2 in c:
reveal_type(c2, expected_text="str")
|
C
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 13285,
"end": 13476
}
|
class ____(SchemaBase):
_rootschema = load_schema()
@classmethod
def _default_wrapper_classes(cls) -> Iterator[type[Any]]:
return _subclasses(VegaLiteSchema)
|
VegaLiteSchema
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-number-of-words-found-in-sentences.py
|
{
"start": 29,
"end": 227
}
|
class ____(object):
def mostWordsFound(self, sentences):
"""
:type sentences: List[str]
:rtype: int
"""
return 1+max(s.count(' ') for s in sentences)
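# Editor's sketch (illustrative, not part of the original solution):
# sentences = ["alice and bob love leetcode", "i think so too",
#              "this is great thanks very much"]
# The last sentence has 5 spaces, so the answer is 1 + 5 = 6.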
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/components/core/defs_module.py
|
{
"start": 2139,
"end": 3663
}
|
class ____(BaseModel):
model_config = ConfigDict(extra="forbid")
type: str
attributes: Optional[Mapping[str, Any]] = None
template_vars_module: Optional[str] = None
requirements: Optional[ComponentRequirementsModel] = None
post_processing: Optional[Mapping[str, Any]] = None
def _add_defs_yaml_metadata(
component_yaml_path: Path,
load_context: ComponentLoadContext,
component: Component,
source_position: SourcePosition,
metadata: ArbitraryMetadataMapping,
) -> ArbitraryMetadataMapping:
existing_references_meta = CodeReferencesMetadataSet.extract(metadata)
references = (
existing_references_meta.code_references.code_references
if existing_references_meta.code_references
else []
)
references_to_add = component.get_code_references_for_yaml(
component_yaml_path, source_position, load_context
)
return {
**metadata,
**CodeReferencesMetadataSet(
code_references=CodeReferencesMetadataValue(
code_references=[
*references,
*references_to_add,
],
)
),
"dagster/component_origin": ObjectMetadataValue(component),
# maybe ComponentPath.get_relative_key too
}
def _add_defs_py_metadata(
component: Component,
metadata: ArbitraryMetadataMapping,
):
return {
**metadata,
"dagster/component_origin": ObjectMetadataValue(component),
}
|
ComponentFileModel
|
python
|
PrefectHQ__prefect
|
tests/experimental/test_bundles.py
|
{
"start": 13108,
"end": 20786
}
|
class ____:
def test_is_local_module_builtin(self):
"""Test that built-in modules are not considered local."""
assert not _is_local_module("sys")
assert not _is_local_module("os")
assert not _is_local_module("json")
@pytest.mark.skipif(
not hasattr(sys, "stdlib_module_names"), reason="Requires Python 3.10+"
)
def test_is_local_module_stdlib(self):
"""Test that standard library modules are not considered local."""
assert not _is_local_module("logging")
assert not _is_local_module("asyncio")
assert not _is_local_module("unittest")
def test_is_local_module_site_packages(self):
"""Test that modules in site-packages are not considered local."""
# pytest is definitely installed in site-packages
assert not _is_local_module("pytest")
def test_extract_imports_from_source(self):
"""Test extraction of import statements from source code."""
source = """
import os
import sys
from pathlib import Path
from typing import List, Dict
import numpy as np
from my_module import helper
from my_package.submodule import function
"""
imports = _extract_imports_from_source(source)
assert "os" in imports
assert "sys" in imports
assert "pathlib" in imports
assert "typing" in imports
assert "numpy" in imports
assert "my_module" in imports
assert "my_package.submodule" in imports
# These should not be included as they are individual items, not modules
assert "my_module.helper" not in imports
assert "my_package.submodule.function" not in imports
def test_extract_imports_handles_syntax_errors(self):
"""Test that import extraction handles syntax errors gracefully."""
source = "this is not valid python syntax !!!"
imports = _extract_imports_from_source(source)
assert imports == set()
def test_discover_local_dependencies_with_no_module(self):
"""Test discovery when flow has no module (e.g., defined in REPL)."""
@flow
def repl_flow():
return "repl"
# Mock the flow to have no module
with patch("inspect.getmodule", return_value=None):
deps = _discover_local_dependencies(repl_flow)
assert deps == set()
def test_pickle_local_modules_context_manager(self):
"""Test the context manager for registering local modules."""
@flow
def test_flow():
return "test"
# Track which modules were registered/unregistered
registered = []
unregistered = []
def mock_register(module):
registered.append(module)
def mock_unregister(module):
unregistered.append(module)
with patch("cloudpickle.register_pickle_by_value", side_effect=mock_register):
with patch(
"cloudpickle.unregister_pickle_by_value", side_effect=mock_unregister
):
with patch(
"prefect._experimental.bundles._discover_local_dependencies",
return_value={"test_module"},
):
with patch("importlib.import_module") as mock_import:
mock_module = MagicMock()
mock_module.__name__ = "test_module"
mock_import.return_value = mock_module
with _pickle_local_modules_by_value(test_flow):
# Inside context, module should be registered
assert len(registered) == 1
assert registered[0] == mock_module
# After context, module should be unregistered
assert len(unregistered) == 1
assert unregistered[0] == mock_module
def test_pickle_local_modules_handles_import_errors(
self, caplog: pytest.LogCaptureFixture
):
"""Test that import errors are handled gracefully."""
@flow
def test_flow():
return "test"
with patch(
"prefect._experimental.bundles._discover_local_dependencies",
return_value={"nonexistent_module"},
):
with _pickle_local_modules_by_value(test_flow):
pass
# Check that a debug message was logged about the failure
assert "Failed to register module nonexistent_module" in caplog.text
def test_discover_deeply_nested_local_dependencies(self, tmp_path: Path):
"""Test that local dependencies are discovered recursively through multiple levels.
This tests the scenario where:
- flow_module imports module_b
- module_b imports module_c
- module_c imports module_d
All modules should be discovered, including module_d which is 3 levels deep.
"""
# Create temporary package structure with deep nesting
package_root = tmp_path / "test_packages"
package_root.mkdir()
# Create flow_module package
flow_pkg = package_root / "flow_module"
flow_pkg.mkdir()
(flow_pkg / "__init__.py").write_text("")
# Create module_b package
module_b_pkg = package_root / "module_b"
module_b_pkg.mkdir()
(module_b_pkg / "__init__.py").write_text("")
# Create module_c package
module_c_pkg = package_root / "module_c"
module_c_pkg.mkdir()
(module_c_pkg / "__init__.py").write_text("")
# Create module_d package (deepest level)
module_d_pkg = package_root / "module_d"
module_d_pkg.mkdir()
(module_d_pkg / "__init__.py").write_text("")
# Create module_d with a simple function
(module_d_pkg / "utils.py").write_text("""
def function_d():
return "d"
""")
# Create module_c that imports from module_d
(module_c_pkg / "utils.py").write_text("""
from module_d.utils import function_d
def function_c():
return function_d()
""")
# Create module_b that imports from module_c
(module_b_pkg / "utils.py").write_text("""
from module_c.utils import function_c
def function_b():
return function_c()
""")
# Create flow_module that imports from module_b
(flow_pkg / "my_flow.py").write_text("""
from module_b.utils import function_b
from prefect import flow
@flow
def test_flow():
return function_b()
""")
# Add package_root to sys.path so modules can be imported
sys.path.insert(0, str(package_root))
try:
# Import the flow module and get the flow
import flow_module.my_flow
flow_obj = flow_module.my_flow.test_flow
# Discover dependencies
deps = _discover_local_dependencies(flow_obj)
# All four modules should be discovered
assert "flow_module.my_flow" in deps, (
"Flow module itself should be discovered"
)
assert "module_b.utils" in deps, "First-level import should be discovered"
assert "module_c.utils" in deps, "Second-level import should be discovered"
assert "module_d.utils" in deps, "Third-level import should be discovered"
finally:
# Clean up sys.path and sys.modules
sys.path.remove(str(package_root))
for module in list(sys.modules.keys()):
if module.startswith(
("flow_module", "module_b", "module_c", "module_d")
):
del sys.modules[module]
|
TestLocalDependencyDiscovery
|
python
|
PyCQA__pylint
|
pylint/pyreverse/mermaidjs_printer.py
|
{
"start": 3767,
"end": 4506
}
|
class ____(MermaidJSPrinter):
"""Printer for MermaidJS diagrams wrapped in a html boilerplate."""
HTML_OPEN_BOILERPLATE = """<html>
<body>
<script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
<div class="mermaid">
"""
HTML_CLOSE_BOILERPLATE = """
</div>
</body>
</html>
"""
GRAPH_INDENT_LEVEL = 4
def _open_graph(self) -> None:
self.emit(self.HTML_OPEN_BOILERPLATE)
for _ in range(self.GRAPH_INDENT_LEVEL):
self._inc_indent()
super()._open_graph()
def _close_graph(self) -> None:
for _ in range(self.GRAPH_INDENT_LEVEL):
self._dec_indent()
self.emit(self.HTML_CLOSE_BOILERPLATE)
|
HTMLMermaidJSPrinter
|
python
|
FactoryBoy__factory_boy
|
tests/test_django.py
|
{
"start": 3008,
"end": 3122
}
|
class ____(factory.django.DjangoModelFactory):
class Meta:
model = models.WithSignals
|
WithSignalsFactory
|
python
|
django-import-export__django-import-export
|
tests/core/tests/admin_integration/test_import_functionality.py
|
{
"start": 15745,
"end": 20940
}
|
class ____(AdminTestMixin, TransactionTestCase):
fixtures = ["author"]
def _is_str_in_response(
self,
filename,
input_format,
encoding=None,
str_in_response=None,
follow=False,
status_code=200,
):
response = self._do_import_post(
self.book_import_url,
filename,
input_format,
encoding=encoding,
follow=follow,
)
self.assertEqual(response.status_code, status_code)
if str_in_response is not None:
self.assertContains(response, str_in_response)
def _is_regex_in_response(
self,
filename,
input_format,
encoding=None,
regex_in_response=None,
follow=False,
status_code=200,
):
response = self._do_import_post(
self.book_import_url,
filename,
input_format,
encoding=encoding,
follow=follow,
)
self.assertEqual(response.status_code, status_code)
if regex_in_response is not None:
self.assertRegex(response.content.decode(), regex_in_response)
def test_import_action_create(self):
self._is_str_in_response(
"books.csv",
"0",
follow=True,
str_in_response="Import finished: 1 new, 0 updated, "
+ "0 deleted and 0 skipped books.",
)
self.assertEqual(1, Book.objects.count())
def test_import_action_error_on_save(self):
with mock.patch("core.models.Book.save") as mock_save:
mock_save.side_effect = ValueError("some unknown error")
response = self._do_import_post(self.book_import_url, "books.csv")
self.assertIn("some unknown error", response.content.decode())
@override_settings(IMPORT_EXPORT_USE_TRANSACTIONS=True)
def test_import_transaction_enabled_validation_error(self):
# with transactions enabled, a validation error should cause the entire
# import to be rolled back
self._do_import_post(self.book_import_url, "books-invalid-date.csv")
self.assertEqual(0, Book.objects.count())
@override_settings(IMPORT_EXPORT_USE_TRANSACTIONS=False)
def test_import_transaction_disabled_validation_error(self):
# with transactions disabled, a validation error should not cause the entire
# import to fail
self._do_import_post(self.book_import_url, "books-invalid-date.csv")
self.assertEqual(1, Book.objects.count())
@override_settings(IMPORT_EXPORT_USE_TRANSACTIONS=True)
def test_import_transaction_enabled_core_error(self):
# test that if we send a file with multiple rows,
# and transactions are enabled, a core error means that
# no instances are persisted
index = self._get_input_format_index("json")
with mock.patch("core.admin.BookResource.skip_row") as mock_skip:
mock_skip.side_effect = [None, ValueError("some unknown error"), None]
response = self._do_import_post(self.book_import_url, "books.json", index)
self.assertIn("some unknown error", response.content.decode())
self.assertEqual(0, Book.objects.count())
@override_settings(IMPORT_EXPORT_USE_TRANSACTIONS=False)
def test_import_transaction_disabled_core_error(self):
# with transactions disabled, a core (db constraint) error should not cause the
# entire import to fail
index = self._get_input_format_index("json")
with mock.patch("core.admin.BookResource.skip_row") as mock_skip:
mock_skip.side_effect = [None, ValueError("some unknown error"), None]
response = self._do_import_post(self.book_import_url, "books.json", index)
self.assertIn("some unknown error", response.content.decode())
self.assertEqual(2, Book.objects.count())
def test_import_action_mac(self):
self._is_str_in_response(
"books-mac.csv",
"0",
follow=True,
str_in_response="Import finished: 1 new, 0 updated, "
+ "0 deleted and 0 skipped books.",
)
def test_import_action_iso_8859_1(self):
self._is_str_in_response(
"books-ISO-8859-1.csv",
"0",
"ISO-8859-1",
follow=True,
str_in_response="Import finished: 1 new, 0 updated, "
+ "0 deleted and 0 skipped books.",
)
def test_import_action_decode_error(self):
# attempting to read a file with the incorrect encoding should raise an error
self._is_regex_in_response(
"books-ISO-8859-1.csv",
"0",
follow=True,
encoding="utf-8-sig",
regex_in_response=(
".*UnicodeDecodeError.* encountered " "while trying to read file"
),
)
def test_import_action_binary(self):
self._is_str_in_response(
"books.xls",
"1",
follow=True,
str_in_response="Import finished: 1 new, 0 updated, "
+ "0 deleted and 0 skipped books.",
)
|
TestImportSkipConfirm
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_price.py
|
{
"start": 1603,
"end": 3806
}
|
class ____(ColumnMapExpectation):
"""Expect column values to conform to valid price formats."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_price": [
"£9.15",
"Price: $119.00",
"99€",
"Rp 1.550.000",
],
"malformed_price": [
"",
"Foo",
"50% OFF",
"$",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_price"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_price"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_price"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
"requirements": ["price_parser"],
}
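# Editor's note (hedged sketch): as a ColumnMapExpectation, the "mostly"
# success key relaxes the row-wise check, e.g.
# expect_column_values_to_be_valid_price(column="price", mostly=0.9)
# succeeds when at least 90% of the evaluated rows hold a parseable price.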
if __name__ == "__main__":
ExpectColumnValuesToBeValidPrice().print_diagnostic_checklist()
|
ExpectColumnValuesToBeValidPrice
|
python
|
bokeh__bokeh
|
src/bokeh/models/widgets/inputs.py
|
{
"start": 17968,
"end": 19750
}
|
class ____(InputWidget):
''' Color palette select widget.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
value = Required(String, help="""
The name of the initial or selected color palette.
""")
items = Required(Seq(Tuple(String, Seq(Color))), help="""
A selection of named color palettes to choose from.
""")
swatch_width = NonNegative(Int, default=100, help="""
The width of the UI element showing the preview of a palette, in pixels.
""")
swatch_height = Either(Auto, NonNegative(Int), default="auto", help="""
The height of the UI element showing the preview of a palette, either in
pixels or automatically adjusted.
""")
ncols = Positive(Int, default=1, help="""
The number of columns to split the display of the palettes into.
""")
#-----------------------------------------------------------------------------
# Legacy API
#-----------------------------------------------------------------------------
def ColorMap(*args: Any, **kwargs: Any) -> PaletteSelect:
''' Color palette select widget.
.. deprecated:: 3.4.0
Use ``PaletteSelect`` widget instead.
'''
from ...util.deprecation import deprecated
deprecated((3, 4, 0), "ColorMap widget", "PaletteSelect widget")
return PaletteSelect(*args, **kwargs)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
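# Editor's usage sketch (hedged; palette data is illustrative):
# from bokeh.palettes import Viridis256
# select = PaletteSelect(
#     value="Viridis",
#     items=[("Viridis", Viridis256)],
#     ncols=1,
# )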
|
PaletteSelect
|
python
|
eth-brownie__brownie
|
brownie/network/middlewares/catch_tx_revert.py
|
{
"start": 195,
"end": 1144
}
|
class ____(BrownieMiddlewareABC):
"""
Middleware to handle reverting transactions, bypassing web3's error formatting.
As of web3.py version 5.13.0, a new error-formatting middleware,
`raise_solidity_error_on_revert`, is added by default; it raises a
`ContractLogicError` when an `eth_call` or `eth_estimateGas` tx reverts,
instead of providing us with an RPCError dictionary.
"""
@classmethod
def get_layer(cls, w3: Web3, network_type: str) -> Optional[int]:
return -1
def process_request(
self,
make_request: Callable,
method: RPCEndpoint,
params: Sequence[Any],
) -> Dict[str, Any]:
"""Raise a ValueError when RPC.eth_call or RPC.eth_estimateGas errors."""
result = make_request(method, params)
if method in {"eth_call", "eth_estimateGas"} and "error" in result:
raise ValueError(result["error"])
return result
|
TxRevertCatcherMiddleware
|
python
|
etianen__django-reversion
|
tests/test_app/tests/test_api.py
|
{
"start": 433,
"end": 574
}
|
class ____(TestModelMixin, TestBase):
def testModelSave(self):
TestModel.objects.create()
self.assertNoRevision()
|
SaveTest
|
python
|
scrapy__scrapy
|
scrapy/exceptions.py
|
{
"start": 663,
"end": 749
}
|
class ____(Exception):
"""Request the spider not to be closed yet"""
|
DontCloseSpider
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/tpu_outside_compilation_test.py
|
{
"start": 17406,
"end": 26482
}
|
class ____(test.TestCase,
parameterized.TestCase):
def setUp(self):
super(OutsideCompilationOnUnsupportedOpTest, self).setUp()
config.set_soft_device_placement(True)
def testStringOpWithManualOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(x):
def computation(x):
return tpu_replication.outside_compilation(
computation_with_string_ops, x)
return strategy.run(computation, args=(x,))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testStringOpWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(x):
def computation(x):
return computation_with_string_ops(x)
return strategy.run(computation, args=(x,))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
# Regression test for b/180509859.
def testImageSummary(self):
strategy = get_tpu_strategy()
def run():
@def_function.function
def sample_sequence():
bsz = 3
max_length = 32 * 32
def f():
def body(step, tokens):
next_token = random_ops.random_uniform([bsz])
tokens = tokens.write(step, next_token)
return (step + 1, tokens)
def cond_fn(step, tokens):
del tokens
return math_ops.less(step, max_length)
tokens_var = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
size=max_length,
dynamic_size=False,
clear_after_read=False,
element_shape=(bsz,),
name="tokens_accumulator",
)
step = constant_op.constant(0)
step, tokens_var = while_loop.while_loop(cond_fn, body,
[step, tokens_var])
image_flat = array_ops.transpose(tokens_var.stack(), [1, 0])
image = array_ops.tile(
array_ops.reshape(image_flat, [bsz, 32, 32, 1]), [1, 1, 1, 3])
image_summary_v2.image("image_sample", image,
constant_op.constant(5, dtype=dtypes.int64))
return strategy.run(f)
sample_sequence()
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
run()
events = _events_from_logdir(self, logdir)
decoded_image = image_ops.decode_png(
events[1].summary.value[0].tensor.string_val[2]).numpy()
# Ensure that non-zero values were written to the image summary.
self.assertNotAllEqual(
array_ops.zeros((3072,), dtype=dtypes.float32),
list(decoded_image.flat))
def testSummaryWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
def testNestedFunctionScalarSummary(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
@def_function.function
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
def testHistogramSummaryWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
def host_computation(x):
histogram_summary_v2.histogram("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
@parameterized.parameters((True), (False))
def testSummaryControlFlowIfWithAutoOutsideCompilation(
self, take_true_branch):
strategy = get_tpu_strategy()
@def_function.function
def step():
def computation(x):
x = x + 1.0
if x < 5:
scalar_summary_v2.scalar("x", x, step=0)
x = x * 2.0
return x + 1.0
if take_true_branch:
return strategy.run(computation, args=(2.0,))
else:
return strategy.run(computation, args=(10.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
output_value = 12.
if take_true_branch:
output_value = 7.
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(
output_value, shape=(strategy.num_replicas_in_sync)))
if take_true_branch:
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
#
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "cond/x")
def testAutoOutsideCompilationWithFunctionalNodes(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(a, b):
def fn(a, b):
fn1 = lambda: computation_with_string_ops(a * 100)
fn2 = lambda: computation_with_string_ops(a)
pred = math_ops.greater_equal(a, b)
result = array_ops.identity(
cond.cond(pred, fn1, fn2),
name="uncompilable_control_flow")
return result
return strategy.run(fn, args=(a, b))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0.0, -1.0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testRandomOpsWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step():
def computation():
return random_ops.random_normal(shape=[1, 2, 3])
return strategy.run(computation, args=())
self.assertAllEqual(
strategy.experimental_local_results(train_step())[0].shape, [1, 2, 3])
def testOutsideCompilationWithTPUPartitionedCallOp(self):
"""Tests that control flow with TPUPartitionedCall including outside_compilation works."""
get_tpu_strategy()
def host_computation(x):
return x + 1
@def_function.function()
def train_step(x):
x2 = x + 5.0
logging_ops.print_v2(x2)
x2 = tpu_replication.outside_compilation(host_computation, x2)
return x2 + 4.0
tpu_fn = _rewrite_func_wrapper(train_step)
partitioned_tpu_fn = _tpu_partitioned_call_wrapper(tpu_fn)
concrete = partitioned_tpu_fn.get_concrete_function(
x=tensor.TensorSpec(
shape=(1), dtype=dtypes.float32, name="input_tensor"))
self.assertIsInstance(
concrete(array_ops.ones((1), dtype=dtypes.float32))[0], tensor.Tensor)
if __name__ == "__main__":
test.main()
|
OutsideCompilationOnUnsupportedOpTest
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/timedeltas/test_scalar_compat.py
|
{
"start": 355,
"end": 4851
}
|
class ____:
def test_tdi_total_seconds(self):
# GH#10939
# test index
rng = timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s")
expt = [
1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456.0 / 1e9,
]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
ser = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with nat
ser[1] = np.nan
s_expt = Series(
[1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9, np.nan],
index=[0, 1],
)
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
def test_tdi_total_seconds_all_nat(self):
# with both nat
ser = Series([np.nan, np.nan], dtype="timedelta64[ns]")
result = ser.dt.total_seconds()
expected = Series([np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_tdi_round(self):
td = timedelta_range(start="16801 days", periods=5, freq="30Min")
elt = td[1]
expected_rng = TimedeltaIndex(
[
Timedelta("16801 days 00:00:00"),
Timedelta("16801 days 00:00:00"),
Timedelta("16801 days 01:00:00"),
Timedelta("16801 days 02:00:00"),
Timedelta("16801 days 02:00:00"),
]
)
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq="h"), expected_rng)
assert elt.round(freq="h") == expected_elt
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
td.round(freq="foo")
with pytest.raises(ValueError, match=msg):
elt.round(freq="foo")
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
td.round(freq="ME")
with pytest.raises(ValueError, match=msg):
elt.round(freq="ME")
@pytest.mark.parametrize(
"freq,msg",
[
("YE", "<YearEnd: month=12> is a non-fixed frequency"),
("ME", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
],
)
def test_tdi_round_invalid(self, freq, msg):
t1 = timedelta_range("1 days", periods=3, freq="1 min 2 s 3 us")
with pytest.raises(ValueError, match=msg):
t1.round(freq)
with pytest.raises(ValueError, match=msg):
# Same test for TimedeltaArray
t1._data.round(freq)
# TODO: de-duplicate with test_tdi_round
def test_round(self):
t1 = timedelta_range("1 days", periods=3, freq="1 min 2 s 3 us")
t2 = -1 * t1
t1a = timedelta_range("1 days", periods=3, freq="1 min 2 s")
t1c = TimedeltaIndex(np.array([1, 1, 1], "m8[D]")).as_unit("ns")
# note that negative times round DOWN! so don't give whole numbers
msg = "'d' is deprecated and will be removed in a future version."
with tm.assert_produces_warning(Pandas4Warning, match=msg):
for freq, s1, s2 in [
("ns", t1, t2),
("us", t1, t2),
(
"ms",
t1a,
TimedeltaIndex(
["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"]
),
),
(
"s",
t1a,
TimedeltaIndex(
["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"]
),
),
("12min", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"])),
("h", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"])),
("d", t1c, -1 * t1c),
]:
r1 = t1.round(freq)
r2 = t2.round(freq)
tm.assert_index_equal(r1, s1)
tm.assert_index_equal(r2, s2)
def test_components(self):
rng = timedelta_range("1 days, 10:11:12", periods=2, freq="s")
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
assert not result.iloc[0].isna().all()
assert result.iloc[1].isna().all()
|
TestVectorizedTimedelta
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/yaxis/_tickfont.py
|
{
"start": 235,
"end": 9884
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.yaxis"
_path_str = "layout.yaxis.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.yaxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.yaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.yaxis.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
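# Editor's usage sketch (hedged; `fig` stands for an existing go.Figure):
# fig.update_layout(yaxis_tickfont=Tickfont(family="Courier New", size=12))
# or, via plotly's magic-underscore shorthand:
# fig.update_layout(yaxis_tickfont_size=12)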
|
Tickfont
|
python
|
chroma-core__chroma
|
chromadb/segment/distributed/__init__.py
|
{
"start": 238,
"end": 1003
}
|
class ____(Component):
"""A segment directory is a data interface that manages the location of segments. Concretely, this
means that for distributed chroma, it provides the grpc endpoint for a segment."""
@abstractmethod
def get_segment_endpoints(self, segment: Segment, n: int) -> List[str]:
"""Return the segment residences for a given segment ID. Will return at most n residences.
Should only return less than n residences if there are less than n residences available.
"""
@abstractmethod
def register_updated_segment_callback(
self, callback: Callable[[Segment], None]
) -> None:
"""Register a callback that will be called when a segment is updated"""
pass
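# Editor's sketch (hedged): a minimal static implementation for illustration
# only, glossing over Component lifecycle details of the real system:
# class StaticSegmentDirectory(SegmentDirectory):
#     def __init__(self, system, endpoints):
#         super().__init__(system)
#         self._endpoints = list(endpoints)
#     def get_segment_endpoints(self, segment, n):
#         return self._endpoints[:n]
#     def register_updated_segment_callback(self, callback):
#         pass  # static directory: segment locations never change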
@dataclass
|
SegmentDirectory
|
python
|
pytorch__pytorch
|
test/export/test_draft_export.py
|
{
"start": 597,
"end": 25619
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
init_torchbind_implementations()
self.torch_bind_ops = [
torch.ops._TorchScriptTesting.queue_pop,
torch.ops._TorchScriptTesting.queue_push,
torch.ops._TorchScriptTesting.queue_size,
]
def tearDown(self):
return
def test_missing_meta_kernel_custom_op_basic(self):
with torch.library._scoped_library("mylib", "FRAGMENT"):
@torch.library.custom_op("mylib::foo2", mutates_args={})
def foo2_impl(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b
class M(torch.nn.Module):
def forward(self, a, b):
res = torch.ops.mylib.foo2(a, b)
return res
inp = (torch.ones(3, 3), torch.ones(3, 3))
ep = draft_export(M(), inp)
report = ep._report
self.assertEqual(len(report.failures), 1)
self.assertEqual(
report.failures[0].failure_type, FailureType.MISSING_FAKE_KERNEL
)
inp = (torch.randn(3, 3), torch.randn(3, 3))
self.assertEqual(ep.module()(*inp), M()(*inp))
with torch._library.fake_profile.unsafe_generate_fake_kernels(
report.op_profiles
):
ep.run_decompositions()
def test_missing_meta_kernel_impl(self):
with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
torch.library.define(
"mylib::foo",
"(Tensor a, Tensor b) -> Tensor",
tags=torch.Tag.pt2_compliant_tag,
lib=lib,
)
@torch.library.impl("mylib::foo", "cpu", lib=lib)
def foo_impl(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b
class M(torch.nn.Module):
def forward(self, a, b):
res = torch.ops.mylib.foo(a, b)
res = torch.ops.mylib.foo(res, b)
return res
inp = (torch.ones(3, 3), torch.ones(3, 3))
ep = draft_export(M(), inp)
report = ep._report
self.assertEqual(len(report.failures), 1)
self.assertEqual(
report.failures[0].failure_type, FailureType.MISSING_FAKE_KERNEL
)
inp = (torch.randn(3, 3), torch.randn(3, 3))
self.assertEqual(ep.module()(*inp), M()(*inp))
self.assertEqual(len(report.op_profiles), 1)
self.assertEqual(len(report.op_profiles["mylib.foo.default"]), 1)
print(report.op_profiles)
with torch._library.fake_profile.unsafe_generate_fake_kernels(
report.op_profiles
):
ep = ep.run_decompositions()
self.assertEqual(ep.module()(*inp), M()(*inp))
def test_missing_meta_kernel_custom_op_multiple_profiles(self):
with torch.library._scoped_library("mylib", "FRAGMENT"):
@torch.library.custom_op("mylib::foo3", mutates_args={})
def foo3_impl(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b
class M(torch.nn.Module):
def forward(self, a, b, c, d):
res1 = torch.ops.mylib.foo3(a, b)
res2 = torch.ops.mylib.foo3(c, d)
return res1, res2
inp = (
torch.ones(3, 4),
torch.ones(3, 4),
torch.ones(2, 3, 4),
torch.ones(2, 3, 4),
)
ep = draft_export(M(), inp)
report = ep._report
self.assertEqual(len(report.failures), 1)
self.assertEqual(
report.failures[0].failure_type, FailureType.MISSING_FAKE_KERNEL
)
self.assertEqual(len(report.op_profiles), 1)
self.assertEqual(len(report.op_profiles["mylib.foo3.default"]), 2)
with torch._library.fake_profile.unsafe_generate_fake_kernels(
report.op_profiles
):
ep.run_decompositions()
def test_missing_meta_kernel_custom_op_update_profile(self):
with torch.library._scoped_library("mylib", "FRAGMENT"):
@torch.library.custom_op("mylib::foo8", mutates_args={})
def foo8_impl(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b
class M(torch.nn.Module):
def forward(self, a, b):
res = torch.ops.mylib.foo8(a, b)
return res
inp = (
torch.ones(3, 4),
torch.ones(3, 4),
)
ep = draft_export(M(), inp)
report = ep._report
self.assertEqual(len(report.op_profiles), 1)
self.assertEqual(len(report.op_profiles["mylib.foo8.default"]), 1)
new_inp = (
torch.ones(2, 3, 4),
torch.ones(2, 3, 4),
)
with torch._library.fake_profile.unsafe_generate_fake_kernels(
report.op_profiles
):
with FakeTensorMode(allow_non_fake_inputs=True, shape_env=ShapeEnv()):
torch.ops.mylib.foo8(*inp)
with self.assertRaisesRegex(
RuntimeError, "no profiles match the given inputs"
):
torch.ops.mylib.foo8(*new_inp)
ep = draft_export(M(), new_inp)
report = ep._report
self.assertEqual(len(report.op_profiles), 1)
self.assertEqual(len(report.op_profiles["mylib.foo8.default"]), 1)
with (
torch._library.fake_profile.unsafe_generate_fake_kernels(
report.op_profiles
),
FakeTensorMode(allow_non_fake_inputs=True, shape_env=ShapeEnv()),
):
torch.ops.mylib.foo8(*new_inp)
# Existing registration has been updated to match the new
# profile traced with draft-export
with self.assertRaisesRegex(
RuntimeError, "no profiles match the given inputs"
):
torch.ops.mylib.foo8(*inp)
@unittest.skipIf(not torch.cuda.is_available(), "Requires cuda")
def test_missing_meta_kernel_guard(self):
with torch.library._scoped_library("mylib", "FRAGMENT"):
@torch.library.custom_op("mylib::foo4", mutates_args={})
def foo4_impl(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b
class M(torch.nn.Module):
def forward(self, a, b):
res1 = torch.ops.mylib.foo4(a, b)
return res1
inp = (
torch.ones(3, 4),
torch.ones(3, 4),
)
ep = draft_export(
M(),
inp,
dynamic_shapes={
"a": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC},
"b": {0: Dim.DYNAMIC, 1: Dim.DYNAMIC},
},
)
inp = (torch.randn(2, 3), torch.randn(2, 3))
self.assertEqual(ep.module()(*inp), M()(*inp))
m = ep.module()
with self.assertRaisesRegex(RuntimeError, "Tensor device mismatch!"):
bad_device_inps = (
torch.randn(2, 3, device=torch.device("cuda")),
torch.randn(2, 3, device=torch.device("cuda")),
)
m(*bad_device_inps)
with self.assertRaisesRegex(RuntimeError, "Tensor dtype mismatch!"):
bad_dtype_inps = (
torch.randn(2, 3, dtype=torch.float16),
torch.randn(2, 3, dtype=torch.float16),
)
m(*bad_dtype_inps)
def test_fake_infer_dense_in_memory_check(self):
with torch.library._scoped_library("mylib", "FRAGMENT"):
@torch.library.custom_op("mylib::foo5", mutates_args={})
def foo5_impl(a: torch.Tensor) -> torch.Tensor:
return a * 2
@torch.library.custom_op("mylib::foo6", mutates_args={})
def foo6_impl(a: torch.Tensor) -> torch.Tensor:
return (a * 2)[:, :-1, :-1] # not dense in memory
@torch.library.custom_op("mylib::foo7", mutates_args={})
def foo7_impl(a: torch.Tensor) -> torch.Tensor:
return (a * 2)[:, 1:-1, :] # non-zero storage offset
class Foo(torch.nn.Module):
def forward(self, x, opt):
if opt == 0:
return torch.ops.mylib.foo5(x)
elif opt == 1:
return torch.ops.mylib.foo6(x)
else:
return torch.ops.mylib.foo7(x)
draft_export(Foo(), (torch.randn(80, 4, 4), 0))
draft_export(Foo(), (torch.randn(80, 1, 4), 0))
draft_export(Foo(), (torch.randn(1, 4, 1, 1, 4, 1, 4), 0))
with self.assertRaisesRegex(
RuntimeError,
"a return was not dense in memory",
):
draft_export(Foo(), (torch.randn(4, 6, 8), 1))
with self.assertRaisesRegex(
RuntimeError,
"a return has a non-zero storage offset",
):
draft_export(Foo(), (torch.randn(4, 6, 8), 2))
def test_data_dependent_failure(self):
with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
torch.library.define(
"mylib::foo1",
"(Tensor a, Tensor b) -> Tensor",
tags=torch.Tag.pt2_compliant_tag,
lib=lib,
)
@torch.library.impl("mylib::foo1", "cpu", lib=lib)
def foo_impl(a, b):
return a + b
class M(torch.nn.Module):
def forward(self, a, b, c):
res = torch.ops.mylib.foo1(a, b)
c_item = c.item()
if c_item > 0:
return res[:c_item]
inp = (torch.ones(3, 3), torch.ones(3, 3), torch.tensor(3))
ep = draft_export(M(), inp)
report = ep._report
self.assertTrue(len(report.failures) > 0)
self.assertEqual(
report.failures[0].failure_type, FailureType.MISSING_FAKE_KERNEL
)
self.assertEqual(
report.failures[1].failure_type, FailureType.DATA_DEPENDENT_ERROR
)
inp = (torch.randn(3, 3), torch.randn(3, 3), torch.tensor(2))
self.assertEqual(ep.module()(*inp), M()(*inp))
def test_unbacked_div_mod_replacement(self):
class M(torch.nn.Module):
def forward(self, x):
x = torch.zeros(x.item())
x = x.unsqueeze(0).repeat(10, 2)
return x.view(-1, 2, 2345)
ep = draft_export(M(), (torch.tensor([938]),))
report = ep._report
self.assertEqual(len(report.failures), 0)
def test_dedup_data_dependent_failure(self):
class M(torch.nn.Module):
def forward(self, x, y, z):
res = 0
for v in [x, y]:
b = v.item()
if b > 10:
res += v * b
else:
res += v + b
return z * res
inp = (torch.tensor(5), torch.tensor(3), torch.tensor(2))
ep = draft_export(M(), inp)
report = ep._report
self.assertEqual(len(report.failures), 1)
self.assertEqual(
report.failures[0].failure_type, FailureType.DATA_DEPENDENT_ERROR
)
inp = (torch.tensor(4), torch.tensor(2), torch.tensor(6))
self.assertEqual(ep.module()(*inp), M()(*inp))
# the fake tensors on node.meta["val"] should have real_tensor
gm = ep.module()
tensors = [
node.meta.get("val").real_tensor
for node in gm.graph.nodes
if node.op == "placeholder"
]
self.assertTrue(all(isinstance(t, torch.Tensor) for t in tensors))
def test_complex_data_dependent_expr(self):
class M(torch.nn.Module):
def forward(self, x, y):
a = x.item()
a = -a
a = a // 3
a = a + 5
z = torch.cat([y, y])
if a > 0:
return z[:a]
ep = draft_export(
M(),
(torch.tensor(6), torch.randn(5)),
dynamic_shapes={"x": None, "y": {0: Dim.DYNAMIC}},
)
report = ep._report
self.assertTrue(len(report.failures) > 0)
self.assertEqual(
report.failures[0].failure_type, FailureType.DATA_DEPENDENT_ERROR
)
for _ep in [ep, ep.run_decompositions()]:
# unbacked bindings
unbacked_binding_symbols = set()
for node in _ep.graph.nodes:
if bindings := node.meta.get("unbacked_bindings"):
unbacked_binding_symbols.update(bindings.keys())
self.assertEqual(len(unbacked_binding_symbols), 2)
def test_offsets(self):
class M(torch.nn.Module):
def forward(self, x):
a = x.item()
if a == 0:
raise RuntimeError("bad")
return x * a
inp = (torch.tensor(3),)
draft_export(M(), inp)
def test_shape_failure(self):
class M(torch.nn.Module):
def forward(self, a):
assert a.shape[0] == 3
return a * a
inp = (torch.ones(3, 3),)
ep = draft_export(
M(),
inp,
dynamic_shapes={"a": {0: Dim("a0")}},
prefer_deferred_runtime_asserts_over_guards=True,
)
report = ep._report
self.assertEqual(len(report.failures), 1)
self.assertEqual(report.failures[0].failure_type, FailureType.GUARD_ADDED)
inp = (torch.randn(3, 3),)
self.assertEqual(ep.module()(*inp), M()(*inp))
inp = (torch.randn(4, 3),)
with self.assertRaisesRegex(
AssertionError,
re.escape("Guard failed: a.size()[0] <= 3"),
):
# expected <= 3, but got 4
ep.module()(*inp)
def test_side_effect1(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("a", torch.tensor(2))
def forward(self, b):
a_item = self.a.item()
if a_item == 2:
res = a_item * b
else:
res = (a_item + 1) * b
self.a.add_(1)
a_item = self.a.item()
if a_item == 3:
res = a_item * res
else:
res = (a_item + 1) * res
return res
inp = (torch.ones(3, 3),)
mod = M()
ep = draft_export(mod, inp)
self.assertEqual(mod.a, torch.tensor(2))
FileCheck().check_count("torch.ops.aten.add.default", 0, exactly=True).run(
ep.graph_module.code
)
def test_side_effect_inps(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x.sin_()
return x
inp = (torch.ones(3, 3),)
ep = draft_export(M(), inp)
report = ep._report
self.assertTrue(report.successful())
self.assertEqual(inp[0], torch.ones(3, 3))
def test_masked_linear(self):
class M(torch.nn.Module):
def forward(self, x, mask, weight, bias):
masked = x[mask != 0, :, :]
return torch.nn.functional.linear(masked, weight, bias)
x = torch.zeros(10)
x[0] += 1
inp = (torch.randn(10, 8, 7), x, torch.randn(25, 7), torch.randn(25))
draft_ep = draft_export(M(), inp)
ep = export(M(), inp)
self.assertEqual(draft_ep.module()(*inp), ep.module()(*inp))
x[2] += 1
x[3] += 1
self.assertEqual(draft_ep.module()(*inp), ep.module()(*inp))
def test_torchbind(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(2, 2)
def forward(self, tq, x):
x_cos = tq.pop() + tq.float_size() + self.linear(x)
if tq.is_empty():
x_sin = self.linear(tq.pop()) - tq.size() + x
else:
x_sin = tq.pop() + tq.size() + x
return x_sin, x_cos, tq
mod = Model()
tq = _empty_tensor_queue()
tq2 = copy.deepcopy(tq)
a = torch.randn(2, 2)
b = torch.randn(2, 2)
tq.push(a)
tq.push(b)
tq3 = copy.deepcopy(tq)
inp = (tq, torch.randn(2, 2))
ep = draft_export(mod, inp)
report = ep._report
self.assertTrue(report.successful())
self.assertEqual(tq2.size(), 0)
self.assertEqual(tq3.size(), 2)
self.assertEqual(tq.size(), 2)
def test_override_size_and_dtype_mismatched_fake_kernels(self):
with torch.library._scoped_library("mylib", "FRAGMENT"):
class M(torch.nn.Module):
def forward(self, a):
return torch.ops.mylib.foo9(a)
@torch.library.custom_op("mylib::foo9", mutates_args={})
def foo(a: torch.Tensor) -> list[torch.Tensor]:
x = a * 2
y = a.repeat(2, 2)
z = a.to(torch.bfloat16)
return [x, y, z]
@torch.library.register_fake("mylib::foo9")
def foo_fake_impl(a):
x = torch.empty_like(a) # good
y = torch.empty_like(a) # size mismatch
z = torch.empty_like(a) # dtype mismatch
return [x, y, z]
mod = M()
inputs = (torch.randn(3, 3),)
with self.assertRaises(RuntimeError):
with torch._functorch.config.patch(
fake_tensor_propagate_real_tensors=True
):
export(mod, inputs, strict=True)
ep = draft_export(mod, inputs)
report = ep._report
for ep_out, eager_out in zip(ep.module()(*inputs), mod(*inputs)):
self.assertTrue(torch.allclose(ep_out, eager_out))
self.assertEqual(ep_out.dtype, eager_out.dtype)
self.assertEqual(len(report.failures), 2)
self.assertEqual(
report.failures[0].failure_type, FailureType.MISMATCHED_FAKE_KERNEL
)
self.assertEqual(
report.failures[1].failure_type, FailureType.MISMATCHED_FAKE_KERNEL
)
self.assertEqual(
sorted([f.data["reason"] for f in report.failures]),
[
"Dtypes torch.bfloat16 and torch.float32 are not equal!",
"mismatch between fake value 3 and real value 6 ",
],
)
with torch._library.fake_profile.unsafe_generate_fake_kernels(
report.op_profiles
):
ep.run_decompositions()
def test_override_incorrectly_aliasing_kernel(self):
with torch.library._scoped_library("mylib", "FRAGMENT"):
@torch.library.custom_op("mylib::foo10", mutates_args={})
def foo(a: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
return a * 2, a + 2
@torch.library.register_fake("mylib::foo10")
def foo_fake_impl(a):
return a, torch.empty_like(a) # incorrectly aliasing
class M(torch.nn.Module):
def forward(self, a):
return torch.ops.mylib.foo10(a)
mod = M()
inputs = (torch.randn(3, 3),)
with self.assertRaisesRegex(
RuntimeError,
"Real tensor propagation found an aliasing mismatch",
):
with torch._functorch.config.patch(
fake_tensor_propagate_real_tensors=True
):
export(mod, inputs, strict=True)
ep = draft_export(mod, inputs)
report = ep._report
for ep_out, eager_out in zip(
tree_leaves(ep.module()(*inputs)), tree_leaves(mod(*inputs))
):
self.assertTrue(torch.allclose(ep_out, eager_out))
self.assertEqual(ep_out.dtype, eager_out.dtype)
self.assertEqual(len(report.failures), 1)
self.assertEqual(
report.failures[0].failure_type, FailureType.MISMATCHED_FAKE_KERNEL
)
self.assertTrue(
"Mismatched aliasing spec between fake kernel and real kernel"
in report.failures[0].data["reason"]
)
def test_override_mismatched_fake_kernel_with_unbacked_symbols(self):
with torch.library._scoped_library("mylib", "FRAGMENT"):
@torch.library.custom_op("mylib::foo11", mutates_args={})
def foo11(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a[b.item()].to(torch.bfloat16)
@torch.library.register_fake("mylib::foo11")
def foo_fake_impl(a, b):
ctx = torch.library.get_ctx()
u = ctx.new_dynamic_size()
return torch.empty(u, a.shape[1], dtype=a.dtype)
class M(torch.nn.Module):
def forward(self, a, b):
return torch.ops.mylib.foo11(a, b)
mod = M()
inputs = (torch.randn(100, 4), torch.tensor(10))
ep = draft_export(mod, inputs)
report = ep._report
for ep_out, eager_out in zip(ep.module()(*inputs), mod(*inputs)):
self.assertTrue(torch.allclose(ep_out, eager_out))
self.assertEqual(ep_out.dtype, eager_out.dtype)
self.assertEqual(len(report.failures), 1)
self.assertEqual(
report.failures[0].failure_type, FailureType.MISMATCHED_FAKE_KERNEL
)
self.assertEqual(
report.failures[0].data["reason"],
"Dtypes torch.bfloat16 and torch.float32 are not equal!",
)
with torch._library.fake_profile.unsafe_generate_fake_kernels(
report.op_profiles
):
ep.run_decompositions()
# https://github.com/pytorch/pytorch/issues/140625
@unittest.skipIf(IS_WINDOWS, "aoti_compile_and_package not supported on Windows")
def test_constantify_unbacked_symbol(self):
class M(torch.nn.Module):
def forward(self, x, y):
xt = torch.tensor(x.shape)
u0 = xt[0].item()
return y * torch.arange(u0)
mod = M()
example_inputs = (torch.randn(3, 5), torch.randn(3))
draft_ep = draft_export(mod, example_inputs)
with tempfile.NamedTemporaryFile(suffix=".pt2") as f:
torch._inductor.aoti_compile_and_package(
draft_ep,
package_path=f.name,
)
@unittest.skipIf(
not torch.cuda.is_available()
or torch.cuda.get_device_properties(0).total_memory < 2**28,
"Requires 16 MB GPU memory to pass the test; setting it higher to catch violations",
)
def test_cuda_memory_usage(self):
# This used to OOM
class Foo(torch.nn.Module):
def forward(self, x):
for _ in range(100):
x = x + 1e-3
return x
# measure base usage
device = torch.device("cuda:0")
torch.cuda.reset_peak_memory_stats()
base_usage = torch.cuda.memory_allocated(device)
# usage with input tensor allocated
x = torch.randn(2**10, 2**10).to(device)
x_usage = torch.cuda.memory_allocated(device)
# draft export peak memory usage
draft_export(Foo(), (x,), strict=False)
peak_mem_usage = torch.cuda.memory_stats(device)["allocated_bytes.all.peak"]
# right now it's actually exactly 4x;
# I guess original tensor, 2 tensors per add op, 1 for clone stored in node.meta["val"]
self.assertTrue((peak_mem_usage - base_usage) <= (x_usage - base_usage) * 4.0)
if __name__ == "__main__":
run_tests()
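# Sketch of the fix the MISSING_FAKE_KERNEL failures above point at
# (illustrative; op and kernel names are hypothetical): registering a fake
# kernel lets export trace the op without running the real implementation.
# @torch.library.custom_op("mylib::foo2", mutates_args={})
# def foo2_impl(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
#     return a + b
# @foo2_impl.register_fake
# def _(a, b):
#     return torch.empty_like(a)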
|
TestDraftExport
|
python
|
getsentry__sentry
|
src/sentry/interfaces/stacktrace.py
|
{
"start": 3809,
"end": 11512
}
|
class ____(Interface):
grouping_variants = ["system", "app"]
@classmethod
def to_python(cls, data, **kwargs):
for key in (
"abs_path",
"colno",
"context_line",
"data",
"errors",
"filename",
"function",
"raw_function",
"image_addr",
"in_app",
"instruction_addr",
"addr_mode",
"lineno",
"module",
"package",
"platform",
"post_context",
"pre_context",
"source_link",
"symbol",
"symbol_addr",
"trust",
"vars",
"lock",
):
data.setdefault(key, None)
return super().to_python(data, **kwargs)
def to_json(self):
return prune_empty_keys(
{
"abs_path": self.abs_path or None,
"filename": self.filename or None,
"platform": self.platform or None,
"module": self.module or None,
"function": self.function or None,
"raw_function": self.raw_function or None,
"package": self.package or None,
"image_addr": self.image_addr,
"symbol": self.symbol,
"symbol_addr": self.symbol_addr,
"instruction_addr": self.instruction_addr,
"addr_mode": self.addr_mode,
"trust": self.trust,
"in_app": self.in_app,
"context_line": self.context_line,
"pre_context": self.pre_context or None,
"post_context": self.post_context or None,
"vars": self.vars or None,
"data": self.data or None,
"errors": self.errors or None,
"lineno": self.lineno,
"colno": self.colno,
"lock": self.lock,
"source_link": self.source_link or None,
}
)
def get_api_context(self, is_public=False, platform=None, pad_addr=None):
from sentry.stacktraces.functions import (
get_function_name_for_frame,
get_source_link_for_frame,
)
function = get_function_name_for_frame(self, platform)
source_link = get_source_link_for_frame(self)
data = {
"filename": self.filename,
"absPath": self.abs_path,
"module": self.module,
"package": self.package,
"platform": self.platform,
"instructionAddr": pad_hex_addr(self.instruction_addr, pad_addr),
"symbolAddr": pad_hex_addr(self.symbol_addr, pad_addr),
"function": function,
"rawFunction": self.raw_function,
"symbol": self.symbol,
"context": get_context(
lineno=self.lineno,
context_line=self.context_line,
pre_context=self.pre_context,
post_context=self.post_context,
),
"lineNo": self.lineno,
"colNo": self.colno,
"inApp": self.in_app,
"trust": self.trust,
"errors": self.errors,
"lock": self.lock,
"sourceLink": source_link,
}
if not is_public:
data["vars"] = self.vars
if self.addr_mode and self.addr_mode != "abs":
data["addrMode"] = self.addr_mode
# TODO(dcramer): abstract out this API
if self.data and "sourcemap" in self.data:
data.update(
{
"map": self.data["sourcemap"].rsplit("/", 1)[-1],
"origFunction": self.data.get("orig_function", "?"),
"origAbsPath": self.data.get("orig_abs_path", "?"),
"origFilename": self.data.get("orig_filename", "?"),
"origLineNo": self.data.get("orig_lineno", "?"),
"origColNo": self.data.get("orig_colno", "?"),
}
)
if is_url(self.data["sourcemap"]):
data["mapUrl"] = self.data["sourcemap"]
if self.data:
if "symbolicator_status" in self.data:
data["symbolicatorStatus"] = self.data["symbolicator_status"]
if "min_grouping_level" in self.data:
data["minGroupingLevel"] = self.data["min_grouping_level"]
return data
def get_meta_context(self, meta, is_public=False, platform=None):
if not meta:
return
return {
"filename": meta.get("filename"),
"absPath": meta.get("abs_path"),
"module": meta.get("module"),
"package": meta.get("package"),
"platform": meta.get("platform"),
"instructionAddr": meta.get("instruction_addr"),
"symbolAddr": meta.get("symbol_addr"),
"function": meta.get("function"),
"symbol": meta.get("symbol"),
"context": get_context(
lineno=meta.get("lineno"),
context_line=meta.get("context_line"),
pre_context=meta.get("pre_context"),
post_context=meta.get("post_context"),
),
"lineNo": meta.get("lineno"),
"colNo": meta.get("colno"),
"inApp": meta.get("in_app"),
"trust": meta.get("trust"),
"errors": meta.get("errors"),
"lock": meta.get("lock"),
"sourceLink": meta.get("source_link"),
}
def is_url(self):
if not self.abs_path:
return False
# URLs can be generated such that they are:
# blob:http://example.com/7f7aaadf-a006-4217-9ed5-5fbf8585c6c0
# https://developer.mozilla.org/en-US/docs/Web/API/URL/createObjectURL
if self.abs_path.startswith("blob:"):
return True
return is_url(self.abs_path)
def is_caused_by(self):
# XXX(dcramer): don't compute hash using frames containing the 'Caused by'
# text as it contains an exception value which may contain dynamic
# values (see raven-java#125)
return self.filename.startswith("Caused by: ")
def is_unhashable_module(self, platform):
# Fix for the case where module is a partial copy of the URL
# and should not be hashed
if (
platform == "javascript"
and "/" in self.module
and self.abs_path
and self.abs_path.endswith(self.module)
):
return True
elif platform == "java" and "$$Lambda$" in self.module:
return True
return False
def is_unhashable_function(self):
# TODO(dcramer): lambda$ is Java specific
# TODO(dcramer): [Anonymous is PHP specific (used for things like SQL
# queries and JSON data)
return self.function.startswith(("lambda$", "[Anonymous"))
def to_string(self, event) -> str:
if event.platform is not None:
choices = [event.platform]
else:
choices = []
choices.append("default")
templates = ["sentry/partial/frames/%s.txt" % choice for choice in choices]
return render_to_string(
templates,
{
"abs_path": self.abs_path,
"filename": self.filename,
"function": self.function,
"module": self.module,
"lineno": self.lineno,
"colno": self.colno,
"context_line": self.context_line,
},
).strip("\n")
|
Frame
|
python
|
numba__numba
|
numba/core/typing/arraydecl.py
|
{
"start": 7907,
"end": 8193
}
|
class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
[ary, idx] = args
out = get_array_index_type(ary, idx)
if out is not None:
return signature(out.result, ary, out.index)
@infer_global(operator.setitem)
|
GetItemBuffer
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/common.py
|
{
"start": 23221,
"end": 26789
}
|
class ____:
def __init__(self, body: LoopBody) -> None:
self.body = body
self.graphs: dict[Union[Callable[..., Any], str], Any] = {
"root": body.root_block.graph
}
for k, v in body.subblocks.items():
self.graphs[k] = v.graph
def deduce_node_dtype_by_inputs(self, node: torch.fx.Node) -> Optional[torch.dtype]:
inputs = node.all_input_nodes
input_nodes = [
n for n in inputs if isinstance(n, torch.fx.Node) and n.op != "placeholder"
]
if len(input_nodes) == 0:
return None
all_input_nodes_propagated = all(
OptimizationContext.key in n.meta
and n.meta[OptimizationContext.key].dtype is not None
for n in input_nodes
)
if not all_input_nodes_propagated:
return None
return functools.reduce(
torch.promote_types,
[n.meta[OptimizationContext.key].dtype for n in input_nodes],
)
def deduce_node_dtype_by_subgraph(self, node: torch.fx.Node) -> torch.dtype:
sub_graph = self.graphs[node.target]
dtype = self.propagate_graph(sub_graph)
assert dtype
return dtype
def deduce_node_dtype(self, node: torch.fx.Node) -> Optional[torch.dtype]:
if node.op == "placeholder":
return None
if node.target == "output" and len(node.args) != 1:
# we can infer the output node only if it has exactly 1 arg
return None
if node.target is operator.getitem:
node_arg = node.args[0]
assert isinstance(node_arg, torch.fx.Node), type(node_arg)
return self.deduce_node_dtype(node_arg)
assert isinstance(node.target, str), type(node.target)
if node.target.startswith("masked_subblock"):
return self.deduce_node_dtype_by_subgraph(node)
if (
output_dtype := deduce_output_dtype_by_name(
node.target,
*node.args,
**node.kwargs,
)
) is not None:
return output_dtype
return self.deduce_node_dtype_by_inputs(node)
def propagate_graph(self, graph: torch.fx.Graph) -> Optional[torch.dtype]:
assert graph.nodes
graph_dtype: Optional[torch.dtype] = None
# For masked_subblock, we use output's dtype to represent
# the dtype of this subgraph. For other cases, graph_dtype
# might be None
for node in graph.nodes:
if OptimizationContext.key in node.meta:
opt_ctx = node.meta[OptimizationContext.key]
else:
opt_ctx = OptimizationContext()
opt_ctx.dtype = self.deduce_node_dtype(node)
node.meta[OptimizationContext.key] = opt_ctx
if node.target == "output":
graph_dtype = opt_ctx.dtype
return graph_dtype
def propagate(self) -> Optional[torch.dtype]:
return self.propagate_graph(self.graphs["root"])
@classmethod
def propagate_loopbody(cls, body: LoopBody) -> Optional[torch.dtype]:
return cls(body).propagate()
@classmethod
def propagate_scheduler_node(cls, node: SchedulerNode) -> Optional[torch.dtype]:
from ..loop_body import LoopBody
from ..scheduler import SchedulerNode
assert isinstance(node, SchedulerNode), type(node)
assert isinstance(node._body, LoopBody), type(node._body)
return DataTypePropagation.propagate_loopbody(node._body)
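# Standalone check of the promotion rule used in deduce_node_dtype_by_inputs
# (illustrative): functools.reduce folds the input dtypes pairwise through
# torch.promote_types.
# import functools, torch
# functools.reduce(torch.promote_types,
#                  [torch.int32, torch.int64, torch.float32])  # -> torch.float32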
|
DataTypePropagation
|
python
|
mkdocs__mkdocs
|
mkdocs/tests/config/config_options_tests.py
|
{
"start": 81616,
"end": 83084
}
|
class ____(TestCase):
class Schema(Config):
plugins = c.Plugins(default=[])
hooks = c.Hooks('plugins')
@tempdir()
def test_hooks(self, src_dir) -> None:
write_file(
b'def on_page_markdown(markdown, **kwargs): return markdown.replace("f", "z")',
os.path.join(src_dir, 'hooks', 'my_hook.py'),
)
write_file(
b'foo foo',
os.path.join(src_dir, 'docs', 'index.md'),
)
conf = self.get_config(
self.Schema,
{'hooks': ['hooks/my_hook.py']},
config_file_path=os.path.join(src_dir, 'mkdocs.yml'),
)
self.assertIn('hooks/my_hook.py', conf.plugins)
hook = conf.plugins['hooks/my_hook.py']
self.assertTrue(hasattr(hook, 'on_page_markdown'))
self.assertEqual(
{**conf.plugins.events, 'page_markdown': [hook.on_page_markdown]},
conf.plugins.events,
)
self.assertEqual(hook.on_page_markdown('foo foo'), 'zoo zoo') # type: ignore[call-arg]
self.assertFalse(hasattr(hook, 'on_nav'))
def test_hooks_wrong_type(self) -> None:
with self.expect_error(hooks="Expected a list of items, but a <class 'int'> was given."):
self.get_config(self.Schema, {'hooks': 6})
with self.expect_error(hooks="Expected type: <class 'str'> but received: <class 'int'>"):
self.get_config(self.Schema, {'hooks': [7]})
|
HooksTest
|
python
|
tensorflow__tensorflow
|
tensorflow/tools/ci_build/linux/mkl/set-build-env.py
|
{
"start": 7901,
"end": 13493
}
|
class ____(object):
"""Prepares the proper environment settings for various Intel platforms."""
default_platform_ = "haswell"
PLATFORMS_ = {
"nehalem": NehalemPlatform(),
"sandybridge": SandyBridgePlatform(),
"haswell": HaswellPlatform(),
"skylake": SkylakePlatform(),
"cascadelake": CascadelakePlatform(),
"icelake-client": IcelakeClientPlatform(),
"icelake-server": IcelakeServerPlatform(),
}
def __init__(self):
self.args = None
self.bazel_flags_ = "build "
self.target_platform_ = None
# Return a tuple of the current gcc version
def get_gcc_version(self):
gcc_major_version = 0
gcc_minor_version = 0
# check to see if gcc is present
gcc_path = ""
gcc_path_cmd = "command -v gcc"
try:
gcc_path = subprocess.check_output(gcc_path_cmd, shell=True, stderr=subprocess.STDOUT).strip()
print("gcc located here: {}".format(gcc_path))
if not os.access(gcc_path, os.F_OK | os.X_OK):
raise ValueError(
"{} does not exist or is not executable.".format(gcc_path))
gcc_output = subprocess.check_output(
[gcc_path, "-dumpfullversion", "-dumpversion"],
stderr=subprocess.STDOUT).strip()
# handle python2 vs 3 (bytes vs str type)
if isinstance(gcc_output, bytes):
gcc_output = gcc_output.decode("utf-8")
print("gcc version: {}".format(gcc_output))
gcc_info = gcc_output.split(".")
gcc_major_version = int(gcc_info[0])
gcc_minor_version = int(gcc_info[1])
except subprocess.CalledProcessError as e:
print("Problem getting gcc info: {}".format(e))
gcc_major_version = 0
gcc_minor_version = 0
return gcc_major_version, gcc_minor_version
def parse_args(self):
"""Set up argument parser, and parse CLI args."""
arg_parser = argparse.ArgumentParser(
description="Parse the arguments for the "
"TensorFlow build environment "
" setter")
arg_parser.add_argument(
"--disable-mkl",
dest="disable_mkl",
help="Turn off MKL. By default the compiler flag "
"--config=mkl is enabled.",
action="store_true")
arg_parser.add_argument(
"--disable-v2",
dest="disable_v2",
help="Build TensorFlow v1 rather than v2. By default the "
" compiler flag --config=v2 is enabled.",
action="store_true")
arg_parser.add_argument(
"--enable-bfloat16",
dest="enable_bfloat16",
help="Enable bfloat16 build. By default it is "
" disabled if no parameter is passed.",
action="store_true")
arg_parser.add_argument(
"--enable-dnnl1",
dest="enable_dnnl1",
help="Enable dnnl1 build. By default it is "
" disabled if no parameter is passed.",
action="store_true")
arg_parser.add_argument(
"-s",
"--secure-build",
dest="secure_build",
help="Enable secure build flags.",
action="store_true")
arg_parser.add_argument(
"-p",
"--platform",
choices=self.PLATFORMS_.keys(),
help="The target platform.",
dest="target_platform",
default=self.default_platform_)
arg_parser.add_argument(
"-f",
"--bazelrc-file",
dest="bazelrc_file",
help="The full path to the bazelrc file into which "
"the build command will be written. The path "
"will be relative to the container "
" environment.",
required=True)
self.args = arg_parser.parse_args()
def validate_args(self):
# Check the bazelrc file
if os.path.exists(self.args.bazelrc_file):
if os.path.isfile(self.args.bazelrc_file):
self._debug("The file {} exists and will be deleted.".format(
self.args.bazelrc_file))
elif os.path.isdir(self.args.bazelrc_file):
print("You can't write bazel config to \"{}\" "
"because it is a directory".format(self.args.bazelrc_file))
return False
# Validate gcc with the requested platform
gcc_major_version, gcc_minor_version = self.get_gcc_version()
if gcc_major_version == 0 or \
not self.target_platform_.set_host_gcc_version(
gcc_major_version, gcc_minor_version):
return False
return True
def set_build_args(self):
"""Generate Bazel build flags."""
for flag in BASIC_BUILD_OPTS:
self.bazel_flags_ += "{} ".format(flag)
if self.args.secure_build:
for flag in SECURE_BUILD_OPTS:
self.bazel_flags_ += "{} ".format(flag)
if not self.args.disable_mkl:
self.bazel_flags_ += "--config=mkl "
if self.args.disable_v2:
self.bazel_flags_ += "--config=v1 "
if self.args.enable_dnnl1:
self.bazel_flags_ += "--define build_with_mkl_dnn_v1_only=true "
if self.args.enable_bfloat16:
self.bazel_flags_ += "--copt=-DENABLE_INTEL_MKL_BFLOAT16 "
self.bazel_flags_ += self.target_platform_.get_bazel_gcc_flags()
def write_build_args(self):
self._debug("Writing build flags: {}".format(self.bazel_flags_))
with open(self.args.bazelrc_file, "w") as f:
f.write(self.bazel_flags_ + "\n")
def _debug(self, msg):
print(msg)
def go(self):
self.parse_args()
self.target_platform_ = self.PLATFORMS_.get(self.args.target_platform)
if self.validate_args():
self.set_build_args()
self.write_build_args()
else:
print("Error.")
env_setter = BuildEnvSetter()
env_setter.go()
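# Illustrative shape of the resulting bazelrc line (flags abbreviated; actual
# values depend on BASIC_BUILD_OPTS and the platform's gcc flags):
# build <basic opts> --config=mkl --copt=-march=haswell --copt=-mtune=haswell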
|
BuildEnvSetter
|
python
|
ray-project__ray
|
python/ray/autoscaler/_private/cli_logger.py
|
{
"start": 677,
"end": 1919
}
|
class ____:
def __init__(self):
# do not do any color work
self.identity = lambda x: x
self.colorful = self
self.colormode = None
self.NO_COLORS = None
self.ANSI_8_COLORS = None
def disable(self):
pass
@contextmanager
def with_style(self, x):
class IdentityClass:
def __getattr__(self, name):
return lambda y: y
yield IdentityClass()
def __getattr__(self, name):
if name == "with_style":
return self.with_style
return self.identity
try:
import colorful as _cf
from colorful.core import ColorfulString
_cf.use_8_ansi_colors()
except ModuleNotFoundError:
# We mock Colorful to restrict the colors used for consistency
# anyway, so we also allow for not having colorful at all.
# If the Ray Core dependency on colorful is ever removed,
# the CliLogger code will still work.
class ColorfulString:
pass
_cf = _ColorfulMock()
# We want to only allow specific formatting
# to prevent people from accidentally making bad looking color schemes.
#
# This is especially important since most will look bad on either light
# or dark themes.
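# Quick usage sketch of the mock (illustrative): every attribute degrades to
# the identity function, so call sites written against colorful still work.
# cf = _ColorfulMock()
# cf.bold("plain")              # -> "plain"
# with cf.with_style("bold") as c:
#     c.red("also plain")       # -> "also plain"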
|
_ColorfulMock
|
python
|
apache__airflow
|
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_dags.py
|
{
"start": 45897,
"end": 48456
}
|
class ____(TestDagEndpoint):
"""Unit tests for Delete DAG."""
def _create_dag_for_deletion(
self,
dag_maker,
dag_id=None,
dag_display_name=None,
has_running_dagruns=False,
):
with dag_maker(
dag_id,
dag_display_name=dag_display_name,
start_date=datetime(2024, 10, 10, tzinfo=timezone.utc),
):
EmptyOperator(task_id="dummy")
if has_running_dagruns:
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.set_state(TaskInstanceState.RUNNING)
dag_maker.sync_dagbag_to_db()
@pytest.mark.parametrize(
(
"dag_id",
"dag_display_name",
"status_code_delete",
"status_code_details",
"has_running_dagruns",
"is_create_dag",
),
[
("test_nonexistent_dag_id", "nonexistent_display_name", 404, 404, False, False),
(DAG4_ID, DAG4_DISPLAY_NAME, 204, 404, False, True),
(DAG5_ID, DAG5_DISPLAY_NAME, 409, 200, True, True),
],
)
@pytest.mark.usefixtures("configure_git_connection_for_dag_bundle")
def test_delete_dag(
self,
dag_maker,
test_client,
dag_id,
dag_display_name,
status_code_delete,
status_code_details,
has_running_dagruns,
is_create_dag,
session,
):
if is_create_dag:
self._create_dag_for_deletion(
dag_maker=dag_maker,
dag_id=dag_id,
dag_display_name=dag_display_name,
has_running_dagruns=has_running_dagruns,
)
delete_response = test_client.delete(f"{API_PREFIX}/{dag_id}")
assert delete_response.status_code == status_code_delete
details_response = test_client.get(f"{API_PREFIX}/{dag_id}/details")
assert details_response.status_code == status_code_details
if details_response.status_code == 204:
check_last_log(session, dag_id=dag_id, event="delete_dag", logical_date=None)
def test_delete_dag_should_response_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.delete(f"{API_PREFIX}/{DAG1_ID}")
assert response.status_code == 401
def test_delete_dag_should_response_403(self, unauthorized_test_client):
response = unauthorized_test_client.delete(f"{API_PREFIX}/{DAG1_ID}")
assert response.status_code == 403
|
TestDeleteDAG
|
python
|
huggingface__transformers
|
src/transformers/models/gptj/modeling_gptj.py
|
{
"start": 18707,
"end": 19029
}
|
class ____(PreTrainedModel):
config: GPTJConfig
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
_no_split_modules = ["GPTJBlock"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_can_compile_fullgraph = True
@auto_docstring
|
GPTJPreTrainedModel
|
python
|
h5py__h5py
|
h5py/_hl/base.py
|
{
"start": 11537,
"end": 11741
}
|
class ____(KeysView):
def __str__(self):
return "<KeysViewHDF5 {}>".format(list(self))
def __reversed__(self):
yield from reversed(self._mapping)
__repr__ = __str__
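# Usage sketch: KeysViewHDF5 subclasses collections.abc.KeysView, so any
# mapping works for illustration (h5py normally builds it over a Group).
# kv = KeysViewHDF5({"a": 1, "b": 2})
# str(kv)             # -> "<KeysViewHDF5 ['a', 'b']>"
# list(reversed(kv))  # -> ['b', 'a']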
|
KeysViewHDF5
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/memory_planning.py
|
{
"start": 691,
"end": 1398
}
|
class ____:
"""
A range where a given tensor is live. Begin and end are both counters
representing points in the program of grouped memory operations.
Begin is inclusive, end is exclusive.
Invariant: begin <= end
"""
begin: float # int | +/-inf
end: float # int | +/-inf
def contains(self, other: LiveRange):
"""Is other entirely within self"""
return self.begin <= other.begin and other.end <= self.end
def join(self, other: LiveRange):
"""Combine two ranges using a union operation"""
return LiveRange(min(self.begin, other.begin), max(self.end, other.end))
def __len__(self):
return self.end - self.begin
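# Usage sketch (assumes LiveRange is a dataclass, as the bare annotations
# suggest; the decorator sits outside this span):
# a = LiveRange(2, 6)
# a.contains(LiveRange(3, 5))   # -> True
# a.join(LiveRange(0, 4))       # -> LiveRange(begin=0, end=6)
# len(a)                        # -> 4, since __len__ returns end - begin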
|
LiveRange
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-special-substring-of-length-k.py
|
{
"start": 38,
"end": 406
}
|
class ____(object):
def hasSpecialSubstring(self, s, k):
"""
:type s: str
:type k: int
:rtype: bool
"""
l = 0
for i in xrange(len(s)):
l += 1
if i+1 == len(s) or s[i] != s[i+1]:
if l == k:
return True
l = 0
return False
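# Quick checks (Python 2 style, matching the xrange above): a maximal run of
# equal characters of length exactly k must exist.
# Solution().hasSpecialSubstring("aaabbb", 3)  # -> True, run "aaa" has length 3
# Solution().hasSpecialSubstring("abab", 2)    # -> False, every run has length 1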
|
Solution
|
python
|
getsentry__sentry
|
tests/sentry/uptime/subscriptions/test_tasks.py
|
{
"start": 1404,
"end": 2891
}
|
class ____(UptimeTestCase):
__test__ = Abstract(__module__, __qualname__)
def assert_redis_config(
self,
region: str,
sub: UptimeSubscription,
action: str | None,
region_mode: UptimeSubscriptionRegion.RegionMode | None,
):
region_config = get_region_config(region)
assert region_config is not None
cluster: RedisCluster | StrictRedis = redis.redis_clusters.get_binary(
region_config.config_redis_cluster
)
assert sub.subscription_id is not None
config_key, update_key = get_partition_keys(UUID(sub.subscription_id), region_config)
if action == "upsert":
config_bytes = cluster.hget(config_key, sub.subscription_id)
assert config_bytes is not None
assert region_mode is not None
assert msgpack.unpackb(config_bytes) == uptime_subscription_to_check_config(
sub, sub.subscription_id, region_mode
)
else:
assert not cluster.hexists(config_key, sub.subscription_id)
if action is None:
assert not cluster.hexists(update_key, sub.subscription_id)
else:
update_bytes = cluster.hget(update_key, sub.subscription_id)
assert update_bytes is not None
assert msgpack.unpackb(update_bytes) == {
"action": action,
"subscription_id": sub.subscription_id,
}
|
ConfigPusherTestMixin
|
python
|
getsentry__sentry
|
src/sentry/data_export/models.py
|
{
"start": 824,
"end": 5683
}
|
class ____(Model):
"""
Stores references to asynchronous data export jobs
"""
__relocation_scope__ = RelocationScope.Excluded
organization = FlexibleForeignKey("sentry.Organization")
user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL")
file_id = BoundedBigIntegerField(null=True, db_index=True)
date_added = models.DateTimeField(default=timezone.now)
date_finished = models.DateTimeField(null=True)
date_expired = models.DateTimeField(null=True, db_index=True)
query_type = BoundedPositiveIntegerField(choices=ExportQueryType.as_choices())
query_info: models.Field[dict[str, Any], dict[str, Any]] = JSONField()
@property
def status(self) -> ExportStatus:
if self.date_finished is None:
return ExportStatus.Early
elif self.date_expired is not None and self.date_expired < timezone.now():
return ExportStatus.Expired
else:
return ExportStatus.Valid
@property
def payload(self) -> dict[str, Any]:
payload = self.query_info.copy()
payload["export_type"] = ExportQueryType.as_str(self.query_type)
return payload
@property
def file_name(self) -> str:
date = self.date_added.strftime("%Y-%B-%d")
export_type = ExportQueryType.as_str(self.query_type)
# Example: Discover_2020-July-21_27.csv
return f"{export_type}_{date}_{self.id}.csv"
@staticmethod
def format_date(date: datetime | None) -> str | None:
# Example: 12:21 PM on July 21, 2020 (UTC)
return None if date is None else date.strftime("%-I:%M %p on %B %d, %Y (%Z)")
def delete_file(self) -> None:
file = self._get_file()
if file:
file.delete()
def delete(self, *args: Any, **kwargs: Any) -> tuple[int, dict[str, Any]]:
self.delete_file()
return super().delete(*args, **kwargs)
def finalize_upload(self, file: File, expiration: timedelta = DEFAULT_EXPIRATION) -> None:
self.delete_file() # If a file is present, remove it
current_time = timezone.now()
expire_time = current_time + expiration
self.update(file_id=file.id, date_finished=current_time, date_expired=expire_time)
transaction.on_commit(lambda: self.email_success(), router.db_for_write(ExportedData))
def email_success(self) -> None:
from sentry.utils.email import MessageBuilder
user_email = None
if self.user_id is not None:
user = user_service.get_user(user_id=self.user_id)
if user:
user_email = user.email
# The following condition should never be true, but it's a safeguard in case someone manually calls this method
if self.date_finished is None or self.date_expired is None or self._get_file() is None:
logger.warning(
"Notification email attempted on incomplete dataset",
extra={"data_export_id": self.id, "organization_id": self.organization_id},
)
return
url = self.organization.absolute_url(
reverse("sentry-data-export-details", args=[self.organization.slug, self.id])
)
msg = MessageBuilder(
subject="Your data is ready.",
context={"url": url, "expiration": self.format_date(self.date_expired)},
type="organization.export-data",
template="sentry/emails/data-export-success.txt",
html_template="sentry/emails/data-export-success.html",
)
if user_email is not None:
msg.send_async([user_email])
def email_failure(self, message: str) -> None:
from sentry.utils.email import MessageBuilder
if self.user_id is None:
return
user = user_service.get_user(user_id=self.user_id)
if user is None:
return
msg = MessageBuilder(
subject="We couldn't export your data.",
context={
"creation": self.format_date(self.date_added),
"error_message": message,
"payload": orjson.dumps(self.payload).decode(),
},
type="organization.export-data",
template="sentry/emails/data-export-failure.txt",
html_template="sentry/emails/data-export-failure.html",
)
msg.send_async([user.email])
self.delete()
def _get_file(self) -> File | None:
if self.file_id:
try:
return File.objects.get(pk=self.file_id)
except File.DoesNotExist:
self.update(file_id=None)
return None
class Meta:
app_label = "sentry"
db_table = "sentry_exporteddata"
__repr__ = sane_repr("query_type", "query_info")
@region_silo_model
|
ExportedData
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
|
{
"start": 8106,
"end": 8422
}
|
class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent, GrapheneDisplayableEvent)
name = "ExecutionStepOutputEvent"
output_name = graphene.NonNull(graphene.String)
type_check = graphene.NonNull(GrapheneTypeCheck)
|
GrapheneExecutionStepOutputEvent
|
python
|
allegroai__clearml
|
clearml/backend_api/session/errors.py
|
{
"start": 291,
"end": 343
}
|
class ____(SessionError):
pass
|
TimeoutExpiredError
|
python
|
FactoryBoy__factory_boy
|
factory/errors.py
|
{
"start": 537,
"end": 748
}
|
class ____(FactoryError):
"""Raised when a sub-declaration has no related declaration.
This means that the user declared 'foo__bar' without adding a declaration
at 'foo'.
"""
|
InvalidDeclarationError
|
python
|
ansible__ansible
|
lib/ansible/galaxy/collection/gpg.py
|
{
"start": 3921,
"end": 4101
}
|
class ____(GpgBaseError):
"""The signature with the keyid is good, but the signature is expired."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
|
GpgExpSig
|
python
|
python-poetry__poetry
|
src/poetry/mixology/version_solver.py
|
{
"start": 5060,
"end": 26690
}
|
class ____:
"""
The version solver that finds a set of package versions that satisfy the
root package's dependencies.
See https://github.com/dart-lang/pub/tree/master/doc/solver.md for details
on how this solver works.
"""
def __init__(self, root: ProjectPackage, provider: Provider) -> None:
self._root = root
self._provider = provider
self._dependency_cache = DependencyCache(provider)
self._incompatibilities: dict[str, list[Incompatibility]] = {}
self._contradicted_incompatibilities: set[Incompatibility] = set()
self._contradicted_incompatibilities_by_level: dict[
int, set[Incompatibility]
] = collections.defaultdict(set)
self._solution = PartialSolution()
self._get_comp_key_cached = functools.cache(self._get_comp_key)
@property
def solution(self) -> PartialSolution:
return self._solution
def solve(self) -> SolverResult:
"""
Finds a set of dependencies that match the root package's constraints,
or raises an error if no such set is available.
"""
start = time.time()
root_dependency = Dependency(self._root.name, self._root.version)
root_dependency.is_root = True
self._add_incompatibility(
Incompatibility([Term(root_dependency, False)], RootCauseError())
)
try:
next: str | None = self._root.name
while next is not None:
self._propagate(next)
next = self._choose_package_version()
return self._result()
except Exception:
raise
finally:
self._log(
f"Version solving took {time.time() - start:.3f} seconds.\n"
f"Tried {self._solution.attempted_solutions} solutions."
)
def _propagate(self, package: str) -> None:
"""
Performs unit propagation on incompatibilities transitively
related to package to derive new assignments for _solution.
"""
changed = {package}
while changed:
package = changed.pop()
# Iterate in reverse because conflict resolution tends to produce more
# general incompatibilities as time goes on. If we look at those first,
# we can derive stronger assignments sooner and more eagerly find
# conflicts.
for incompatibility in reversed(self._incompatibilities[package]):
if incompatibility in self._contradicted_incompatibilities:
continue
result = self._propagate_incompatibility(incompatibility)
if result is _conflict:
# If the incompatibility is satisfied by the solution, we use
# _resolve_conflict() to determine the root cause of the conflict as
# a new incompatibility.
#
# It also backjumps to a point in the solution
# where that incompatibility will allow us to derive new assignments
# that avoid the conflict.
root_cause = self._resolve_conflict(incompatibility)
# Back jumping erases all the assignments we did at the previous
# decision level, so we clear [changed] and refill it with the
# newly-propagated assignment.
changed.clear()
result = self._propagate_incompatibility(root_cause)
assert result is not None
assert result != _conflict
assert isinstance(result, str)
changed.add(result)
break
if result is not None:
assert isinstance(result, str)
changed.add(result)
def _propagate_incompatibility(
self, incompatibility: Incompatibility
) -> str | object | None:
"""
If incompatibility is almost satisfied by _solution, adds the
negation of the unsatisfied term to _solution.
If incompatibility is satisfied by _solution, returns _conflict. If
incompatibility is almost satisfied by _solution, returns the
unsatisfied term's package name.
Otherwise, returns None.
"""
# The first entry in incompatibility.terms that's not yet satisfied by
# _solution, if one exists. If we find more than one, _solution is
# inconclusive for incompatibility and we can't deduce anything.
unsatisfied = None
for term in incompatibility.terms:
relation = self._solution.relation(term)
if relation == SetRelation.DISJOINT:
# If term is already contradicted by _solution, then
# incompatibility is contradicted as well and there's nothing new we
# can deduce from it.
self._contradicted_incompatibilities.add(incompatibility)
self._contradicted_incompatibilities_by_level[
self._solution.decision_level
].add(incompatibility)
return None
elif relation == SetRelation.OVERLAPPING:
# If more than one term is inconclusive, we can't deduce anything about
# incompatibility.
if unsatisfied is not None:
return None
# If exactly one term in incompatibility is inconclusive, then it's
# almost satisfied and [term] is the unsatisfied term. We can add the
# inverse of the term to _solution.
unsatisfied = term
# If *all* terms in incompatibility are satisfied by _solution, then
# incompatibility is satisfied and we have a conflict.
if unsatisfied is None:
return _conflict
self._contradicted_incompatibilities.add(incompatibility)
self._contradicted_incompatibilities_by_level[
self._solution.decision_level
].add(incompatibility)
adverb = "not " if unsatisfied.is_positive() else ""
self._log(f"derived: {adverb}{unsatisfied.dependency}")
self._solution.derive(
unsatisfied.dependency, not unsatisfied.is_positive(), incompatibility
)
complete_name: str = unsatisfied.dependency.complete_name
return complete_name
def _resolve_conflict(self, incompatibility: Incompatibility) -> Incompatibility:
"""
Given an incompatibility that's satisfied by _solution,
the `conflict resolution`_ constructs a new incompatibility that encapsulates
the root cause of the conflict and backtracks _solution until the new
incompatibility will allow _propagate() to deduce new assignments.
Adds the new incompatibility to _incompatibilities and returns it.
.. _conflict resolution:
https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution
"""
self._log(f"conflict: {incompatibility}")
new_incompatibility = False
while not incompatibility.is_failure():
# The term in incompatibility.terms that was most recently satisfied by
# _solution.
most_recent_term = None
# The earliest assignment in _solution such that incompatibility is
# satisfied by _solution up to and including this assignment.
most_recent_satisfier = None
# The difference between most_recent_satisfier and most_recent_term;
# that is, the versions that are allowed by most_recent_satisfier and not
# by most_recent_term. This is None if most_recent_satisfier totally
# satisfies most_recent_term.
difference = None
# The decision level of the earliest assignment in _solution *before*
# most_recent_satisfier such that incompatibility is satisfied by
# _solution up to and including this assignment plus
# most_recent_satisfier.
#
# Decision level 1 is the level where the root package was selected. It's
# safe to go back to decision level 0, but stopping at 1 tends to produce
# better error messages, because references to the root package end up
# closer to the final conclusion that no solution exists.
previous_satisfier_level = 1
for term in incompatibility.terms:
satisfier = self._solution.satisfier(term)
if most_recent_satisfier is None:
most_recent_term = term
most_recent_satisfier = satisfier
elif most_recent_satisfier.index < satisfier.index:
previous_satisfier_level = max(
previous_satisfier_level, most_recent_satisfier.decision_level
)
most_recent_term = term
most_recent_satisfier = satisfier
difference = None
else:
previous_satisfier_level = max(
previous_satisfier_level, satisfier.decision_level
)
if most_recent_term == term:
# If most_recent_satisfier doesn't satisfy most_recent_term on its
# own, then the next-most-recent satisfier may be the one that
# satisfies the remainder.
difference = most_recent_satisfier.difference(most_recent_term)
if difference is not None:
previous_satisfier_level = max(
previous_satisfier_level,
self._solution.satisfier(difference.inverse).decision_level,
)
# If most_recent_satisfier is the only satisfier left at its decision
# level, or if it has no cause (indicating that it's a decision rather
# than a derivation), then incompatibility is the root cause. We then
# backjump to previous_satisfier_level, where incompatibility is
# guaranteed to allow _propagate to produce more assignments.
# using assert to suppress mypy [union-attr]
assert most_recent_satisfier is not None
if (
previous_satisfier_level < most_recent_satisfier.decision_level
or most_recent_satisfier.cause is None
):
for level in range(
self._solution.decision_level, previous_satisfier_level, -1
):
if level in self._contradicted_incompatibilities_by_level:
self._contradicted_incompatibilities.difference_update(
self._contradicted_incompatibilities_by_level.pop(level),
)
self._dependency_cache.clear_level(level)
self._solution.backtrack(previous_satisfier_level)
if new_incompatibility:
self._add_incompatibility(incompatibility)
return incompatibility
# Create a new incompatibility by combining incompatibility with the
# incompatibility that caused most_recent_satisfier to be assigned. Doing
# this iteratively constructs an incompatibility that's guaranteed to be
# true (that is, we know for sure no solution will satisfy the
# incompatibility) while also approximating the intuitive notion of the
# "root cause" of the conflict.
new_terms = [
term for term in incompatibility.terms if term != most_recent_term
]
for term in most_recent_satisfier.cause.terms:
if term.dependency != most_recent_satisfier.dependency:
new_terms.append(term)
# The most_recent_satisfier may not satisfy most_recent_term on its own
# if there are a collection of constraints on most_recent_term that
# only satisfy it together. For example, if most_recent_term is
# `foo ^1.0.0` and _solution contains `[foo >=1.0.0,
# foo <2.0.0]`, then most_recent_satisfier will be `foo <2.0.0` even
# though it doesn't totally satisfy `foo ^1.0.0`.
#
# In this case, we add `not (most_recent_satisfier \ most_recent_term)` to
# the incompatibility as well, See the `algorithm documentation`_ for
# details.
#
# .. _algorithm documentation:
# https://github.com/dart-lang/pub/tree/master/doc/solver.md#conflict-resolution
if difference is not None:
inverse = difference.inverse
if inverse.dependency != most_recent_satisfier.dependency:
new_terms.append(inverse)
incompatibility = Incompatibility(
new_terms,
ConflictCauseError(incompatibility, most_recent_satisfier.cause),
)
new_incompatibility = True
partially = "" if difference is None else " partially"
self._log(
f"! {most_recent_term} is{partially} satisfied by"
f" {most_recent_satisfier}"
)
self._log(f'! which is caused by "{most_recent_satisfier.cause}"')
self._log(f"! thus: {incompatibility}")
raise SolveFailureError(incompatibility)
def _get_comp_key(self, dependency: Dependency) -> CompKey:
"""
Returns a tuple of
- preference
- num_deps_upper_bound
- has_deps
- num_packages
that serves as priority for choosing the next package to resolve.
(A lower value takes precedence.)
In order to provide results that are as deterministic as possible
and consistent between `poetry lock` and `poetry update`, the return value
of two different dependencies should not be equal if possible.
## preference
See Preference class.
## num_deps_upper_bound
A dependency with an upper bound is more likely to cause conflicts. Therefore,
a package with more dependencies with upper bounds should be chosen first.
## has_deps
A package with dependencies should be chosen first
because a package without dependencies is less likely to cause conflicts.
## num_packages
The original algorithm proposes to prefer packages with as few remaining
versions as possible, so that if a conflict is necessary it's forced quickly.
https://github.com/dart-lang/pub/blob/master/doc/solver.md#decision-making
However, this leads to the famous boto3 vs. urllib3 issue, so we prefer
packages with more remaining versions (see
https://github.com/python-poetry/poetry/pull/8255#issuecomment-1657198242
for more details).
"""
preference = Preference.DEFAULT
# Direct origin dependencies must be handled first: we don't want to resolve
# a regular dependency for some package only to find later that we had a
# direct-origin dependency.
if dependency.is_direct_origin():
preference = Preference.DIRECT_ORIGIN
packages: list[DependencyPackage] = []
use_latest = dependency.name in self._provider.use_latest
if not use_latest:
locked = self._provider.get_locked(dependency)
if locked:
if preference == Preference.DEFAULT:
preference = Preference.LOCKED
packages = [locked]
if not packages:
packages = self._dependency_cache.search_for(
dependency, self._solution.decision_level
)
num_packages = len(packages)
if packages:
package = packages[0].package
if package.is_root():
relevant_dependencies = package.all_requires
else:
if preference != Preference.LOCKED and not package.is_direct_origin():
# We have to get the package from the pool,
# otherwise `requires` will be empty.
#
# We might need `package.source_reference` as fallback
# for transitive dependencies without a source
# if there is a top-level dependency
# for the same package with an explicit source.
for repo in (dependency.source_name, package.source_reference):
try:
package = self._provider.get_package_from_pool(
package.pretty_name,
package.version,
repository_name=repo,
)
except Exception:
pass
else:
break
relevant_dependencies = [
r
for r in package.requires
if not r.in_extras or r.in_extras[0] in dependency.extras
]
has_deps = bool(relevant_dependencies)
num_deps_upper_bound = sum(
1 for d in relevant_dependencies if d.constraint.has_upper_bound()
)
else:
has_deps = False
num_deps_upper_bound = 0
if preference == Preference.DEFAULT:
if num_packages < 2:
preference = Preference.NO_CHOICE
elif use_latest:
preference = Preference.USE_LATEST
return preference, -num_deps_upper_bound, not has_deps, -num_packages
def _choose_next(self, unsatisfied: list[Dependency]) -> Dependency:
"""
Chooses the next package to resolve.
"""
return min(unsatisfied, key=self._get_comp_key_cached)
def _choose_package_version(self) -> str | None:
"""
Tries to select a version of a required package.
Returns the name of the package whose incompatibilities should be
propagated by _propagate(), or None indicating that version solving is
complete and a solution has been found.
"""
unsatisfied = self._solution.unsatisfied
if not unsatisfied:
return None
dependency = self._choose_next(unsatisfied)
locked = self._provider.get_locked(dependency)
if locked is None:
packages = self._dependency_cache.search_for(
dependency, self._solution.decision_level
)
package = next(iter(packages), None)
if package is None:
# If there are no versions that satisfy the constraint,
# add an incompatibility that indicates that.
self._add_incompatibility(
Incompatibility([Term(dependency, True)], NoVersionsCauseError())
)
complete_name = dependency.complete_name
return complete_name
package.dependency.transitive_marker = dependency.transitive_marker
else:
package = locked
package = self._provider.complete_package(package)
conflict = False
for incompatibility in self._provider.incompatibilities_for(package):
self._add_incompatibility(incompatibility)
# If an incompatibility is already satisfied, then selecting version
# would cause a conflict.
#
# We'll continue adding its dependencies, then go back to
# unit propagation which will guide us to choose a better version.
conflict = conflict or all(
term.dependency.complete_name == dependency.complete_name
or self._solution.satisfies(term)
for term in incompatibility.terms
)
if not conflict:
self._solution.decide(package.package)
self._log(
f"selecting {package.package.complete_name}"
f" ({package.package.full_pretty_version})"
)
complete_name = dependency.complete_name
return complete_name
def _result(self) -> SolverResult:
"""
Creates a #SolverResult from the decisions in _solution
"""
decisions = self._solution.decisions
return SolverResult(
self._root,
[p for p in decisions if not p.is_root()],
self._solution.attempted_solutions,
)
def _add_incompatibility(self, incompatibility: Incompatibility) -> None:
self._log(f"fact: {incompatibility}")
for term in incompatibility.terms:
if term.dependency.complete_name not in self._incompatibilities:
self._incompatibilities[term.dependency.complete_name] = []
if (
incompatibility
in self._incompatibilities[term.dependency.complete_name]
):
continue
self._incompatibilities[term.dependency.complete_name].append(
incompatibility
)
def _log(self, text: str) -> None:
self._provider.debug(text, self._solution.attempted_solutions)
|
VersionSolver
|
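A minimal sketch, assuming a Preference IntEnum ordered so that lower values sort first, of how the tuple returned in the sample above drives selection: Python compares tuples lexicographically, so min() over these keys picks direct-origin and locked dependencies before anything else, then prefers more upper-bounded dependencies, then dependencies that have requirements, then more remaining versions. The names below are illustrative, not Poetry's actual API.

from enum import IntEnum

class Preference(IntEnum):
    # Assumed ordering: lower values are chosen first.
    DIRECT_ORIGIN = 0
    NO_CHOICE = 1
    USE_LATEST = 2
    LOCKED = 3
    DEFAULT = 4

def comp_key(preference, num_deps_upper_bound, has_deps, num_packages):
    # Mirrors the return value above: the negations flip "more is better"
    # criteria into "smaller sorts first" for min().
    return (preference, -num_deps_upper_bound, not has_deps, -num_packages)

candidates = {
    "a": comp_key(Preference.DEFAULT, 2, True, 5),
    "b": comp_key(Preference.LOCKED, 0, False, 1),
}
print(min(candidates, key=candidates.get))  # -> "b": the locked dependency wins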
python
|
django__django
|
django/templatetags/i18n.py
|
{
"start": 2092,
"end": 3341
}
|
class ____(Node):
child_nodelists = ()
def __init__(self, filter_expression, noop, asvar=None, message_context=None):
self.noop = noop
self.asvar = asvar
self.message_context = message_context
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, str):
self.filter_expression.is_var = True
self.filter_expression.var = Variable("'%s'" % self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = self.message_context.resolve(
context
)
output = self.filter_expression.resolve(context)
value = render_value_in_context(output, context)
# Restore percent signs. Percent signs in template text are doubled
# so they are not interpreted as string format flags.
is_safe = isinstance(value, SafeData)
value = value.replace("%%", "%")
value = mark_safe(value) if is_safe else value
if self.asvar:
context[self.asvar] = value
return ""
else:
return value
|
TranslateNode
|
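For orientation, the node in the sample above backs Django's {% translate %} tag; these are standard usages that exercise the noop, message_context, and asvar branches of render():

{% load i18n %}
{% translate "Hello" %}                      {# translated and rendered in place #}
{% translate "Hello" noop %}                 {# resolved but deliberately left untranslated #}
{% translate "May" context "month name" %}   {# disambiguated via message context #}
{% translate "Hello" as greeting %}          {# stored in the context; the tag renders "" #}
{{ greeting }}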
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-autodoc/target/autoclass_content.py
|
{
"start": 503,
"end": 678
}
|
class ____:
"""A class having both __init__ and __new__"""
def __init__(self):
"""__init__ docstring"""
def __new__(cls):
"""__new__ docstring"""
|
F
|
python
|
pytorch__pytorch
|
test/inductor/test_ordered_set.py
|
{
"start": 53769,
"end": 53985
}
|
class ____(TestCopying, TestCase):
def setUp(self):
super().setUp()
self.OrderedSet = OrderedSet()
# ------------------------------------------------------------------------------
|
TestCopyingEmpty
|
python
|
numba__numba
|
numba/core/typing/context.py
|
{
"start": 445,
"end": 1186
}
|
class ____(object):
__slots__ = 'promote', 'safe_convert', "unsafe_convert"
def __init__(self):
self.promote = 0
self.safe_convert = 0
self.unsafe_convert = 0
def astuple(self):
"""Returns a tuple suitable for comparing with the worse situation
start first.
"""
return (self.unsafe_convert, self.safe_convert, self.promote)
def __add__(self, other):
if type(self) is not type(other):
return NotImplemented
rsum = Rating()
rsum.promote = self.promote + other.promote
rsum.safe_convert = self.safe_convert + other.safe_convert
rsum.unsafe_convert = self.unsafe_convert + other.unsafe_convert
return rsum
|
Rating
|
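A small illustrative check of how the sample above is meant to be used: astuple() orders the criteria worst-first, so plain tuple comparison ranks conversion ratings, and __add__ accumulates them across a signature. The concrete numbers are made up for the demo.

r1, r2 = Rating(), Rating()
r1.unsafe_convert = 1                 # one unsafe conversion
r2.safe_convert, r2.promote = 2, 1    # only safe conversions and promotions
assert r2.astuple() < r1.astuple()    # r2 ranks better despite more changes
total = r1 + r2
assert total.astuple() == (1, 2, 1)   # (unsafe, safe, promote)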
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_getlimits.py
|
{
"start": 4919,
"end": 7576
}
|
class ____(TestCase):
@skip(reason="Instantiate {i,f}info from dtypes.")
def test_instances(self):
iinfo(10)
finfo(3.0)
@skip(reason="MachAr no implemented (does it need to)?")
def test_known_types(self):
# Test we are correctly compiling parameters for known types
for ftype, ma_like in (
(np.float16, _float_ma[16]),
(np.float32, _float_ma[32]),
(np.float64, _float_ma[64]),
):
assert_ma_equal(_discovered_machar(ftype), ma_like)
# Suppress warning for broken discovery of double double on PPC
ld_ma = _discovered_machar(np.longdouble)
bytes = np.dtype(np.longdouble).itemsize
if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
# 80-bit extended precision
assert_ma_equal(ld_ma, _float_ma[80])
elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
# IEEE 754 128-bit
assert_ma_equal(ld_ma, _float_ma[128])
@skip(reason="MachAr no implemented (does it need to be)?")
def test_subnormal_warning(self):
"""Test that the subnormal is zero warning is not being raised."""
ld_ma = _discovered_machar(np.longdouble)
bytes = np.dtype(np.longdouble).itemsize
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
# 80-bit extended precision
ld_ma.smallest_subnormal
assert len(w) == 0
elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
# IEEE 754 128-bit
ld_ma.smallest_subnormal
assert len(w) == 0
else:
# Double double
ld_ma.smallest_subnormal
# This test may fail on some platforms
assert len(w) == 0
@xpassIfTorchDynamo_np # (reason="None of nmant, minexp, maxexp is implemented.")
def test_plausible_finfo(self):
# Assert that finfo returns reasonable results for all types
for ftype in (
[np.float16, np.float32, np.float64, np.longdouble]
+ [
np.complex64,
np.complex128,
]
# no complex256 in torch._numpy
+ ([np.clongdouble] if hasattr(np, "clongdouble") else [])
):
info = np.finfo(ftype)
assert_(info.nmant > 1)
assert_(info.minexp < -1)
assert_(info.maxexp > 1)
if __name__ == "__main__":
run_tests()
|
TestMisc
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 11417,
"end": 11580
}
|
class ____(Document):
history = HistoricalRecords()
@Document._history_user.setter
def _history_user(self, value):
self.changed_by = value
|
Paper
|
python
|
jupyterlab__jupyterlab
|
jupyterlab/extensions/manager.py
|
{
"start": 4434,
"end": 4957
}
|
class ____:
"""Plugin manager options.
Attributes:
lock_all: Whether to lock (prevent enabling/disabling) all plugins.
lock_rules: A list of plugins or extensions that cannot be toggled.
If extension name is provided, all its plugins will be disabled.
The plugin names need to follow colon-separated format of `extension:plugin`.
"""
lock_rules: frozenset[str] = field(default_factory=frozenset)
lock_all: bool = False
@dataclass(frozen=True)
|
PluginManagerOptions
|
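A brief usage sketch of the options object above, assuming the class itself is declared as a frozen dataclass (the @dataclass(frozen=True) line at the end of the excerpt decorates the next class in the file):

options = PluginManagerOptions(
    lock_rules=frozenset({"myextension", "other-extension:plugin"}),
    lock_all=False,
)
# frozenset plus a frozen dataclass keep the options hashable and immutable.
assert "other-extension:plugin" in options.lock_rules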
python
|
getsentry__sentry-python
|
sentry_sdk/integrations/pymongo.py
|
{
"start": 2802,
"end": 6164
}
|
class ____(monitoring.CommandListener):
def __init__(self):
# type: () -> None
self._ongoing_operations = {} # type: Dict[int, Span]
def _operation_key(self, event):
# type: (Union[CommandFailedEvent, CommandStartedEvent, CommandSucceededEvent]) -> int
return event.request_id
def started(self, event):
# type: (CommandStartedEvent) -> None
if sentry_sdk.get_client().get_integration(PyMongoIntegration) is None:
return
with capture_internal_exceptions():
command = dict(copy.deepcopy(event.command))
command.pop("$db", None)
command.pop("$clusterTime", None)
command.pop("$signature", None)
tags = {
"db.name": event.database_name,
SPANDATA.DB_SYSTEM: "mongodb",
SPANDATA.DB_OPERATION: event.command_name,
SPANDATA.DB_MONGODB_COLLECTION: command.get(event.command_name),
}
try:
tags["net.peer.name"] = event.connection_id[0]
tags["net.peer.port"] = str(event.connection_id[1])
except TypeError:
pass
data = {"operation_ids": {}} # type: Dict[str, Any]
data["operation_ids"]["operation"] = event.operation_id
data["operation_ids"]["request"] = event.request_id
data.update(_get_db_data(event))
try:
lsid = command.pop("lsid")["id"]
data["operation_ids"]["session"] = str(lsid)
except KeyError:
pass
if not should_send_default_pii():
command = _strip_pii(command)
query = json.dumps(command, default=str)
span = sentry_sdk.start_span(
op=OP.DB,
name=query,
origin=PyMongoIntegration.origin,
)
for tag, value in tags.items():
# set the tag for backwards-compatibility.
# TODO: remove the set_tag call in the next major release!
span.set_tag(tag, value)
span.set_data(tag, value)
for key, value in data.items():
span.set_data(key, value)
with capture_internal_exceptions():
sentry_sdk.add_breadcrumb(
message=query, category="query", type=OP.DB, data=tags
)
self._ongoing_operations[self._operation_key(event)] = span.__enter__()
def failed(self, event):
# type: (CommandFailedEvent) -> None
if sentry_sdk.get_client().get_integration(PyMongoIntegration) is None:
return
try:
span = self._ongoing_operations.pop(self._operation_key(event))
span.set_status(SPANSTATUS.INTERNAL_ERROR)
span.__exit__(None, None, None)
except KeyError:
return
def succeeded(self, event):
# type: (CommandSucceededEvent) -> None
if sentry_sdk.get_client().get_integration(PyMongoIntegration) is None:
return
try:
span = self._ongoing_operations.pop(self._operation_key(event))
span.set_status(SPANSTATUS.OK)
span.__exit__(None, None, None)
except KeyError:
pass
|
CommandTracer
|
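A hedged sketch of how a listener like the one above is attached with PyMongo's monitoring API; the Sentry integration performs this registration itself, so this only shows the underlying mechanism:

from pymongo import MongoClient, monitoring

monitoring.register(CommandTracer())  # global: applies to clients created later
client = MongoClient(
    "mongodb://localhost:27017",
    event_listeners=[CommandTracer()],  # or register per client
)
client.admin.command("ping")  # started()/succeeded() fire around the command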
python
|
huggingface__transformers
|
src/transformers/models/camembert/modular_camembert.py
|
{
"start": 15281,
"end": 18428
}
|
class ____(RobertaForQuestionAnswering):
def __init__(self, config):
super().__init__(config)
del self.camembert
self.roberta = CamembertModel(config, add_pooling_layer=False)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
r"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
"""
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
CamembertForQuestionAnswering
|
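A tiny numeric illustration of the clamp/ignore_index trick in the loss computation above: gold positions that fall outside the model input are clamped to the sequence length and then excluded from the loss. The shapes are arbitrary demo values.

import torch
from torch.nn import CrossEntropyLoss

start_logits = torch.randn(2, 4)        # (batch, seq_len)
start_positions = torch.tensor([2, 7])  # 7 lies outside the 4-token input
ignored_index = start_logits.size(1)    # 4
start_positions = start_positions.clamp(0, ignored_index)  # -> tensor([2, 4])
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
loss = loss_fct(start_logits, start_positions)  # second example contributes 0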
python
|
rapidsai__cudf
|
python/cudf/cudf/core/series.py
|
{
"start": 14236,
"end": 14290
}
|
class ____(_SeriesLocIndexer):
pass
|
_SeriesAtIndexer
|
python
|
lazyprogrammer__machine_learning_examples
|
rl2/a3c/thread_example.py
|
{
"start": 90,
"end": 982
}
|
class ____:
def __init__(self, id_, global_counter):
self.id = id_
self.global_counter = global_counter
self.local_counter = itertools.count()
def run(self):
while True:
time.sleep(np.random.rand()*2)
global_step = next(self.global_counter)
local_step = next(self.local_counter)
print("Worker({}): {}".format(self.id, local_step))
if global_step >= 20:
break
global_counter = itertools.count()
NUM_WORKERS = multiprocessing.cpu_count()
# create the workers
workers = []
for worker_id in range(NUM_WORKERS):
worker = Worker(worker_id, global_counter)
workers.append(worker)
# start the threads
worker_threads = []
for worker in workers:
# Bind the current worker's method directly; a bare `lambda: worker.run()`
# late-binds the loop variable and may invoke the wrong worker.
t = threading.Thread(target=worker.run)
t.start()
worker_threads.append(t)
# join the threads
for t in worker_threads:
t.join()
print("DONE!")
|
Worker
|
python
|
pytorch__pytorch
|
torch/_dynamo/source.py
|
{
"start": 18419,
"end": 18836
}
|
class ____(ChainedSource):
idx: int
def __post_init__(self) -> None:
assert self.base is not None
def reconstruct(self, codegen: "PyCodegen") -> None:
raise NotImplementedError
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def name(self) -> str:
return f"({self.idx}, {self.base.name()})"
@dataclasses.dataclass(frozen=True)
|
IndexedSource
|
python
|
python-pillow__Pillow
|
Tests/test_imagewin.py
|
{
"start": 116,
"end": 620
}
|
class ____:
def test_sanity(self) -> None:
dir(ImageWin)
def test_hdc(self) -> None:
# Arrange
dc = 50
# Act
hdc = ImageWin.HDC(dc)
dc2 = int(hdc)
# Assert
assert dc2 == 50
def test_hwnd(self) -> None:
# Arrange
wnd = 50
# Act
hwnd = ImageWin.HWND(wnd)
wnd2 = int(hwnd)
# Assert
assert wnd2 == 50
@pytest.mark.skipif(not is_win32(), reason="Windows only")
|
TestImageWin
|
python
|
huggingface__transformers
|
src/transformers/models/instructblipvideo/modular_instructblipvideo.py
|
{
"start": 6823,
"end": 6943
}
|
class ____(InstructBlipForConditionalGenerationModelOutput):
pass
|
InstructBlipVideoForConditionalGenerationModelOutput
|
python
|
huggingface__transformers
|
src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py
|
{
"start": 34853,
"end": 35835
}
|
class ____(nn.Module):
def __init__(self, length, channels, max_timescale=10000):
super().__init__()
if channels % 2 != 0:
raise ValueError("SinusoidsPositionEmbedding needs even channels input")
log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float())
scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
self.register_buffer(
"positional_embedding",
torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1),
persistent=False,
)
def forward(self, seqlen: int):
return self.positional_embedding[:seqlen, :]
@auto_docstring(
custom_intro="""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`Qwen2_5OmniAudioEncoderLayer`].
"""
)
|
SinusoidsPositionEmbedding
|
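A quick shape check for the module above, with demo sizes; the first half of the channel dimension carries the sin terms and the second half the cos terms:

import torch

emb = SinusoidsPositionEmbedding(length=100, channels=128)
pos = emb(10)
assert pos.shape == (10, 128)
# At position 0 the scaled time is 0, so all cos terms equal 1.
assert torch.allclose(pos[0, 64:], torch.ones(64))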
python
|
plotly__plotly.py
|
tests/test_core/test_graph_objs/test_instantiate_hierarchy.py
|
{
"start": 303,
"end": 1406
}
|
class ____(TestCase):
def test_construct_datatypes(self):
for datatypes_module in datatype_modules:
module = importlib.import_module(datatypes_module)
for name in getattr(module, "__all__", []):
if name.startswith("_") or name[0].islower() or name == "FigureWidget":
continue
obj = getattr(module, name)
try:
v = obj()
except Exception:
print(
"Failed to construct {obj} in module {module}".format(
obj=obj, module=datatypes_module
)
)
raise
if obj.__module__ == "plotly.graph_objs._deprecations":
self.assertTrue(isinstance(v, list) or isinstance(v, dict))
obj()
elif name in ("Figure", "FigureWidget"):
self.assertIsInstance(v, BaseFigure)
else:
self.assertIsInstance(v, BasePlotlyType)
|
HierarchyTest
|
python
|
huggingface__transformers
|
src/transformers/models/pixtral/modeling_pixtral.py
|
{
"start": 1743,
"end": 8336
}
|
class ____(nn.Module):
"""
The key idea of the Pixtral embedding is that there is a frequency for each pixel position.
If you have height x width pixels (or embedding pixels), then the frequency used for RoPE
is given by indexing the precomputed frequencies on the width and height.
What you output is of dimension (batch, height * width, dim) with dim the embed dim.
This simply means that for each image hidden state, you are going to add
a corresponding positional embedding, based on its index in the grid.
"""
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: PixtralVisionConfig, device=None, layer_type=None):
super().__init__()
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
raise ValueError(
f"{self.__class__.__name__} does not support non-default RoPE, but got `rope_type={self.rope_type}`"
)
inv_freq, attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[PixtralVisionConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Here is the diff from Llama RoPE
max_patches_per_side = config.image_size // config.patch_size
h = torch.arange(max_patches_per_side)
w = torch.arange(max_patches_per_side)
freqs = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
freqs_h = torch.outer(h, freqs[::2]).float()
freqs_w = torch.outer(w, freqs[1::2]).float()
inv_freq = torch.cat(
[
freqs_h[:, None, :].repeat(1, max_patches_per_side, 1),
freqs_w[None, :, :].repeat(max_patches_per_side, 1, 1),
],
dim=-1,
).reshape(-1, dim // 2) # we reshape to only index on the position indexes, not tuple of indexes
# Different from paper, but it uses a different permutation in order to obtain the same calculation
# TODO maybe make it torch compatible later on. We can also just slice
inv_freq = torch.cat((inv_freq, inv_freq), dim=-1)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
freqs = self.inv_freq[position_ids]
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
emb = freqs
cos = emb.cos()
sin = emb.sin()
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
# Copied from transformers.models.siglip.modeling_siglip.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
|
PixtralRotaryEmbedding
|
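A small worked example of rotate_half from the sample above, which negates and swaps the two halves of the last dimension (with cos=1 and sin=0, apply_rotary_pos_emb is then the identity):

import torch

x = torch.tensor([1.0, 2.0, 3.0, 4.0])
# x1 = [1, 2], x2 = [3, 4]  ->  cat((-x2, x1)) = [-3, -4, 1, 2]
assert torch.equal(rotate_half(x), torch.tensor([-3.0, -4.0, 1.0, 2.0]))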
python
|
ray-project__ray
|
python/ray/train/base_trainer.py
|
{
"start": 4480,
"end": 37414
}
|
class ____(abc.ABC):
"""Defines interface for distributed training on Ray.
Note: The base ``BaseTrainer`` class cannot be instantiated directly. Only
one of its subclasses can be used.
Note to developers: If a new trainer is added, please update
`air/_internal/usage.py`.
**How does a trainer work?**
- First, initialize the Trainer. The initialization runs locally,
so heavyweight setup should not be done in ``__init__``.
- Then, when you call ``trainer.fit()``, the Trainer is serialized
and copied to a remote Ray actor. The following methods are then
called in sequence on the remote actor.
- ``trainer.setup()``: Any heavyweight Trainer setup should be
specified here.
- ``trainer.training_loop()``: Executes the main training logic.
- Calling ``trainer.fit()`` will return a ``ray.result.Result``
object where you can access metrics from your training run, as well
as any checkpoints that may have been saved.
**How do I create a new Trainer?**
Subclass ``ray.train.trainer.BaseTrainer``, and override the ``training_loop``
method, and optionally ``setup``.
.. testcode::
:skipif: True
import torch
from ray.train.trainer import BaseTrainer
from ray import train, tune
class MyPytorchTrainer(BaseTrainer):
def setup(self):
self.model = torch.nn.Linear(1, 1)
self.optimizer = torch.optim.SGD(
self.model.parameters(), lr=0.1)
def training_loop(self):
# You can access any Trainer attributes directly in this method.
# self.datasets["train"] has already been
dataset = self.datasets["train"]
torch_ds = dataset.iter_torch_batches(dtypes=torch.float)
loss_fn = torch.nn.MSELoss()
for epoch_idx in range(10):
loss = 0
num_batches = 0
torch_ds = dataset.iter_torch_batches(
dtypes=torch.float, batch_size=2
)
for batch in torch_ds:
X = torch.unsqueeze(batch["x"], 1)
y = torch.unsqueeze(batch["y"], 1)
# Compute prediction error
pred = self.model(X)
batch_loss = loss_fn(pred, y)
# Backpropagation
self.optimizer.zero_grad()
batch_loss.backward()
self.optimizer.step()
loss += batch_loss.item()
num_batches += 1
loss /= num_batches
# Use Tune functions to report intermediate
# results.
train.report({"loss": loss, "epoch": epoch_idx})
# Initialize the Trainer, and call Trainer.fit()
import ray
train_dataset = ray.data.from_items(
[{"x": i, "y": i} for i in range(10)])
my_trainer = MyPytorchTrainer(datasets={"train": train_dataset})
result = my_trainer.fit()
Args:
scaling_config: Configuration for how to scale training.
run_config: Configuration for the execution of the training run.
datasets: Any Datasets to use for training. Use the key "train"
to denote which dataset is the training dataset.
metadata: Dict that should be made available via
`train.get_context().get_metadata()` and in `checkpoint.get_metadata()`
for checkpoints saved from this Trainer. Must be JSON-serializable.
resume_from_checkpoint: A checkpoint to resume training from.
"""
_scaling_config_allowed_keys: List[str] = [
"trainer_resources",
]
_handles_checkpoint_freq: bool = False
_handles_checkpoint_at_end: bool = False
# fields to propagate to Tuner param_space.
# See `BaseTrainer._extract_fields_for_tuner_param_space` for more details.
_fields_for_tuner_param_space = []
def __init__(
self,
*,
scaling_config: Optional[ScalingConfig] = None,
run_config: Optional[RunConfig] = None,
datasets: Optional[Dict[str, GenDataset]] = None,
metadata: Optional[Dict[str, Any]] = None,
resume_from_checkpoint: Optional[Checkpoint] = None,
):
self.scaling_config = (
scaling_config if scaling_config is not None else ScalingConfig()
)
self.run_config = (
copy.copy(run_config) if run_config is not None else RunConfig()
)
self.metadata = metadata
self.datasets = datasets if datasets is not None else {}
self.starting_checkpoint = resume_from_checkpoint
if _v2_migration_warnings_enabled():
if metadata is not None:
_log_deprecation_warning(_GET_METADATA_DEPRECATION_MESSAGE)
if resume_from_checkpoint is not None:
_log_deprecation_warning(_RESUME_FROM_CHECKPOINT_DEPRECATION_WARNING)
# These attributes should only be set through `BaseTrainer.restore`
self._restore_path = None
self._restore_storage_filesystem = None
self._validate_attributes()
usage_lib.record_library_usage("train")
air_usage.tag_air_trainer(self)
@classmethod
@Deprecated(message=_TRAINER_RESTORE_DEPRECATION_WARNING)
def restore(
cls: Type["BaseTrainer"],
path: Union[str, os.PathLike],
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None,
datasets: Optional[Dict[str, GenDataset]] = None,
scaling_config: Optional[ScalingConfig] = None,
**kwargs,
) -> "BaseTrainer":
"""Restores a Train experiment from a previously interrupted/failed run.
Restore should be used for experiment-level fault tolerance in the event
that the head node crashes (e.g., OOM or some other runtime error) or the
entire cluster goes down (e.g., network error affecting all nodes).
A run that has already completed successfully will not be resumed from this API.
To continue training from a successful run, launch a new run with the
``<Framework>Trainer(resume_from_checkpoint)`` API instead, passing in a
checkpoint from the previous run to start with.
.. note::
Restoring an experiment from a path that's pointing to a *different*
location than the original experiment path is supported. However, Ray Train
assumes that the full experiment directory is available
(including checkpoints) so that it's possible to resume trials from their
latest state.
For example, if the original experiment path was run locally, then the
results are uploaded to cloud storage, Ray Train expects the full contents
to be available in cloud storage if attempting to resume
via ``<Framework>Trainer.restore("s3://...")``. The restored run will
continue writing results to the same cloud storage location.
The following example can be paired with implementing job retry using
:ref:`Ray Jobs <jobs-overview>` to produce a Train experiment that will
attempt to resume on both experiment-level and trial-level failures:
.. testcode::
:skipif: True
import os
import ray
from ray import train
from ray.train.trainer import BaseTrainer
experiment_name = "unique_experiment_name"
storage_path = os.path.expanduser("~/ray_results")
experiment_dir = os.path.join(storage_path, experiment_name)
# Define some dummy inputs for demonstration purposes
datasets = {"train": ray.data.from_items([{"a": i} for i in range(10)])}
class CustomTrainer(BaseTrainer):
def training_loop(self):
pass
if CustomTrainer.can_restore(experiment_dir):
trainer = CustomTrainer.restore(
experiment_dir, datasets=datasets
)
else:
trainer = CustomTrainer(
datasets=datasets,
run_config=train.RunConfig(
name=experiment_name,
storage_path=storage_path,
# Tip: You can also enable retries on failure for
# worker-level fault tolerance
failure_config=train.FailureConfig(max_failures=3),
),
)
result = trainer.fit()
Args:
path: The path to the experiment directory of the training run to restore.
This can be a local path or a remote URI if the experiment was
uploaded to the cloud.
storage_filesystem: Custom ``pyarrow.fs.FileSystem``
corresponding to the ``path``. This may be necessary if the original
experiment passed in a custom filesystem.
datasets: Re-specified datasets used in the original training run.
This must include all the datasets that were passed in the
original trainer constructor.
scaling_config: Optionally re-specified scaling config. This can be
modified to be different from the original spec.
**kwargs: Other optionally re-specified arguments, passed in by subclasses.
Raises:
ValueError: If all datasets were not re-supplied on restore.
Returns:
BaseTrainer: A restored instance of the class that is calling this method.
"""
if _v2_migration_warnings_enabled():
_log_deprecation_warning(_TRAINER_RESTORE_DEPRECATION_WARNING)
if not cls.can_restore(path, storage_filesystem):
raise ValueError(
f"Invalid restore path: {path}. Make sure that this path exists and "
"is the experiment directory that results from a call to "
"`trainer.fit()`."
)
fs, fs_path = get_fs_and_path(path, storage_filesystem)
trainer_pkl_path = Path(fs_path, _TRAINER_PKL).as_posix()
with fs.open_input_file(trainer_pkl_path) as f:
trainer_cls, param_dict = pickle.loads(f.readall())
if trainer_cls is not cls:
warnings.warn(
f"Invalid trainer type. You are attempting to restore a trainer of type"
f" {trainer_cls} with `{cls.__name__}.restore`, "
"which will most likely fail. "
f"Use `{trainer_cls.__name__}.restore` instead."
)
original_datasets = param_dict.pop("datasets", {})
if original_datasets and not datasets:
raise ValueError(
"The following datasets need to be provided again on restore: "
f"{list(original_datasets.keys())}\n"
f"Use {cls.__name__}.restore(..., datasets=datasets) "
"with the datasets that were provided to the original trainer."
)
datasets = datasets or {}
if set(original_datasets) != set(datasets):
raise ValueError(
"The provided datasets don't match the original dataset keys.\n"
f" Expected datasets for the keys: {list(original_datasets.keys())}\n"
f" Actual datasets provided: {list(datasets.keys())}"
)
param_dict["datasets"] = datasets
if scaling_config:
param_dict["scaling_config"] = scaling_config
for param_name, val in kwargs.items():
# Overwrite the old value if something is passed into restore
if val is not None:
param_dict[param_name] = val
try:
trainer = cls(**param_dict)
except Exception as e:
raise ValueError(
"Trainer restoration failed (see above for the stack trace). "
"Make sure that you use the right trainer class to restore: "
f"`{cls.__name__}.restore`\n"
) from e
trainer._restore_path = path
trainer._restore_storage_filesystem = storage_filesystem
return trainer
@classmethod
@Deprecated(
message=_TRAINER_RESTORE_DEPRECATION_WARNING,
warning=_v2_migration_warnings_enabled(),
)
def can_restore(
cls: Type["BaseTrainer"],
path: Union[str, os.PathLike],
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None,
) -> bool:
"""Checks whether a given directory contains a restorable Train experiment.
Args:
path: The path to the experiment directory of the Train experiment.
This can be either a local directory (e.g., ~/ray_results/exp_name)
or a remote URI (e.g., s3://bucket/exp_name).
Returns:
bool: Whether this path exists and contains the trainer state to resume from
"""
if _v2_migration_warnings_enabled():
_log_deprecation_warning(_TRAINER_RESTORE_DEPRECATION_WARNING)
fs, fs_path = get_fs_and_path(path, storage_filesystem)
trainer_pkl_path = Path(fs_path, _TRAINER_PKL).as_posix()
return _exists_at_fs_path(fs, trainer_pkl_path)
def __repr__(self):
# A dictionary that maps parameters to their default values.
default_values: Dict[str, Any] = {
"scaling_config": ScalingConfig(),
"run_config": RunConfig(),
"datasets": {},
"starting_checkpoint": None,
}
non_default_arguments = []
for parameter, default_value in default_values.items():
value = getattr(self, parameter)
if value != default_value:
non_default_arguments.append(f"{parameter}={value!r}")
if non_default_arguments:
return f"<{self.__class__.__name__} {' '.join(non_default_arguments)}>"
return f"<{self.__class__.__name__}>"
def __new__(cls, *args, **kwargs):
# Store the init args as attributes so this can be merged with Tune hparams.
trainer = super(BaseTrainer, cls).__new__(cls)
parameters = inspect.signature(cls.__init__).parameters
parameters = list(parameters.keys())
# Remove self.
parameters = parameters[1:]
arg_dict = dict(zip(parameters, args))
trainer._param_dict = {**arg_dict, **kwargs}
return trainer
def _validate_attributes(self):
"""Called on __init()__ to validate trainer attributes."""
# Run config
if not isinstance(self.run_config, RunConfig):
raise ValueError(
f"`run_config` should be an instance of `ray.train.RunConfig`, "
f"found {type(self.run_config)} with value `{self.run_config}`."
)
# Scaling config
if not isinstance(self.scaling_config, ScalingConfig):
raise ValueError(
"`scaling_config` should be an instance of `ScalingConfig`, "
f"found {type(self.scaling_config)} with value `{self.scaling_config}`."
)
# Datasets
if not isinstance(self.datasets, dict):
raise ValueError(
f"`datasets` should be a dict mapping from a string to "
f"`ray.data.Dataset` objects, "
f"found {type(self.datasets)} with value `{self.datasets}`."
)
else:
for key, dataset in self.datasets.items():
if not isinstance(dataset, ray.data.Dataset) and not callable(dataset):
raise ValueError(
f"The Dataset under '{key}' key is not a "
"`ray.data.Dataset`. "
f"Received {dataset} instead."
)
# Metadata.
self.metadata = self.metadata or {}
if not isinstance(self.metadata, dict):
raise TypeError(
f"The provided metadata must be a dict, was {type(self.metadata)}."
)
try:
self.metadata = json.loads(json.dumps(self.metadata))
except Exception as e:
raise ValueError(
"The provided metadata must be JSON-serializable: "
f"{self.metadata}: {e}"
)
if self.starting_checkpoint is not None and not isinstance(
self.starting_checkpoint, Checkpoint
):
raise ValueError(
f"`resume_from_checkpoint` should be an instance of "
f"`ray.train.Checkpoint`, found {type(self.starting_checkpoint)} "
f"with value `{self.starting_checkpoint}`."
)
self._log_v2_deprecation_warnings()
def _log_v2_deprecation_warnings(self):
"""Logs deprecation warnings for v2 migration.
Log them here in the Ray Train case rather than in the configuration
constructors to avoid logging incorrect deprecation warnings when
`ray.train.RunConfig` is passed to Ray Tune.
"""
from ray.train.v2._internal.constants import V2_ENABLED_ENV_VAR, is_v2_enabled
if is_v2_enabled():
raise DeprecationWarning(
f"Detected use of a deprecated Trainer import from `{self.__class__.__module__}`. "
"This Trainer class is not compatible with Ray Train V2.\n"
"To fix this:\n"
" - Update to use the new import path. For example, "
"`from ray.train.torch.torch_trainer import TorchTrainer` -> "
"`from ray.train.torch import TorchTrainer`\n"
f" - Or, explicitly disable V2 by setting: {V2_ENABLED_ENV_VAR}=0\n"
"See this issue for more context: "
"https://github.com/ray-project/ray/issues/49454"
)
if not _v2_migration_warnings_enabled():
return
from ray.train.v2._internal.migration_utils import (
CALLBACKS_DEPRECATION_MESSAGE,
FAIL_FAST_DEPRECATION_MESSAGE,
LOG_TO_FILE_DEPRECATION_MESSAGE,
PROGRESS_REPORTER_DEPRECATION_MESSAGE,
STOP_DEPRECATION_MESSAGE,
SYNC_CONFIG_DEPRECATION_MESSAGE,
TRAINER_RESOURCES_DEPRECATION_MESSAGE,
VERBOSE_DEPRECATION_MESSAGE,
)
# ScalingConfig deprecations
if self.scaling_config.trainer_resources is not None:
_log_deprecation_warning(TRAINER_RESOURCES_DEPRECATION_MESSAGE)
# FailureConfig deprecations
if self.run_config.failure_config.fail_fast:
_log_deprecation_warning(FAIL_FAST_DEPRECATION_MESSAGE)
# RunConfig deprecations
# NOTE: _verbose is the original verbose value passed by the user
if self.run_config._verbose is not None:
_log_deprecation_warning(VERBOSE_DEPRECATION_MESSAGE)
if self.run_config.log_to_file:
_log_deprecation_warning(LOG_TO_FILE_DEPRECATION_MESSAGE)
if self.run_config.stop is not None:
_log_deprecation_warning(STOP_DEPRECATION_MESSAGE)
if self.run_config.callbacks is not None:
_log_deprecation_warning(CALLBACKS_DEPRECATION_MESSAGE)
if self.run_config.progress_reporter is not None:
_log_deprecation_warning(PROGRESS_REPORTER_DEPRECATION_MESSAGE)
if self.run_config.sync_config != ray.train.SyncConfig():
_log_deprecation_warning(SYNC_CONFIG_DEPRECATION_MESSAGE)
@classmethod
def _validate_scaling_config(cls, scaling_config: ScalingConfig) -> ScalingConfig:
"""Returns scaling config dataclass after validating updated keys."""
ensure_only_allowed_dataclass_keys_updated(
dataclass=scaling_config,
allowed_keys=cls._scaling_config_allowed_keys,
)
return scaling_config
def setup(self) -> None:
"""Called during fit() to perform initial setup on the Trainer.
.. note:: This method is run on a remote process.
This method will not be called on the driver, so any expensive setup
operations should be placed here and not in ``__init__``.
This method is called prior to ``preprocess_datasets`` and
``training_loop``.
"""
pass
def preprocess_datasets(self) -> None:
"""Deprecated."""
raise DeprecationWarning(
"`preprocess_datasets` is no longer used, since preprocessors "
f"are no longer accepted by Trainers.\n{PREPROCESSOR_DEPRECATION_MESSAGE}"
)
@abc.abstractmethod
def training_loop(self) -> None:
"""Loop called by fit() to run training and report results to Tune.
.. note:: This method runs on a remote process.
``self.datasets`` have already been evaluated if they were wrapped in a factory.
You can use the :ref:`Ray Train utilities <train-loop-api>`
(:func:`train.report() <ray.train.report>` and
:func:`train.get_checkpoint() <ray.train.get_checkpoint>`) inside
this training loop.
Example:
.. testcode::
from ray.train.trainer import BaseTrainer
from ray import train
class MyTrainer(BaseTrainer):
def training_loop(self):
for epoch_idx in range(5):
...
train.report({"epoch": epoch_idx})
"""
raise NotImplementedError
@PublicAPI(stability="beta")
def fit(self) -> Result:
"""Runs training.
Returns:
A Result object containing the training result.
Raises:
ray.train.base_trainer.TrainingFailedError: If any failures during the execution
of ``self.as_trainable()``, or during the Tune execution loop.
"""
from ray.tune import ResumeConfig, TuneError
from ray.tune.tuner import Tuner
trainable = self.as_trainable()
param_space = self._extract_fields_for_tuner_param_space()
self.run_config.name = (
self.run_config.name or StorageContext.get_experiment_dir_name(trainable)
)
# The storage context here is only used to access the resolved
# storage fs and experiment path, in order to avoid duplicating that logic.
# This is NOT the storage context object that gets passed to remote workers.
storage = StorageContext(
storage_path=self.run_config.storage_path,
experiment_dir_name=self.run_config.name,
storage_filesystem=self.run_config.storage_filesystem,
)
if self._restore_path:
tuner = Tuner.restore(
path=self._restore_path,
trainable=trainable,
param_space=param_space,
_resume_config=ResumeConfig(
finished=ResumeConfig.ResumeType.RESUME,
unfinished=ResumeConfig.ResumeType.RESUME,
errored=ResumeConfig.ResumeType.RESUME,
),
storage_filesystem=self._restore_storage_filesystem,
)
else:
tuner = Tuner(
trainable=trainable,
param_space=param_space,
run_config=self.run_config,
_entrypoint=AirEntrypoint.TRAINER,
)
self._save(storage.storage_filesystem, storage.experiment_fs_path)
restore_msg = TrainingFailedError._RESTORE_MSG.format(
trainer_cls_name=self.__class__.__name__,
path=str(storage.experiment_fs_path),
)
try:
result_grid = tuner.fit()
except TuneError as e:
# Catch any `TuneError`s raised by the `Tuner.fit` call.
# Unwrap the `TuneError` if needed.
parent_error = e.__cause__ or e
# Raise it to the user as a `TrainingFailedError` with a message to restore.
raise TrainingFailedError(restore_msg) from parent_error
# Other exceptions get passed through directly (ex: on `fail_fast='raise'`)
assert len(result_grid) == 1
result = result_grid[0]
if result.error:
# Raise trainable errors to the user with a message to restore
# or configure `FailureConfig` in a new run.
raise TrainingFailedError(
"\n".join([restore_msg, TrainingFailedError._FAILURE_CONFIG_MSG])
) from result.error
return result
def _save(self, fs: pyarrow.fs.FileSystem, experiment_path: str):
"""Saves the current trainer's class along with the `param_dict` of
parameters passed to this trainer's constructor.
This is used to recreate the trainer on restore.
Unless a parameter is re-specified during restoration (only a subset
of parameters can be passed in again), that parameter will be loaded
from the saved copy.
Datasets should not be saved as part of the state. Instead, we save the
keys and replace the dataset values with dummy functions that will
raise an error if invoked. The error only serves as a guardrail for
misuse (e.g., manually unpickling and constructing the Trainer again)
and is not typically surfaced, since datasets must be re-specified
upon restoration.
"""
param_dict = self._param_dict.copy()
datasets = param_dict.pop("datasets", {})
def raise_fn():
raise RuntimeError
if datasets:
param_dict["datasets"] = {
dataset_name: raise_fn for dataset_name in datasets
}
cls_and_param_dict = (self.__class__, param_dict)
fs.create_dir(experiment_path)
with fs.open_output_stream(Path(experiment_path, _TRAINER_PKL).as_posix()) as f:
f.write(pickle.dumps(cls_and_param_dict))
def _extract_fields_for_tuner_param_space(self) -> Dict:
"""Extracts fields to be included in `Tuner.param_space`.
This is needed to leverage the full logging/integration offerings from Tune.
For example, `param_space` is logged automatically to wandb integration.
Currently only done for `train_loop_config`.
Returns:
A dictionary that should be passed to Tuner.param_space.
"""
result = {}
for key in self._fields_for_tuner_param_space:
if key in self._param_dict.keys():
result[key] = copy.deepcopy(self._param_dict[key])
return result
def _generate_trainable_cls(self) -> Type["Trainable"]:
"""Generates the base Trainable class.
Returns:
A Trainable class to use for training.
"""
from ray.tune.execution.placement_groups import PlacementGroupFactory
from ray.tune.trainable import wrap_function
trainer_cls = self.__class__
scaling_config = self.scaling_config
metadata = self.metadata
train_coordinator_fn = partial(
_train_coordinator_fn, trainer_cls=trainer_cls, metadata=metadata
)
# Change the name of the training function to match the name of the Trainer
# class. This will mean the Tune trial name will match the name of Trainer on
# stdout messages and the results directory.
train_coordinator_fn.__name__ = trainer_cls.__name__
trainable_cls = wrap_function(train_coordinator_fn)
has_base_dataset = bool(self.datasets)
if has_base_dataset:
from ray.data.context import DataContext
dataset_context = DataContext.get_current()
else:
dataset_context = None
class TrainTrainable(trainable_cls):
"""Adds default resources to the Trainable."""
_handles_checkpoint_freq = trainer_cls._handles_checkpoint_freq
_handles_checkpoint_at_end = trainer_cls._handles_checkpoint_at_end
@classmethod
def has_base_dataset(cls) -> bool:
"""Whether a dataset is provided through the Trainer."""
return has_base_dataset
@classmethod
def base_scaling_config(cls) -> ScalingConfig:
"""Returns the unchanged scaling config provided through the Trainer."""
return scaling_config
def setup(self, config, **kwargs):
base_config = dict(kwargs)
# Merge Tuner param space hyperparameters in `config` into the
# base config passed to the Trainer constructor, which is `base_config`.
# `base_config` is pulled from the object store from the usage of
# tune.with_parameters in `BaseTrainer.as_trainable`.
# run_config is not a tunable hyperparameter so it does not need to be
# merged.
run_config = base_config.pop("run_config", None)
self._merged_config = deep_update(
base_config, self.config, new_keys_allowed=True
)
self._merged_config["run_config"] = run_config
merged_scaling_config = self._merged_config.get(
"scaling_config", ScalingConfig()
)
if isinstance(merged_scaling_config, dict):
merged_scaling_config = ScalingConfig(**merged_scaling_config)
self._merged_config[
"scaling_config"
] = self._reconcile_scaling_config_with_trial_resources(
merged_scaling_config
)
if self.has_base_dataset():
# Set the DataContext on the Trainer actor to the DataContext
# specified on the driver.
DataContext._set_current(dataset_context)
super(TrainTrainable, self).setup(config)
def _reconcile_scaling_config_with_trial_resources(
self, scaling_config: ScalingConfig
) -> ScalingConfig:
"""
ResourceChangingScheduler workaround.
Ensures that the scaling config matches trial resources.
This should be replaced with RCS returning a ScalingConfig
in the future.
"""
trial_resources = self.trial_resources
# This will be false if the resources are default
if not isinstance(trial_resources, PlacementGroupFactory):
return scaling_config
# Ignore ResourceChangingScheduler workaround when resource bundles
# are unchanged
if self.trial_resources == scaling_config.as_placement_group_factory():
return scaling_config
trainer_cls._validate_scaling_config(scaling_config)
return ScalingConfig.from_placement_group_factory(trial_resources)
def _trainable_func(self, config):
# We ignore the config passed by Tune and instead use the merged
# config which includes the initial Trainer args.
super()._trainable_func(self._merged_config)
@classmethod
def default_resource_request(cls, config):
# `config["scaling_config"] is a dataclass when passed via the
# `scaling_config` argument in `Trainer` and is a dict when passed
# via the `scaling_config` key of `param_spec`.
# Conversion logic must be duplicated in `TrainTrainable.__init__`
# because this is a class method.
updated_scaling_config = config.get("scaling_config", scaling_config)
if isinstance(updated_scaling_config, dict):
updated_scaling_config = ScalingConfig(**updated_scaling_config)
validated_scaling_config = trainer_cls._validate_scaling_config(
updated_scaling_config
)
return validated_scaling_config.as_placement_group_factory()
return TrainTrainable
def as_trainable(self) -> Type["Trainable"]:
"""Converts self to a ``tune.Trainable`` class."""
from ray import tune
base_config = self._param_dict
trainable_cls = self._generate_trainable_cls()
# Wrap with `tune.with_parameters` to handle very large values in base_config
return tune.with_parameters(trainable_cls, **base_config)
|
BaseTrainer
|
python
|
getsentry__sentry
|
src/sentry/search/events/fields.py
|
{
"start": 37674,
"end": 38539
}
|
class ____(ColumnArg):
def __init__(self, name: str, **kwargs):
super().__init__(name, **kwargs)
self.has_default = True
def get_default(self, _) -> None:
return None
def normalize(
self, value: str, params: ParamsType, combinator: Combinator | None
) -> str | list[Any]:
if value is None:
raise InvalidFunctionArgument("a column is required")
if value not in FIELD_ALIASES:
return value
field = FIELD_ALIASES[value]
# If the alias has an expression prefer that over the column alias
# This enables user.display to work in aggregates
expression = field.get_expression(params)
if expression is not None:
return expression
elif field.alias is not None:
return field.alias
return value
|
CountColumn
|
python
|
spack__spack
|
lib/spack/spack/util/file_cache.py
|
{
"start": 617,
"end": 1037
}
|
class ____:
def __init__(self, path: Union[str, pathlib.Path]) -> None:
self.path = path
def __enter__(self) -> Optional[IO[str]]:
"""Return a file object for the cache if it exists."""
self.cache_file = _maybe_open(self.path)
return self.cache_file
def __exit__(self, type, value, traceback):
if self.cache_file:
self.cache_file.close()
|
ReadContextManager
|
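A usage sketch for the context manager above, assuming _maybe_open returns None when the cache file does not exist (as the Optional[IO[str]] annotation suggests):

with ReadContextManager("/path/to/cache/entry") as f:
    if f is None:
        data = None      # cache miss: nothing on disk yet
    else:
        data = f.read()  # cache hit; the file is closed on exit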
python
|
huggingface__transformers
|
src/transformers/models/janus/modeling_janus.py
|
{
"start": 16779,
"end": 17791
}
|
class ____(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`JanusVisionEncoderLayer`].
Args:
config: JanusVisionConfig
"""
def __init__(self, config: JanusVisionConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([JanusVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
# Ignore copy
@auto_docstring
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutput:
hidden_states = inputs_embeds
for encoder_layer in self.layers:
hidden_states = encoder_layer(
hidden_states,
attention_mask,
**kwargs,
)
return BaseModelOutput(last_hidden_state=hidden_states)
|
JanusVisionEncoder
|
python
|
geekcomputers__Python
|
BrowserHistory/tests/test_browser_history.py
|
{
"start": 201,
"end": 3525
}
|
class ____(unittest.TestCase):
def setUp(self):
"""Set up test cases"""
self.browser = BrowserHistory("homepage.com")
def test_initialization(self):
"""Test proper initialization of BrowserHistory"""
self.assertEqual(self.browser._curr.val, "homepage.com")
self.assertEqual(self.browser._back_count, 0)
self.assertEqual(self.browser._forward_count, 0)
self.assertIsNone(self.browser._curr.nxt)
self.assertIsNone(self.browser._curr.prev)
def test_visit(self):
"""Test visit functionality and forward history cleanup"""
self.browser.visit("page1.com")
self.assertEqual(self.browser._curr.val, "page1.com")
self.assertEqual(self.browser._back_count, 1)
self.assertEqual(self.browser._forward_count, 0)
# Test forward history cleanup
self.browser.visit("page2.com")
self.browser.back(1)
self.browser.visit("page3.com") # Should clear forward history
self.assertIsNone(self.browser._curr.nxt)
self.assertEqual(self.browser._forward_count, 0)
def test_back_navigation(self):
"""Test back navigation with counter validation"""
# Setup history
self.browser.visit("page1.com")
self.browser.visit("page2.com")
# Test normal back navigation
result = self.browser.back(1)
self.assertEqual(result, "page1.com")
self.assertEqual(self.browser._back_count, 1)
self.assertEqual(self.browser._forward_count, 1)
# Test back with more steps than available
result = self.browser.back(5) # Should only go back 1 step
self.assertEqual(result, "homepage.com")
self.assertEqual(self.browser._back_count, 0)
self.assertEqual(self.browser._forward_count, 2)
def test_forward_navigation(self):
"""Test forward navigation with counter validation"""
# Setup history and position
self.browser.visit("page1.com")
self.browser.visit("page2.com")
self.browser.back(2) # Go back to homepage
# Test normal forward navigation
result = self.browser.forward(1)
self.assertEqual(result, "page1.com")
self.assertEqual(self.browser._forward_count, 1)
self.assertEqual(self.browser._back_count, 1)
# Test forward with more steps than available
result = self.browser.forward(5) # Should only go forward remaining 1 step
self.assertEqual(result, "page2.com")
self.assertEqual(self.browser._forward_count, 0)
self.assertEqual(self.browser._back_count, 2)
def test_complex_navigation(self):
"""Test complex navigation patterns"""
self.browser.visit("page1.com")
self.browser.visit("page2.com")
self.browser.visit("page3.com")
# Back navigation
self.assertEqual(self.browser.back(2), "page1.com")
# New visit should clear forward history
self.browser.visit("page4.com")
self.assertEqual(self.browser._forward_count, 0)
self.assertIsNone(self.browser._curr.nxt)
# Verify we can't go forward to cleared history
self.assertEqual(self.browser.forward(1), "page4.com")
# starting point of code
if __name__ == "__main__":
unittest.main()
|
TestBrowserHistory
|
python
|
GoogleCloudPlatform__python-docs-samples
|
appengine/standard/taskqueue/pull-counter/main.py
|
{
"start": 1640,
"end": 2588
}
|
class ____(webapp2.RequestHandler):
def get(self):
"""Indefinitely fetch tasks and update the datastore."""
queue = taskqueue.Queue("pullq")
while True:
try:
tasks = queue.lease_tasks_by_tag(3600, 1000, deadline=60)
except (
taskqueue.TransientError,
apiproxy_errors.DeadlineExceededError,
) as e:
logging.exception(e)
time.sleep(1)
continue
if tasks:
key = tasks[0].tag
try:
update_counter(key, tasks)
except Exception as e:
logging.exception(e)
raise
finally:
queue.delete_tasks(tasks)
time.sleep(1)
app = webapp2.WSGIApplication(
[("/", CounterHandler), ("/_ah/start", CounterWorker)], debug=True
)
|
CounterWorker
|
python
|
mlflow__mlflow
|
mlflow/store/artifact/utils/models.py
|
{
"start": 1760,
"end": 6353
}
|
class ____(NamedTuple):
model_id: str | None = None
name: str | None = None
version: str | None = None
stage: str | None = None
alias: str | None = None
def _parse_model_uri(uri, scheme: str = "models") -> ParsedModelUri:
"""
Returns a ParsedModelUri tuple. Since a models:/ or prompts:/ URI can only have one of
{version, stage, 'latest', alias}, it will return
- (id, None, None, None) to look for a specific model by ID,
- (name, version, None, None) to look for a specific version,
- (name, None, stage, None) to look for the latest version of a stage,
- (name, None, None, None) to look for the latest of all versions.
- (name, None, None, alias) to look for a registered model alias.
Args:
uri: The URI to parse (e.g., "models:/name/version" or "prompts:/name@alias")
scheme: The expected URI scheme (default: "models", can be "prompts")
"""
parsed = urllib.parse.urlparse(uri, allow_fragments=False)
if parsed.scheme != scheme:
raise MlflowException(_improper_model_uri_msg(uri, scheme))
path = parsed.path
if not path.startswith("/") or len(path) <= 1:
raise MlflowException(_improper_model_uri_msg(uri, scheme))
parts = path.lstrip("/").split("/")
if len(parts) > 2 or parts[0].strip() == "":
raise MlflowException(_improper_model_uri_msg(uri, scheme))
if len(parts) == 2:
name, suffix = parts
if suffix.strip() == "":
raise MlflowException(_improper_model_uri_msg(uri, scheme))
# The URI is in the suffix format
if suffix.isdigit():
# The suffix is a specific version, e.g. "models:/AdsModel1/123"
return ParsedModelUri(name=name, version=suffix)
elif suffix.lower() == _MODELS_URI_SUFFIX_LATEST.lower() and scheme == "models":
# The suffix is the 'latest' string (case insensitive), e.g. "models:/AdsModel1/latest"
# Only supported for models, not prompts
return ParsedModelUri(name=name)
elif scheme == "models":
# The suffix is a specific stage (case insensitive), e.g. "models:/AdsModel1/Production"
# Only supported for models, not prompts
return ParsedModelUri(name=name, stage=suffix)
else:
# For prompts, only version numbers are supported, not stages or 'latest'
raise MlflowException(_improper_model_uri_msg(uri, scheme))
elif "@" in path:
# The URI is an alias URI, e.g. "models:/AdsModel1@Champion"
alias_parts = parts[0].rsplit("@", 1)
if len(alias_parts) != 2 or alias_parts[1].strip() == "":
raise MlflowException(_improper_model_uri_msg(uri, scheme))
return ParsedModelUri(name=alias_parts[0], alias=alias_parts[1])
else:
# The URI is of the form "models:/<model_id>"
return ParsedModelUri(parts[0])
def _parse_model_id_if_present(possible_model_uri: str | Path) -> str | None:
"""
Parses the model ID from the given string. If the string represents a UC model URI, we get the
model version to extract the model ID. If the string is not a models:/ URI, returns None.
Args:
possible_model_uri: The string that may be a models:/ URI.
Returns:
The model ID if the string is a models:/ URI, otherwise None.
"""
uri = str(possible_model_uri)
if is_models_uri(uri):
parsed_model_uri = _parse_model_uri(uri)
if parsed_model_uri.model_id is not None:
return parsed_model_uri.model_id
elif parsed_model_uri.name is not None and parsed_model_uri.version is not None:
client = mlflow.tracking.MlflowClient()
return client.get_model_version(
parsed_model_uri.name, parsed_model_uri.version
).model_id
return None
def get_model_name_and_version(client, models_uri):
(model_id, model_name, model_version, model_stage, model_alias) = _parse_model_uri(models_uri)
if model_id is not None:
return (model_id,)
if model_version is not None:
return model_name, model_version
# NB: Call get_model_version_by_alias of registry client directly to bypass prompt check
if isinstance(client, mlflow.MlflowClient):
client = client._get_registry_client()
if model_alias is not None:
mv = client.get_model_version_by_alias(model_name, model_alias)
return model_name, mv.version
return model_name, str(_get_latest_model_version(client, model_name, model_stage))
|
ParsedModelUri
|
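Illustrative parses for the URI forms enumerated in the docstring above; the model ID format in the first line is hypothetical:

_parse_model_uri("models:/m-123abc")           # ParsedModelUri(model_id="m-123abc")
_parse_model_uri("models:/AdsModel1/12")       # name="AdsModel1", version="12"
_parse_model_uri("models:/AdsModel1/latest")   # name="AdsModel1" (latest overall)
_parse_model_uri("models:/AdsModel1/Staging")  # name="AdsModel1", stage="Staging"
_parse_model_uri("models:/AdsModel1@champ")    # name="AdsModel1", alias="champ"
_parse_model_uri("prompts:/p/3", scheme="prompts")  # name="p", version="3"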
python
|
pytorch__pytorch
|
benchmarks/tensorexpr/reduction.py
|
{
"start": 7170,
"end": 7666
}
|
class ____(DynamicReduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super().__init__(mode, device, dtype, 1, dim0, dim1)
@staticmethod
def default_configs():
parent_config = DynamicReduce2DBench.default_configs()[0]
return [parent_config[1:]]
def config(self):
parent_config = super().config()
return parent_config[1:]
@staticmethod
def module():
return "reduce2d_dynamic_inner"
|
DynamicReduce2DInnerBench
|