language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/migration_helpers/test_issue_alert_dual_write.py
|
{
"start": 4043,
"end": 12807
}
|
class ____(RuleMigrationHelpersTestBase):
def test_rule_snooze_updates_workflow(self) -> None:
IssueAlertMigrator(self.issue_alert, self.user.id).run()
rule_snooze = RuleSnooze.objects.create(rule=self.issue_alert)
issue_alert_workflow = AlertRuleWorkflow.objects.get(rule_id=self.issue_alert.id)
workflow = Workflow.objects.get(id=issue_alert_workflow.workflow.id)
assert workflow.enabled is False
rule_snooze.delete()
workflow.refresh_from_db()
assert workflow.enabled is True
def test_ignores_per_user_rule_snooze(self) -> None:
IssueAlertMigrator(self.issue_alert, self.user.id).run()
RuleSnooze.objects.create(rule=self.issue_alert, user_id=self.user.id)
issue_alert_workflow = AlertRuleWorkflow.objects.get(rule_id=self.issue_alert.id)
workflow = Workflow.objects.get(id=issue_alert_workflow.workflow.id)
workflow.refresh_from_db()
assert workflow.enabled is True
def test_update_issue_alert(self) -> None:
IssueAlertMigrator(self.issue_alert, self.user.id).run()
conditions_payload = [
{
"id": FirstSeenEventCondition.id,
},
{
"id": LatestReleaseFilter.id,
},
]
rule_data = self.issue_alert.data
rule_data.update(
{
"action_match": "none",
"filter_match": "all",
"conditions": conditions_payload,
"frequency": 60,
"actions": [
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"uuid": "test-uuid",
}
],
}
)
self.issue_alert.update(
label="hello world",
owner_user_id=self.user.id,
environment_id=self.environment.id,
data=rule_data,
)
update_migrated_issue_alert(self.issue_alert)
issue_alert_workflow = AlertRuleWorkflow.objects.get(rule_id=self.issue_alert.id)
workflow = Workflow.objects.get(id=issue_alert_workflow.workflow.id)
assert workflow.name == self.issue_alert.label
assert self.issue_alert.project
assert workflow.organization_id == self.issue_alert.project.organization.id
assert workflow.config == {"frequency": 60}
assert workflow.owner_user_id == self.user.id
assert workflow.owner_team_id is None
assert workflow.when_condition_group
assert workflow.when_condition_group.logic_type == DataConditionGroup.Type.NONE
conditions = DataCondition.objects.filter(condition_group=workflow.when_condition_group)
assert conditions.count() == 1
assert conditions.filter(
type=Condition.FIRST_SEEN_EVENT,
comparison=True,
condition_result=True,
).exists()
if_dcg = WorkflowDataConditionGroup.objects.get(workflow=workflow).condition_group
assert if_dcg.logic_type == DataConditionGroup.Type.ALL
filters = DataCondition.objects.filter(condition_group=if_dcg)
assert filters.count() == 1
assert filters.filter(
type=Condition.LATEST_RELEASE,
comparison=True,
condition_result=True,
).exists()
dcg_actions = DataConditionGroupAction.objects.get(condition_group=if_dcg)
action = dcg_actions.action
assert action.type == Action.Type.PLUGIN # tested fully in test_migrate_rule_action.py
def test_update_issue_alert__none_match(self) -> None:
IssueAlertMigrator(self.issue_alert, self.user.id).run()
conditions_payload = [
{
"id": FirstSeenEventCondition.id,
},
{
"id": LatestReleaseFilter.id,
},
]
rule_data = self.issue_alert.data
rule_data.update(
{
"action_match": None,
"filter_match": None,
"conditions": conditions_payload,
"frequency": 60,
"actions": [
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"uuid": "test-uuid",
}
],
}
)
self.issue_alert.update(
label="hello world",
owner_user_id=self.user.id,
environment_id=self.environment.id,
data=rule_data,
)
update_migrated_issue_alert(self.issue_alert)
issue_alert_workflow = AlertRuleWorkflow.objects.get(rule_id=self.issue_alert.id)
workflow = Workflow.objects.get(id=issue_alert_workflow.workflow.id)
assert workflow.when_condition_group
assert workflow.when_condition_group.logic_type == DataConditionGroup.Type.ALL
if_dcg = WorkflowDataConditionGroup.objects.get(workflow=workflow).condition_group
assert if_dcg.logic_type == DataConditionGroup.Type.ALL
def test_update_issue_alert__with_conditions(self) -> None:
IssueAlertMigrator(self.issue_alert, self.user.id).run()
rule_data = self.issue_alert.data
rule_data.update(
{
"action_match": "none",
"filter_match": "all",
"conditions": self.conditions,
"frequency": 60,
"actions": [
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"uuid": "test-uuid",
}
],
}
)
self.issue_alert.update(
label="hello world",
owner_user_id=self.user.id,
environment_id=self.environment.id,
data=rule_data,
)
update_migrated_issue_alert(self.issue_alert)
assert DataCondition.objects.all().count() == 1
dc = DataCondition.objects.get(type=Condition.EVENT_UNIQUE_USER_FREQUENCY_COUNT)
assert dc.comparison == {
"interval": "1h",
"value": 50,
"filters": self.expected_filters,
}
def test_required_fields_only(self) -> None:
IssueAlertMigrator(self.issue_alert, self.user.id).run()
# None fields are not updated
rule_data = self.issue_alert.data
rule_data.update(
{
"action_match": "none",
"conditions": [],
"actions": [
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"uuid": "test-uuid",
}
],
}
)
self.issue_alert.update(
label="hello world",
owner_user_id=None,
owner_team_id=None,
environment_id=None,
data=rule_data,
)
update_migrated_issue_alert(self.issue_alert)
issue_alert_workflow = AlertRuleWorkflow.objects.get(rule_id=self.issue_alert.id)
workflow = Workflow.objects.get(id=issue_alert_workflow.workflow.id)
assert workflow.environment is None
assert workflow.owner_user_id is None
assert workflow.owner_team_id is None
assert workflow.config == {"frequency": 5} # not migrated
assert workflow.when_condition_group
assert workflow.when_condition_group.logic_type == DataConditionGroup.Type.NONE
conditions = DataCondition.objects.filter(condition_group=workflow.when_condition_group)
assert conditions.count() == 0
if_dcg = WorkflowDataConditionGroup.objects.get(workflow=workflow).condition_group
assert if_dcg.logic_type == DataConditionGroup.Type.ANY_SHORT_CIRCUIT
filters = DataCondition.objects.filter(condition_group=if_dcg)
assert filters.count() == 0
def test_invalid_frequency(self) -> None:
IssueAlertMigrator(self.issue_alert, self.user.id).run()
self.issue_alert.data["frequency"] = -1
self.issue_alert.save()
with pytest.raises(ValidationError):
update_migrated_issue_alert(self.issue_alert)
def test_keeps_snooze_status(self) -> None:
RuleSnooze.objects.create(rule=self.issue_alert)
workflow = IssueAlertMigrator(self.issue_alert, self.user.id).run()
assert workflow.enabled is False
self.issue_alert.data["frequency"] = 5
self.issue_alert.save()
update_migrated_issue_alert(self.issue_alert)
workflow.refresh_from_db()
assert workflow.enabled is False
|
IssueAlertDualWriteUpdateTest
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/super2.py
|
{
"start": 1137,
"end": 1261
}
|
class ____(CChild, D):
def __init__(self, name: str, num: int) -> None:
super(C, self).__init__(name, num)
|
DChild1
|
python
|
ray-project__ray
|
rllib/core/models/torch/encoder.py
|
{
"start": 6931,
"end": 10095
}
|
class ____(TorchModel, Encoder):
"""A recurrent LSTM encoder.
This encoder has...
- Zero or one tokenizers.
- One or more LSTM layers.
"""
def __init__(self, config: RecurrentEncoderConfig) -> None:
TorchModel.__init__(self, config)
# Maybe create a tokenizer
if config.tokenizer_config is not None:
self.tokenizer = config.tokenizer_config.build(framework="torch")
lstm_input_dims = config.tokenizer_config.output_dims
else:
self.tokenizer = None
lstm_input_dims = config.input_dims
# We only support 1D spaces right now.
assert len(lstm_input_dims) == 1
lstm_input_dim = lstm_input_dims[0]
lstm_weights_initializer = get_initializer_fn(
config.hidden_weights_initializer, framework="torch"
)
lstm_bias_initializer = get_initializer_fn(
config.hidden_bias_initializer, framework="torch"
)
# Create the torch LSTM layer.
self.lstm = nn.LSTM(
lstm_input_dim,
config.hidden_dim,
config.num_layers,
batch_first=config.batch_major,
bias=config.use_bias,
)
# Initialize LSTM layer weigths and biases, if necessary.
for layer in self.lstm.all_weights:
if lstm_weights_initializer:
lstm_weights_initializer(
layer[0], **config.hidden_weights_initializer_config or {}
)
lstm_weights_initializer(
layer[1], **config.hidden_weights_initializer_config or {}
)
if lstm_bias_initializer:
lstm_bias_initializer(
layer[2], **config.hidden_bias_initializer_config or {}
)
lstm_bias_initializer(
layer[3], **config.hidden_bias_initializer_config or {}
)
@override(Model)
def get_initial_state(self):
return {
"h": torch.zeros(self.config.num_layers, self.config.hidden_dim),
"c": torch.zeros(self.config.num_layers, self.config.hidden_dim),
}
@override(Model)
def _forward(self, inputs: dict, **kwargs) -> dict:
outputs = {}
if self.tokenizer is not None:
# Push observations through the tokenizer encoder if we built one.
out = tokenize(self.tokenizer, inputs, framework="torch")
else:
# Otherwise, just use the raw observations.
out = inputs[Columns.OBS].float()
# States are batch-first when coming in. Make them layers-first.
states_in = tree.map_structure(
lambda s: s.transpose(0, 1), inputs[Columns.STATE_IN]
)
out, states_out = self.lstm(out, (states_in["h"], states_in["c"]))
states_out = {"h": states_out[0], "c": states_out[1]}
# Insert them into the output dict.
outputs[ENCODER_OUT] = out
outputs[Columns.STATE_OUT] = tree.map_structure(
lambda s: s.transpose(0, 1), states_out
)
return outputs
|
TorchLSTMEncoder
|
python
|
sympy__sympy
|
sympy/physics/quantum/tests/test_operator.py
|
{
"start": 1056,
"end": 1147
}
|
class ____(Ket):
@classmethod
def default_args(self):
return ("t",)
|
CustomKet
|
python
|
apache__airflow
|
providers/apache/hive/src/airflow/providers/apache/hive/transfers/hive_to_samba.py
|
{
"start": 1301,
"end": 3003
}
|
class ____(BaseOperator):
"""
Execute hql code in a specific Hive database and load the results as a csv to a Samba location.
:param hql: the hql to be exported. (templated)
:param destination_filepath: the file path to where the file will be pushed onto samba
:param samba_conn_id: reference to the samba destination
:param hiveserver2_conn_id: Reference to the
:ref: `Hive Server2 thrift service connection id <howto/connection:hiveserver2>`.
"""
template_fields: Sequence[str] = ("hql", "destination_filepath")
template_ext: Sequence[str] = (
".hql",
".sql",
)
template_fields_renderers = {"hql": "hql"}
def __init__(
self,
*,
hql: str,
destination_filepath: str,
samba_conn_id: str = "samba_default",
hiveserver2_conn_id: str = "hiveserver2_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.hiveserver2_conn_id = hiveserver2_conn_id
self.samba_conn_id = samba_conn_id
self.destination_filepath = destination_filepath
self.hql = hql
def execute(self, context: Context):
self.hql = self.hql.strip().rstrip(";")
with NamedTemporaryFile() as tmp_file:
self.log.info("Fetching file from Hive")
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
hive.to_csv(self.hql, csv_filepath=tmp_file.name, hive_conf=context_to_airflow_vars(context))
self.log.info("Pushing to samba")
samba = SambaHook(samba_conn_id=self.samba_conn_id)
samba.push_from_local(self.destination_filepath, tmp_file.name)
|
HiveToSambaOperator
|
python
|
doocs__leetcode
|
lcof2/剑指 Offer II 075. 数组相对排序/Solution2.py
|
{
"start": 0,
"end": 354
}
|
class ____:
def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:
cnt = Counter(arr1)
ans = []
for x in arr2:
ans.extend([x] * cnt[x])
cnt.pop(x)
mi, mx = min(arr1), max(arr1)
for x in range(mi, mx + 1):
ans.extend([x] * cnt[x])
return ans
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/poolformer/modeling_poolformer.py
|
{
"start": 11605,
"end": 12003
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states):
output = self.dense(hidden_states)
return output
@auto_docstring(
custom_intro="""
PoolFormer Model transformer with an image classification head on top
"""
)
|
PoolFormerFinalPooler
|
python
|
ipython__ipython
|
IPython/terminal/shortcuts/__init__.py
|
{
"start": 1247,
"end": 18584
}
|
class ____(BaseBinding):
# while filter could be created by referencing variables directly (rather
# than created from strings), by using strings we ensure that users will
# be able to create filters in configuration (e.g. JSON) files too, which
# also benefits the documentation by enforcing human-readable filter names.
condition: Optional[str] = None
def __post_init__(self):
if self.condition:
self.filter = filter_from_string(self.condition)
else:
self.filter = None
def create_identifier(handler: Callable):
parts = handler.__module__.split(".")
name = handler.__name__
package = parts[0]
if len(parts) > 1:
final_module = parts[-1]
return f"{package}:{final_module}.{name}"
else:
return f"{package}:{name}"
AUTO_MATCH_BINDINGS = [
*[
Binding(
cmd, [key], "focused_insert & auto_match & followed_by_closing_paren_or_end"
)
for key, cmd in match.auto_match_parens.items()
],
*[
# raw string
Binding(cmd, [key], "focused_insert & auto_match & preceded_by_raw_str_prefix")
for key, cmd in match.auto_match_parens_raw_string.items()
],
Binding(
match.double_quote,
['"'],
"focused_insert"
" & auto_match"
" & not_inside_unclosed_string"
" & preceded_by_paired_double_quotes"
" & followed_by_closing_paren_or_end",
),
Binding(
match.single_quote,
["'"],
"focused_insert"
" & auto_match"
" & not_inside_unclosed_string"
" & preceded_by_paired_single_quotes"
" & followed_by_closing_paren_or_end",
),
Binding(
match.docstring_double_quotes,
['"'],
"focused_insert"
" & auto_match"
" & not_inside_unclosed_string"
" & preceded_by_two_double_quotes",
),
Binding(
match.docstring_single_quotes,
["'"],
"focused_insert"
" & auto_match"
" & not_inside_unclosed_string"
" & preceded_by_two_single_quotes",
),
Binding(
match.skip_over,
[")"],
"focused_insert & auto_match & followed_by_closing_round_paren",
),
Binding(
match.skip_over,
["]"],
"focused_insert & auto_match & followed_by_closing_bracket",
),
Binding(
match.skip_over,
["}"],
"focused_insert & auto_match & followed_by_closing_brace",
),
Binding(
match.skip_over, ['"'], "focused_insert & auto_match & followed_by_double_quote"
),
Binding(
match.skip_over, ["'"], "focused_insert & auto_match & followed_by_single_quote"
),
Binding(
match.delete_pair,
["backspace"],
"focused_insert"
" & preceded_by_opening_round_paren"
" & auto_match"
" & followed_by_closing_round_paren",
),
Binding(
match.delete_pair,
["backspace"],
"focused_insert"
" & preceded_by_opening_bracket"
" & auto_match"
" & followed_by_closing_bracket",
),
Binding(
match.delete_pair,
["backspace"],
"focused_insert"
" & preceded_by_opening_brace"
" & auto_match"
" & followed_by_closing_brace",
),
Binding(
match.delete_pair,
["backspace"],
"focused_insert"
" & preceded_by_double_quote"
" & auto_match"
" & followed_by_double_quote",
),
Binding(
match.delete_pair,
["backspace"],
"focused_insert"
" & preceded_by_single_quote"
" & auto_match"
" & followed_by_single_quote",
),
]
AUTO_SUGGEST_BINDINGS = [
# there are two reasons for re-defining bindings defined upstream:
# 1) prompt-toolkit does not execute autosuggestion bindings in vi mode,
# 2) prompt-toolkit checks if we are at the end of text, not end of line
# hence it does not work in multi-line mode of navigable provider
Binding(
auto_suggest.accept_or_jump_to_end,
["end"],
"has_suggestion & default_buffer_focused & emacs_like_insert_mode",
),
Binding(
auto_suggest.accept_or_jump_to_end,
["c-e"],
"has_suggestion & default_buffer_focused & emacs_like_insert_mode",
),
Binding(
auto_suggest.accept,
["c-f"],
"has_suggestion & default_buffer_focused & emacs_like_insert_mode",
),
Binding(
auto_suggest.accept,
["right"],
"has_suggestion & default_buffer_focused & emacs_like_insert_mode & is_cursor_at_the_end_of_line",
),
Binding(
auto_suggest.accept_word,
["escape", "f"],
"has_suggestion & default_buffer_focused & emacs_like_insert_mode",
),
Binding(
auto_suggest.accept_token,
["c-right"],
"has_suggestion & default_buffer_focused & emacs_like_insert_mode",
),
Binding(
auto_suggest.discard,
["escape"],
# note this one is using `emacs_insert_mode`, not `emacs_like_insert_mode`
# as in `vi_insert_mode` we do not want `escape` to be shadowed (ever).
"has_suggestion & default_buffer_focused & emacs_insert_mode",
),
Binding(
auto_suggest.discard,
["delete"],
"has_suggestion & default_buffer_focused & emacs_insert_mode",
),
Binding(
auto_suggest.swap_autosuggestion_up,
["c-up"],
"navigable_suggestions"
" & ~has_line_above"
" & has_suggestion"
" & default_buffer_focused",
),
Binding(
auto_suggest.swap_autosuggestion_down,
["c-down"],
"navigable_suggestions"
" & ~has_line_below"
" & has_suggestion"
" & default_buffer_focused",
),
Binding(
auto_suggest.up_and_update_hint,
["c-up"],
"has_line_above & navigable_suggestions & default_buffer_focused",
),
Binding(
auto_suggest.down_and_update_hint,
["c-down"],
"has_line_below & navigable_suggestions & default_buffer_focused",
),
Binding(
auto_suggest.accept_character,
["escape", "right"],
"has_suggestion & default_buffer_focused & emacs_like_insert_mode",
),
Binding(
auto_suggest.accept_and_move_cursor_left,
["c-left"],
"has_suggestion & default_buffer_focused & emacs_like_insert_mode",
),
Binding(
auto_suggest.accept_and_keep_cursor,
["escape", "down"],
"has_suggestion & default_buffer_focused & emacs_insert_mode",
),
Binding(
auto_suggest.backspace_and_resume_hint,
["backspace"],
# no `has_suggestion` here to allow resuming if no suggestion
"default_buffer_focused & emacs_like_insert_mode",
),
Binding(
auto_suggest.resume_hinting,
["right"],
"is_cursor_at_the_end_of_line"
" & default_buffer_focused"
" & emacs_like_insert_mode"
" & pass_through",
),
]
SIMPLE_CONTROL_BINDINGS = [
Binding(cmd, [key], "vi_insert_mode & default_buffer_focused & ebivim")
for key, cmd in {
"c-a": nc.beginning_of_line,
"c-b": nc.backward_char,
"c-k": nc.kill_line,
"c-w": nc.backward_kill_word,
"c-y": nc.yank,
"c-_": nc.undo,
}.items()
]
ALT_AND_COMOBO_CONTROL_BINDINGS = [
Binding(cmd, list(keys), "vi_insert_mode & default_buffer_focused & ebivim")
for keys, cmd in {
# Control Combos
("c-x", "c-e"): nc.edit_and_execute,
("c-x", "e"): nc.edit_and_execute,
# Alt
("escape", "b"): nc.backward_word,
("escape", "c"): nc.capitalize_word,
("escape", "d"): nc.kill_word,
("escape", "h"): nc.backward_kill_word,
("escape", "l"): nc.downcase_word,
("escape", "u"): nc.uppercase_word,
("escape", "y"): nc.yank_pop,
("escape", "."): nc.yank_last_arg,
}.items()
]
def add_binding(bindings: KeyBindings, binding: Binding):
bindings.add(
*binding.keys,
**({"filter": binding.filter} if binding.filter is not None else {}),
)(binding.command)
def create_ipython_shortcuts(shell, skip=None) -> KeyBindings:
"""Set up the prompt_toolkit keyboard shortcuts for IPython.
Parameters
----------
shell: InteractiveShell
The current IPython shell Instance
skip: List[Binding]
Bindings to skip.
Returns
-------
KeyBindings
the keybinding instance for prompt toolkit.
"""
kb = KeyBindings()
skip = skip or []
for binding in KEY_BINDINGS:
skip_this_one = False
for to_skip in skip:
if (
to_skip.command == binding.command
and to_skip.filter == binding.filter
and to_skip.keys == binding.keys
):
skip_this_one = True
break
if skip_this_one:
continue
add_binding(kb, binding)
def get_input_mode(self):
app = get_app()
app.ttimeoutlen = shell.ttimeoutlen
app.timeoutlen = shell.timeoutlen
return self._input_mode
def set_input_mode(self, mode):
shape = {InputMode.NAVIGATION: 2, InputMode.REPLACE: 4}.get(mode, 6)
cursor = "\x1b[{} q".format(shape)
sys.stdout.write(cursor)
sys.stdout.flush()
self._input_mode = mode
if shell.editing_mode == "vi" and shell.modal_cursor:
ViState._input_mode = InputMode.INSERT # type: ignore
ViState.input_mode = property(get_input_mode, set_input_mode) # type: ignore
return kb
def reformat_and_execute(event):
"""Reformat code and execute it"""
shell = get_ipython()
reformat_text_before_cursor(
event.current_buffer, event.current_buffer.document, shell
)
event.current_buffer.validate_and_handle()
def reformat_text_before_cursor(buffer, document, shell):
text = buffer.delete_before_cursor(len(document.text[: document.cursor_position]))
try:
formatted_text = shell.reformat_handler(text)
buffer.insert_text(formatted_text)
except Exception as e:
buffer.insert_text(text)
def handle_return_or_newline_or_execute(event):
shell = get_ipython()
if getattr(shell, "handle_return", None):
return shell.handle_return(shell)(event)
else:
return newline_or_execute_outer(shell)(event)
def newline_or_execute_outer(shell):
def newline_or_execute(event):
"""When the user presses return, insert a newline or execute the code."""
b = event.current_buffer
d = b.document
if b.complete_state:
cc = b.complete_state.current_completion
if cc:
b.apply_completion(cc)
else:
b.cancel_completion()
return
# If there's only one line, treat it as if the cursor is at the end.
# See https://github.com/ipython/ipython/issues/10425
if d.line_count == 1:
check_text = d.text
else:
check_text = d.text[: d.cursor_position]
status, indent = shell.check_complete(check_text)
# if all we have after the cursor is whitespace: reformat current text
# before cursor
after_cursor = d.text[d.cursor_position :]
reformatted = False
if not after_cursor.strip():
reformat_text_before_cursor(b, d, shell)
reformatted = True
if not (
d.on_last_line
or d.cursor_position_row >= d.line_count - d.empty_line_count_at_the_end()
):
if shell.autoindent:
b.insert_text("\n" + indent)
else:
b.insert_text("\n")
return
if (status != "incomplete") and b.accept_handler:
if not reformatted:
reformat_text_before_cursor(b, d, shell)
b.validate_and_handle()
else:
if shell.autoindent:
b.insert_text("\n" + indent)
else:
b.insert_text("\n")
return newline_or_execute
def previous_history_or_previous_completion(event):
"""
Control-P in vi edit mode on readline is history next, unlike default prompt toolkit.
If completer is open this still select previous completion.
"""
event.current_buffer.auto_up()
def next_history_or_next_completion(event):
"""
Control-N in vi edit mode on readline is history previous, unlike default prompt toolkit.
If completer is open this still select next completion.
"""
event.current_buffer.auto_down()
def dismiss_completion(event):
"""Dismiss completion"""
b = event.current_buffer
if b.complete_state:
b.cancel_completion()
def reset_buffer(event):
"""Reset buffer"""
b = event.current_buffer
if b.complete_state:
b.cancel_completion()
else:
b.reset()
def reset_search_buffer(event):
"""Reset search buffer"""
if event.current_buffer.document.text:
event.current_buffer.reset()
else:
event.app.layout.focus(DEFAULT_BUFFER)
def suspend_to_bg(event):
"""Suspend to background"""
event.app.suspend_to_background()
def quit(event):
"""
Quit application with ``SIGQUIT`` if supported or ``sys.exit`` otherwise.
On platforms that support SIGQUIT, send SIGQUIT to the current process.
On other platforms, just exit the process with a message.
"""
sigquit = getattr(signal, "SIGQUIT", None)
if sigquit is not None:
os.kill(0, signal.SIGQUIT)
else:
sys.exit("Quit")
def indent_buffer(event):
"""Indent buffer"""
event.current_buffer.insert_text(" " * 4)
def newline_autoindent(event):
"""Insert a newline after the cursor indented appropriately.
Fancier version of former ``newline_with_copy_margin`` which should
compute the correct indentation of the inserted line. That is to say, indent
by 4 extra space after a function definition, class definition, context
manager... And dedent by 4 space after ``pass``, ``return``, ``raise ...``.
"""
shell = get_ipython()
inputsplitter = shell.input_transformer_manager
b = event.current_buffer
d = b.document
if b.complete_state:
b.cancel_completion()
text = d.text[: d.cursor_position] + "\n"
_, indent = inputsplitter.check_complete(text)
b.insert_text("\n" + (" " * (indent or 0)), move_cursor=False)
def open_input_in_editor(event):
"""Open code from input in external editor"""
event.app.current_buffer.open_in_editor()
if sys.platform == "win32":
from IPython.core.error import TryNext
from IPython.lib.clipboard import (
ClipboardEmpty,
tkinter_clipboard_get,
win32_clipboard_get,
)
@undoc
def win_paste(event):
try:
text = win32_clipboard_get()
except TryNext:
try:
text = tkinter_clipboard_get()
except (TryNext, ClipboardEmpty):
return
except ClipboardEmpty:
return
event.current_buffer.insert_text(text.replace("\t", " " * 4))
else:
@undoc
def win_paste(event):
"""Stub used on other platforms"""
pass
KEY_BINDINGS = [
Binding(
handle_return_or_newline_or_execute,
["enter"],
"default_buffer_focused & ~has_selection & insert_mode",
),
Binding(
reformat_and_execute,
["escape", "enter"],
"default_buffer_focused & ~has_selection & insert_mode & ebivim",
),
Binding(quit, ["c-\\"]),
Binding(
previous_history_or_previous_completion,
["c-p"],
"vi_insert_mode & default_buffer_focused",
),
Binding(
next_history_or_next_completion,
["c-n"],
"vi_insert_mode & default_buffer_focused",
),
Binding(dismiss_completion, ["c-g"], "default_buffer_focused & has_completions"),
Binding(reset_buffer, ["c-c"], "default_buffer_focused"),
Binding(reset_search_buffer, ["c-c"], "search_buffer_focused"),
Binding(suspend_to_bg, ["c-z"], "supports_suspend"),
Binding(
indent_buffer,
["tab"], # Ctrl+I == Tab
"default_buffer_focused & ~has_selection & insert_mode & cursor_in_leading_ws",
),
Binding(newline_autoindent, ["c-o"], "default_buffer_focused & emacs_insert_mode"),
Binding(open_input_in_editor, ["f2"], "default_buffer_focused"),
*AUTO_MATCH_BINDINGS,
*AUTO_SUGGEST_BINDINGS,
Binding(
display_completions_like_readline,
["c-i"],
"readline_like_completions"
" & default_buffer_focused"
" & ~has_selection"
" & insert_mode"
" & ~cursor_in_leading_ws",
),
Binding(win_paste, ["c-v"], "default_buffer_focused & ~vi_mode & is_windows_os"),
*SIMPLE_CONTROL_BINDINGS,
*ALT_AND_COMOBO_CONTROL_BINDINGS,
]
UNASSIGNED_ALLOWED_COMMANDS = [
auto_suggest.llm_autosuggestion,
nc.beginning_of_buffer,
nc.end_of_buffer,
nc.end_of_line,
nc.forward_char,
nc.forward_word,
nc.unix_line_discard,
]
|
Binding
|
python
|
ray-project__ray
|
rllib/examples/learners/classes/intrinsic_curiosity_learners.py
|
{
"start": 1067,
"end": 3664
}
|
class ____(PPOTorchLearner):
def build(self) -> None:
super().build()
add_intrinsic_curiosity_connectors(self)
def add_intrinsic_curiosity_connectors(torch_learner: TorchLearner) -> None:
"""Adds two connector pieces to the Learner pipeline, needed for ICM training.
- The `AddNextObservationsFromEpisodesToTrainBatch` connector makes sure the train
batch contains the NEXT_OBS for ICM's forward- and inverse dynamics net training.
- The `IntrinsicCuriosityModelConnector` piece computes intrinsic rewards from the
ICM and adds the results to the extrinsic reward of the main module's train batch.
Args:
torch_learner: The TorchLearner, to whose Learner pipeline the two ICM connector
pieces should be added.
"""
learner_config_dict = torch_learner.config.learner_config_dict
# Assert, we are only training one policy (RLModule) and we have the ICM
# in our MultiRLModule.
assert (
len(torch_learner.module) == 2
and DEFAULT_MODULE_ID in torch_learner.module
and ICM_MODULE_ID in torch_learner.module
)
# Make sure both curiosity loss settings are explicitly set in the
# `learner_config_dict`.
if (
"forward_loss_weight" not in learner_config_dict
or "intrinsic_reward_coeff" not in learner_config_dict
):
raise KeyError(
"When using the IntrinsicCuriosityTorchLearner, both `forward_loss_weight` "
" and `intrinsic_reward_coeff` must be part of your config's "
"`learner_config_dict`! Add these values through: `config.training("
"learner_config_dict={'forward_loss_weight': .., 'intrinsic_reward_coeff': "
"..})`."
)
if torch_learner.config.add_default_connectors_to_learner_pipeline:
# Prepend a "add-NEXT_OBS-from-episodes-to-train-batch" connector piece
# (right after the corresponding "add-OBS-..." default piece).
torch_learner._learner_connector.insert_after(
AddObservationsFromEpisodesToBatch,
AddNextObservationsFromEpisodesToTrainBatch(),
)
# Append the ICM connector, computing intrinsic rewards and adding these to
# the main model's extrinsic rewards.
torch_learner._learner_connector.insert_after(
NumpyToTensor,
IntrinsicCuriosityModelConnector(
intrinsic_reward_coeff=(
torch_learner.config.learner_config_dict["intrinsic_reward_coeff"]
)
),
)
|
PPOTorchLearnerWithCuriosity
|
python
|
walkccc__LeetCode
|
solutions/2815. Max Pair Sum in an Array/2815.py
|
{
"start": 0,
"end": 518
}
|
class ____:
def maxSum(self, nums: list[int]) -> int:
ans = 0
# maxNum[i] := the maximum num we met so far with the maximum digit i
maxNum = [0] * 10
def getMaxDigit(num: int) -> int:
maxDigit = 0
while num > 0:
maxDigit = max(maxDigit, num % 10)
num //= 10
return maxDigit
for num in nums:
d = getMaxDigit(num)
if maxNum[d] > 0:
ans = max(ans, num + maxNum[d])
maxNum[d] = max(maxNum[d], num)
return -1 if ans == 0 else ans
|
Solution
|
python
|
conda__conda
|
conda/plugins/prefix_data_loaders/pypi/pkg_format.py
|
{
"start": 2127,
"end": 15087
}
|
class ____:
"""Base object describing a python distribution based on path to anchor file."""
MANIFEST_FILES = () # Only one is used, but many names available
REQUIRES_FILES = () # Only one is used, but many names available
MANDATORY_FILES = ()
ENTRY_POINTS_FILES = ("entry_points.txt",)
@staticmethod
def init(prefix_path, anchor_file, python_version):
if anchor_file.endswith(".egg-link"):
return PythonEggLinkDistribution(prefix_path, anchor_file, python_version)
elif ".dist-info" in anchor_file:
return PythonInstalledDistribution(prefix_path, anchor_file, python_version)
elif anchor_file.endswith(".egg-info"):
anchor_full_path = join(prefix_path, win_path_ok(anchor_file))
sp_reference = basename(anchor_file)
return PythonEggInfoDistribution(
anchor_full_path, python_version, sp_reference
)
elif ".egg-info" in anchor_file:
anchor_full_path = join(prefix_path, win_path_ok(dirname(anchor_file)))
sp_reference = basename(dirname(anchor_file))
return PythonEggInfoDistribution(
anchor_full_path, python_version, sp_reference
)
elif ".egg" in anchor_file:
anchor_full_path = join(prefix_path, win_path_ok(dirname(anchor_file)))
sp_reference = basename(dirname(anchor_file))
return PythonEggInfoDistribution(
anchor_full_path, python_version, sp_reference
)
else:
raise NotImplementedError()
def __init__(self, anchor_full_path, python_version):
# Don't call PythonDistribution directly. Use the init() static method.
self.anchor_full_path = anchor_full_path
self.python_version = python_version
if anchor_full_path and isfile(anchor_full_path):
self._metadata_dir_full_path = dirname(anchor_full_path)
elif anchor_full_path and isdir(anchor_full_path):
self._metadata_dir_full_path = anchor_full_path
else:
raise RuntimeError(f"Path not found: {anchor_full_path}")
self._check_files()
self._metadata = PythonDistributionMetadata(anchor_full_path)
self._provides_file_data = ()
self._requires_file_data = ()
def _check_files(self):
"""Check the existence of mandatory files for a given distribution."""
for fname in self.MANDATORY_FILES:
if self._metadata_dir_full_path:
fpath = join(self._metadata_dir_full_path, fname)
if not isfile(fpath):
raise OSError(ENOENT, strerror(ENOENT), fpath)
def _check_path_data(self, path, checksum, size):
"""Normalizes record data content and format."""
if checksum:
if not checksum.startswith("sha256="):
raise ValueError(
f"Invalid checksum {checksum} at {path}. "
f"Check {self._metadata_dir_full_path}."
)
checksum = checksum[7:]
else:
checksum = None
size = int(size) if size else None
return path, checksum, size
@staticmethod
def _parse_requires_file_data(data, global_section="__global__"):
# https://setuptools.readthedocs.io/en/latest/formats.html#requires-txt
requires = {}
lines = [line.strip() for line in data.split("\n") if line]
if lines and not (lines[0].startswith("[") and lines[0].endswith("]")):
# Add dummy section for unsectioned items
lines = [f"[{global_section}]"] + lines
# Parse sections
for line in lines:
if line.startswith("[") and line.endswith("]"):
section = line.strip()[1:-1]
requires[section] = []
continue
if line.strip():
requires[section].append(line.strip())
# Adapt to *standard* requirements (add env markers to requirements)
reqs = []
extras = []
for section, values in requires.items():
if section == global_section:
# This is the global section (same as dist_requires)
reqs.extend(values)
elif section.startswith(":"):
# The section is used as a marker
# Example: ":python_version < '3'"
marker = section.replace(":", "; ")
new_values = [v + marker for v in values]
reqs.extend(new_values)
else:
# The section is an extra, i.e. "docs", or "tests"...
extras.append(section)
marker = f'; extra == "{section}"'
new_values = [v + marker for v in values]
reqs.extend(new_values)
return frozenset(reqs), extras
@staticmethod
def _parse_entries_file_data(data):
# https://setuptools.readthedocs.io/en/latest/formats.html#entry-points-txt-entry-point-plugin-metadata
# FIXME: Use pkg_resources which provides API for this?
entries_data = {}
config = ConfigParser()
config.optionxform = lambda x: x # Avoid lowercasing keys
try:
do_read = config.read_file
except AttributeError:
do_read = config.readfp
do_read(StringIO(data))
for section in config.sections():
entries_data[section] = dict(config.items(section))
return entries_data
def _load_requires_provides_file(self):
# https://setuptools.readthedocs.io/en/latest/formats.html#requires-txt
# FIXME: Use pkg_resources which provides API for this?
requires, extras = None, None
for fname in self.REQUIRES_FILES:
fpath = join(self._metadata_dir_full_path, fname)
if isfile(fpath):
with open_utf8(fpath) as fh:
data = fh.read()
requires, extras = self._parse_requires_file_data(data)
self._provides_file_data = extras
self._requires_file_data = requires
break
return requires, extras
@memoizedproperty
def manifest_full_path(self):
manifest_full_path = None
if self._metadata_dir_full_path:
for fname in self.MANIFEST_FILES:
manifest_full_path = join(self._metadata_dir_full_path, fname)
if isfile(manifest_full_path):
break
return manifest_full_path
def get_paths(self):
"""
Read the list of installed paths from record or source file.
Example
-------
[(u'skdata/__init__.py', u'sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU', 0),
(u'skdata/diabetes.py', None, None),
...
]
"""
manifest_full_path = self.manifest_full_path
if manifest_full_path:
python_version = self.python_version
sp_dir = get_python_site_packages_short_path(python_version) + "/"
prepend_metadata_dirname = (
basename(manifest_full_path) == "installed-files.txt"
)
if prepend_metadata_dirname:
path_prepender = basename(dirname(manifest_full_path)) + "/"
else:
path_prepender = ""
def process_csv_row(reader):
seen = []
records = []
for row in reader:
cleaned_path = posix_normpath(f"{sp_dir}{path_prepender}{row[0]}")
if len(row) == 3:
checksum, size = row[1:]
if checksum:
if not checksum.startswith("sha256="):
raise ValueError(
f"Invalid checksum {checksum} at {cleaned_path}. "
f"Check {self._metadata_dir_full_path}."
)
checksum = checksum[7:]
else:
checksum = None
size = int(size) if size else None
else:
checksum = size = None
if cleaned_path not in seen and row[0]:
seen.append(cleaned_path)
records.append((cleaned_path, checksum, size))
return tuple(records)
csv_delimiter = ","
with open_utf8(manifest_full_path) as csvfile:
record_reader = csv_reader(csvfile, delimiter=csv_delimiter)
# format of each record is (path, checksum, size)
records = process_csv_row(record_reader)
files_set = {record[0] for record in records}
_pyc_path, _py_file_re = pyc_path, PY_FILE_RE
py_ver_mm = get_major_minor_version(python_version, with_dot=False)
missing_pyc_files = (
ff
for ff in (
_pyc_path(f, py_ver_mm) for f in files_set if _py_file_re.match(f)
)
if ff not in files_set
)
records = sorted(
(*records, *((pf, None, None) for pf in missing_pyc_files))
)
return records
return []
def get_dist_requirements(self):
# FIXME: On some packages, requirements are not added to metadata,
# but on a separate requires.txt, see: python setup.py develop for
# anaconda-client. This is setuptools behavior.
# TODO: what is the dependency_links.txt on the same example?
data = self._metadata.get_dist_requirements()
if self._requires_file_data:
data = self._requires_file_data
elif not data:
self._load_requires_provides_file()
data = self._requires_file_data
return data
def get_python_requirements(self):
return self._metadata.get_python_requirements()
def get_external_requirements(self):
return self._metadata.get_external_requirements()
def get_extra_provides(self):
# FIXME: On some packages, requirements are not added to metadata,
# but on a separate requires.txt, see: python setup.py develop for
# anaconda-client. This is setuptools behavior.
data = self._metadata.get_extra_provides()
if self._provides_file_data:
data = self._provides_file_data
elif not data:
self._load_requires_provides_file()
data = self._provides_file_data
return data
def get_conda_dependencies(self):
"""
Process metadata fields providing dependency information.
This includes normalizing fields, and evaluating environment markers.
"""
python_spec = "python {}.*".format(".".join(self.python_version.split(".")[:2]))
def pyspec_to_norm_req(pyspec):
conda_name = pypi_name_to_conda_name(norm_package_name(pyspec.name))
return (
f"{conda_name} {pyspec.constraints}"
if pyspec.constraints
else conda_name
)
reqs = self.get_dist_requirements()
pyspecs = tuple(parse_specification(req) for req in reqs)
marker_groups = groupby(lambda ps: ps.marker.split("==", 1)[0].strip(), pyspecs)
depends = {pyspec_to_norm_req(pyspec) for pyspec in marker_groups.pop("", ())}
extras = marker_groups.pop("extra", ())
execution_context = {
"python_version": self.python_version,
}
depends.update(
pyspec_to_norm_req(pyspec)
for pyspec in chain.from_iterable(marker_groups.values())
if interpret(pyspec.marker, execution_context)
)
constrains = {
pyspec_to_norm_req(pyspec) for pyspec in extras if pyspec.constraints
}
depends.add(python_spec)
return sorted(depends), sorted(constrains)
def get_optional_dependencies(self):
raise NotImplementedError
def get_entry_points(self):
# TODO: need to add entry points, "exports," and other files that might
# not be in RECORD
for fname in self.ENTRY_POINTS_FILES:
fpath = join(self._metadata_dir_full_path, fname)
if isfile(fpath):
with open_utf8(fpath) as fh:
data = fh.read()
return self._parse_entries_file_data(data)
@property
def name(self):
return self._metadata.name
@property
def norm_name(self):
return norm_package_name(self.name)
@property
def conda_name(self):
return pypi_name_to_conda_name(self.norm_name)
@property
def version(self):
return self._metadata.version
|
PythonDistribution
|
python
|
huggingface__transformers
|
tests/models/lightglue/test_modeling_lightglue.py
|
{
"start": 4443,
"end": 11984
}
|
class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (LightGlueForKeypointMatching,) if is_torch_available() else ()
all_generative_model_classes = () if is_torch_available() else ()
test_resize_embeddings = False
has_attentions = True
def setUp(self):
self.model_tester = LightGlueModelTester(self)
self.config_tester = ConfigTester(self, config_class=LightGlueConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def test_batching_equivalence(self, atol=1e-5, rtol=1e-5):
device_properties = get_device_properties()
if device_properties[0] == "cuda" and device_properties[1] == 8:
# TODO: (ydshieh) fix this
self.skipTest(reason="After switching to A10, this test always fails, but pass on CPU or T4.")
super().test_batching_equivalence(atol=atol, rtol=rtol)
@unittest.skip(reason="LightGlueForKeypointMatching does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="LightGlueForKeypointMatching does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="LightGlueForKeypointMatching does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
@unittest.skip(reason="LightGlueForKeypointMatching is not trainable")
def test_training(self):
pass
@unittest.skip(reason="LightGlueForKeypointMatching is not trainable")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="LightGlueForKeypointMatching is not trainable")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="LightGlueForKeypointMatching is not trainable")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="LightGlue does not output any loss term in the forward pass")
def test_retain_grad_hidden_states_attentions(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
maximum_num_matches = outputs.mask.shape[-1]
hidden_states_sizes = [
self.model_tester.descriptor_dim,
self.model_tester.descriptor_dim,
self.model_tester.descriptor_dim * 2,
self.model_tester.descriptor_dim,
self.model_tester.descriptor_dim,
self.model_tester.descriptor_dim * 2,
self.model_tester.descriptor_dim,
] * self.model_tester.num_layers
for i, hidden_states_size in enumerate(hidden_states_sizes):
self.assertListEqual(
list(hidden_states[i].shape[-2:]),
[maximum_num_matches, hidden_states_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
def check_attention_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
maximum_num_matches = outputs.mask.shape[-1]
expected_attention_shape = [self.model_tester.num_heads, maximum_num_matches, maximum_num_matches]
for i, attention in enumerate(attentions):
self.assertListEqual(
list(attention.shape[-3:]),
expected_attention_shape,
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
check_attention_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
check_attention_output(inputs_dict, config, model_class)
@slow
def test_model_from_pretrained(self):
from_pretrained_ids = ["ETH-CVG/lightglue_superpoint"]
for model_name in from_pretrained_ids:
model = LightGlueForKeypointMatching.from_pretrained(model_name)
self.assertIsNotNone(model)
# Copied from tests.models.superglue.test_modeling_superglue.SuperGlueModelTest.test_forward_labels_should_be_none
def test_forward_labels_should_be_none(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
model_inputs = self._prepare_for_class(inputs_dict, model_class)
# Provide an arbitrary sized Tensor as labels to model inputs
model_inputs["labels"] = torch.rand((128, 128))
with self.assertRaises(ValueError) as cm:
model(**model_inputs)
self.assertEqual(ValueError, cm.exception.__class__)
def prepare_imgs():
dataset = load_dataset("hf-internal-testing/image-matching-test-dataset", split="train")
image0 = dataset[0]["image"]
image1 = dataset[1]["image"]
image2 = dataset[2]["image"]
# [image1, image1] on purpose to test the model early stopping
return [[image2, image0], [image1, image1]]
@require_torch
@require_vision
|
LightGlueModelTest
|
python
|
django__django
|
django/contrib/auth/management/commands/createsuperuser.py
|
{
"start": 514,
"end": 598
}
|
class ____(Exception):
pass
PASSWORD_FIELD = "password"
|
NotRunningInTTYException
|
python
|
scikit-learn__scikit-learn
|
sklearn/decomposition/_nmf.py
|
{
"start": 36004,
"end": 42285
}
|
class ____(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, ABC):
"""Base class for NMF and MiniBatchNMF."""
_parameter_constraints: dict = {
"n_components": [
Interval(Integral, 1, None, closed="left"),
None,
StrOptions({"auto"}),
],
"init": [
StrOptions({"random", "nndsvd", "nndsvda", "nndsvdar", "custom"}),
None,
],
"beta_loss": [
StrOptions({"frobenius", "kullback-leibler", "itakura-saito"}),
Real,
],
"tol": [Interval(Real, 0, None, closed="left")],
"max_iter": [Interval(Integral, 1, None, closed="left")],
"random_state": ["random_state"],
"alpha_W": [Interval(Real, 0, None, closed="left")],
"alpha_H": [Interval(Real, 0, None, closed="left"), StrOptions({"same"})],
"l1_ratio": [Interval(Real, 0, 1, closed="both")],
"verbose": ["verbose"],
}
def __init__(
self,
n_components="auto",
*,
init=None,
beta_loss="frobenius",
tol=1e-4,
max_iter=200,
random_state=None,
alpha_W=0.0,
alpha_H="same",
l1_ratio=0.0,
verbose=0,
):
self.n_components = n_components
self.init = init
self.beta_loss = beta_loss
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha_W = alpha_W
self.alpha_H = alpha_H
self.l1_ratio = l1_ratio
self.verbose = verbose
def _check_params(self, X):
# n_components
self._n_components = self.n_components
if self._n_components is None:
self._n_components = X.shape[1]
# beta_loss
self._beta_loss = _beta_loss_to_float(self.beta_loss)
def _check_w_h(self, X, W, H, update_H):
"""Check W and H, or initialize them."""
n_samples, n_features = X.shape
if self.init == "custom" and update_H:
_check_init(H, (self._n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, self._n_components), "NMF (input W)")
if self._n_components == "auto":
self._n_components = H.shape[0]
if H.dtype != X.dtype or W.dtype != X.dtype:
raise TypeError(
"H and W should have the same dtype as X. Got "
"H.dtype = {} and W.dtype = {}.".format(H.dtype, W.dtype)
)
elif not update_H:
if W is not None:
warnings.warn(
"When update_H=False, the provided initial W is not used.",
RuntimeWarning,
)
_check_init(H, (self._n_components, n_features), "NMF (input H)")
if self._n_components == "auto":
self._n_components = H.shape[0]
if H.dtype != X.dtype:
raise TypeError(
"H should have the same dtype as X. Got H.dtype = {}.".format(
H.dtype
)
)
# 'mu' solver should not be initialized by zeros
if self.solver == "mu":
avg = np.sqrt(X.mean() / self._n_components)
W = np.full((n_samples, self._n_components), avg, dtype=X.dtype)
else:
W = np.zeros((n_samples, self._n_components), dtype=X.dtype)
else:
if W is not None or H is not None:
warnings.warn(
(
"When init!='custom', provided W or H are ignored. Set "
" init='custom' to use them as initialization."
),
RuntimeWarning,
)
if self._n_components == "auto":
self._n_components = X.shape[1]
W, H = _initialize_nmf(
X, self._n_components, init=self.init, random_state=self.random_state
)
return W, H
def _compute_regularization(self, X):
"""Compute scaled regularization terms."""
n_samples, n_features = X.shape
alpha_W = self.alpha_W
alpha_H = self.alpha_W if self.alpha_H == "same" else self.alpha_H
l1_reg_W = n_features * alpha_W * self.l1_ratio
l1_reg_H = n_samples * alpha_H * self.l1_ratio
l2_reg_W = n_features * alpha_W * (1.0 - self.l1_ratio)
l2_reg_H = n_samples * alpha_H * (1.0 - self.l1_ratio)
return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : kwargs
Parameters (keyword arguments) and values passed to
the fit_transform instance.
Returns
-------
self : object
Returns the instance itself.
"""
# param validation is done in fit_transform
self.fit_transform(X, **params)
return self
def inverse_transform(self, X):
"""Transform data back to its original space.
.. versionadded:: 0.18
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_components)
Transformed data matrix.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Returns a data matrix of the original shape.
"""
check_is_fitted(self)
return X @ self.components_
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0]
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.positive_only = True
tags.input_tags.sparse = True
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
return tags
|
_BaseNMF
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1408009,
"end": 1408307
}
|
class ____(
sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData, RepositoryAuditEntryData
):
"""Audit log entry for a repo.config.enable_sockpuppet_disallowed
event.
"""
__schema__ = github_schema
__field_names__ = ()
|
RepoConfigEnableSockpuppetDisallowedAuditEntry
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/connectors_qa/tests/unit_tests/test_checks/test_packaging.py
|
{
"start": 216,
"end": 2206
}
|
class ____:
def test_fail_when_pyproject_toml_file_does_not_exist(self, tmp_path, mocker):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
# Act
result = packaging.CheckConnectorUsesPoetry()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert result.message == f"{consts.PYPROJECT_FILE_NAME} file is missing"
def test_fail_when_poetry_lock_file_does_not_exist(self, tmp_path, mocker):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
(tmp_path / consts.PYPROJECT_FILE_NAME).touch()
# Act
result = packaging.CheckConnectorUsesPoetry()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert result.message == f"{consts.POETRY_LOCK_FILE_NAME} file is missing"
def test_fail_when_setup_py_file_exists(self, tmp_path, mocker):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
(tmp_path / consts.PYPROJECT_FILE_NAME).touch()
(tmp_path / consts.POETRY_LOCK_FILE_NAME).touch()
(tmp_path / consts.SETUP_PY_FILE_NAME).touch()
# Act
result = packaging.CheckConnectorUsesPoetry()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert result.message == f"{consts.SETUP_PY_FILE_NAME} file exists. Please remove it and use {consts.PYPROJECT_FILE_NAME} instead"
def test_pass_when_poetry_is_used(self, tmp_path, mocker):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
(tmp_path / consts.PYPROJECT_FILE_NAME).touch()
(tmp_path / consts.POETRY_LOCK_FILE_NAME).touch()
# Act
result = packaging.CheckConnectorUsesPoetry()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert result.message == "Poetry is used for dependency management"
|
TestCheckConnectorUsesPoetry
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatter/_error_y.py
|
{
"start": 233,
"end": 14387
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter"
_path_str = "scatter.error_y"
_valid_props = {
"array",
"arrayminus",
"arrayminussrc",
"arraysrc",
"color",
"symmetric",
"thickness",
"traceref",
"tracerefminus",
"type",
"value",
"valueminus",
"visible",
"width",
}
@property
def array(self):
"""
Sets the data corresponding the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
@property
def arrayminus(self):
"""
Sets the data corresponding the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
@property
def arrayminussrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
@property
def arraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `array`.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
@property
def color(self):
"""
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
in both direction (top/bottom for vertical bars, left/right for
horizontal bars.
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def traceref(self):
"""
The 'traceref' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
@property
def tracerefminus(self):
"""
The 'tracerefminus' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both direction (top/bottom for vertical bars,
left/right for horizontal bars.
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs,
):
"""
Construct a new ErrorY object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter.ErrorY`
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both direction (top/bottom for vertical bars,
left/right for horizontal bars.
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorY
"""
super().__init__("error_y")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter.ErrorY
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.ErrorY`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("array", arg, array)
self._set_property("arrayminus", arg, arrayminus)
self._set_property("arrayminussrc", arg, arrayminussrc)
self._set_property("arraysrc", arg, arraysrc)
self._set_property("color", arg, color)
self._set_property("symmetric", arg, symmetric)
self._set_property("thickness", arg, thickness)
self._set_property("traceref", arg, traceref)
self._set_property("tracerefminus", arg, tracerefminus)
self._set_property("type", arg, type)
self._set_property("value", arg, value)
self._set_property("valueminus", arg, valueminus)
self._set_property("visible", arg, visible)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
ErrorY
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-number-of-work-sessions-to-finish-the-tasks.py
|
{
"start": 874,
"end": 1798
}
|
class ____(object):
def minSessions(self, tasks, sessionTime):
"""
:type tasks: List[int]
:type sessionTime: int
:rtype: int
"""
# dp[mask][0]: min number of sessions by choosing tasks in mask bitset
# dp[mask][1]: min used time of last session by choosing tasks in mask bitset
dp = [[float("inf")]*2 for _ in xrange(1<<len(tasks))]
dp[0] = [0, sessionTime]
for mask in xrange(len(dp)-1):
basis = 1
for task in tasks:
new_mask = mask|basis
basis <<= 1
if new_mask == mask:
continue
if dp[mask][1]+task <= sessionTime:
dp[new_mask] = min(dp[new_mask], [dp[mask][0], dp[mask][1]+task])
else:
dp[new_mask] = min(dp[new_mask], [dp[mask][0]+1, task])
return dp[-1][0]
|
Solution2
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1264258,
"end": 1264438
}
|
class ____(VegaLiteSchema):
"""StepFor schema wrapper."""
_schema = {"$ref": "#/definitions/StepFor"}
def __init__(self, *args):
super().__init__(*args)
|
StepFor
|
python
|
python-pillow__Pillow
|
Tests/test_font_pcf_charsets.py
|
{
"start": 338,
"end": 3385
}
|
class ____(TypedDict):
glyph_count: int
message: str
image1: str
charsets: dict[str, Charset] = {
"iso8859-1": {
"glyph_count": 223,
"message": "hello, world",
"image1": "Tests/images/test_draw_pbm_ter_en_target.png",
},
"iso8859-2": {
"glyph_count": 223,
"message": "witaj świecie",
"image1": "Tests/images/test_draw_pbm_ter_pl_target.png",
},
"cp1250": {
"glyph_count": 250,
"message": "witaj świecie",
"image1": "Tests/images/test_draw_pbm_ter_pl_target.png",
},
}
pytestmark = skip_unless_feature("zlib")
def save_font(request: pytest.FixtureRequest, tmp_path: Path, encoding: str) -> str:
with open(fontname, "rb") as test_file:
font = PcfFontFile.PcfFontFile(test_file, encoding)
assert isinstance(font, FontFile.FontFile)
# check the number of characters in the font
assert len([_f for _f in font.glyph if _f]) == charsets[encoding]["glyph_count"]
tempname = str(tmp_path / "temp.pil")
def delete_tempfile() -> None:
try:
os.remove(tempname[:-4] + ".pbm")
except OSError:
pass # report?
request.addfinalizer(delete_tempfile)
font.save(tempname)
with Image.open(tempname.replace(".pil", ".pbm")) as loaded:
assert_image_equal_tofile(loaded, f"Tests/fonts/ter-x20b-{encoding}.pbm")
with open(tempname, "rb") as f_loaded:
with open(f"Tests/fonts/ter-x20b-{encoding}.pil", "rb") as f_target:
assert f_loaded.read() == f_target.read()
return tempname
@pytest.mark.parametrize("encoding", ("iso8859-1", "iso8859-2", "cp1250"))
def test_sanity(request: pytest.FixtureRequest, tmp_path: Path, encoding: str) -> None:
save_font(request, tmp_path, encoding)
@pytest.mark.parametrize("encoding", ("iso8859-1", "iso8859-2", "cp1250"))
def test_draw(request: pytest.FixtureRequest, tmp_path: Path, encoding: str) -> None:
tempname = save_font(request, tmp_path, encoding)
font = ImageFont.load(tempname)
im = Image.new("L", (150, 30), "white")
draw = ImageDraw.Draw(im)
message = charsets[encoding]["message"].encode(encoding)
draw.text((0, 0), message, "black", font=font)
assert_image_similar_tofile(im, charsets[encoding]["image1"], 0)
@pytest.mark.parametrize("encoding", ("iso8859-1", "iso8859-2", "cp1250"))
def test_textsize(
request: pytest.FixtureRequest, tmp_path: Path, encoding: str
) -> None:
tempname = save_font(request, tmp_path, encoding)
font = ImageFont.load(tempname)
for i in range(255):
(ox, oy, dx, dy) = font.getbbox(bytearray([i]))
assert ox == 0
assert oy == 0
assert dy == 20
assert dx in (0, 10)
assert font.getlength(bytearray([i])) == dx
message = charsets[encoding]["message"].encode(encoding)
for i in range(len(message)):
msg = message[: i + 1]
assert font.getlength(msg) == len(msg) * 10
assert font.getbbox(msg) == (0, 0, len(msg) * 10, 20)
|
Charset
|
python
|
jazzband__django-simple-history
|
simple_history/tests/tests/test_models.py
|
{
"start": 78634,
"end": 80790
}
|
class ____(TestCase):
def setUp(self):
self.model = PollWithManyToManyWithIPAddress
self.places = (
Place.objects.create(name="London"),
Place.objects.create(name="Paris"),
)
self.poll = self.model.objects.create(question="what's up?", pub_date=today)
pre_create_historical_m2m_records.connect(
add_static_history_ip_address_on_m2m,
dispatch_uid="add_static_history_ip_address_on_m2m",
)
def tearDown(self):
pre_create_historical_m2m_records.disconnect(
add_static_history_ip_address_on_m2m,
dispatch_uid="add_static_history_ip_address_on_m2m",
)
def test_ip_address_added(self):
self.poll.places.add(*self.places)
places = self.poll.history.first().places
self.assertEqual(2, places.count())
for place in places.all():
self.assertEqual("192.168.0.1", place.ip_address)
def test_extra_field(self):
self.poll.places.add(*self.places)
m2m_record = self.poll.history.first().places.first()
self.assertEqual(
m2m_record.get_class_name(),
"HistoricalPollWithManyToManyWithIPAddress_places",
)
def test_diff(self):
self.poll.places.clear()
self.poll.places.add(*self.places)
new = self.poll.history.first()
old = new.prev_record
with self.assertNumQueries(2): # Once for each record
delta = new.diff_against(old)
expected_delta = ModelDelta(
[
ModelChange(
"places",
[],
[
{
"pollwithmanytomanywithipaddress": self.poll.pk,
"place": place.pk,
"ip_address": "192.168.0.1",
}
for place in self.places
],
)
],
["places"],
old,
new,
)
self.assertEqual(delta, expected_delta)
|
ManyToManyWithSignalsTest
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/batch/base.py
|
{
"start": 5676,
"end": 5764
}
|
class ____:
batch_size: int
concurrent_requests: int
@dataclass
|
_FixedSizeBatching
|
python
|
google__pytype
|
pytype/pytd/pytd.py
|
{
"start": 11832,
"end": 11911
}
|
class ____(Node):
"""ParamSpec.args special form."""
name: str
|
ParamSpecArgs
|
python
|
optuna__optuna
|
optuna/importance/_ped_anova/evaluator.py
|
{
"start": 739,
"end": 2393
}
|
class ____:
def __init__(
self,
quantile: float,
is_lower_better: bool,
min_n_top_trials: int,
target: Callable[[FrozenTrial], float] | None,
):
assert 0 <= quantile <= 1, "quantile must be in [0, 1]."
assert min_n_top_trials > 0, "min_n_top_trials must be positive."
self._quantile = quantile
self._is_lower_better = is_lower_better
self._min_n_top_trials = min_n_top_trials
self._target = target
def filter(self, trials: list[FrozenTrial]) -> list[FrozenTrial]:
target, min_n_top_trials = self._target, self._min_n_top_trials
sign = 1.0 if self._is_lower_better else -1.0
loss_values = sign * np.asarray([t.value if target is None else target(t) for t in trials])
err_msg = "len(trials) must be larger than or equal to min_n_top_trials"
assert min_n_top_trials <= loss_values.size, err_msg
def _quantile(v: np.ndarray, q: float) -> float:
cutoff_index = int(np.ceil(q * loss_values.size)) - 1
return float(np.partition(loss_values, cutoff_index)[cutoff_index])
cutoff_val = max(
np.partition(loss_values, min_n_top_trials - 1)[min_n_top_trials - 1],
# TODO(nabenabe0928): After dropping Python3.10, replace below with
# np.quantile(loss_values, self._quantile, method="inverted_cdf").
_quantile(loss_values, self._quantile),
)
should_keep_trials = loss_values <= cutoff_val
return [t for t, should_keep in zip(trials, should_keep_trials) if should_keep]
@experimental_class("3.6.0")
|
_QuantileFilter
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 166172,
"end": 166889
}
|
class ____(sgqlc.types.Input):
"""Parameters to be used for the committer_email_pattern rule"""
__schema__ = github_schema
__field_names__ = ("name", "negate", "operator", "pattern")
name = sgqlc.types.Field(String, graphql_name="name")
"""How this rule will appear to users."""
negate = sgqlc.types.Field(Boolean, graphql_name="negate")
"""If true, the rule will fail if the pattern matches."""
operator = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="operator")
"""The operator to use for matching."""
pattern = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="pattern")
"""The pattern to match with."""
|
CommitterEmailPatternParametersInput
|
python
|
huggingface__transformers
|
examples/modular-transformers/configuration_my_new_model.py
|
{
"start": 723,
"end": 12281
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MyNewModelModel`]. It is used to instantiate an MyNewModel
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the MyNewModel-7B.
e.g. [meta-my_new_model/MyNewModel-2-7b-hf](https://huggingface.co/meta-my_new_model/MyNewModel-2-7b-hf)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the MyNewModel model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MyNewModelModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. MyNewModel 1 supports up to 2048 tokens,
MyNewModel 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'my_new_model3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'my_new_model3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'my_new_model3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'my_new_model3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
```python
>>> from transformers import MyNewModelModel, MyNewModelConfig
>>> # Initializing a MyNewModel my_new_model-7b style configuration
>>> configuration = MyNewModelConfig()
>>> # Initializing a model from the my_new_model-7b style configuration
>>> model = MyNewModelModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "my_new_model"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `MyNewModelModel`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 32000,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 11008,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 2048,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
pretraining_tp: Optional[int] = 1,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias=True,
head_dim: Optional[int] = None,
new_param=0,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
# Try to set `rope_scaling` if available, otherwise use `rope_parameters`
rope_scaling = kwargs.pop("rope_scaling", None)
self.rope_parameters = rope_scaling or rope_parameters
# Validate the correctness of rotary position embeddings parameters
rope_theta = kwargs.get("rope_theta", 10000.0)
standardize_rope_params(self, rope_theta=rope_theta)
rope_config_validation(self)
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
self.new_param = new_param
|
MyNewModelConfig
|
python
|
realpython__materials
|
queue/src/async_queues.py
|
{
"start": 204,
"end": 2672
}
|
class ____(NamedTuple):
url: str
depth: int = 1
def __lt__(self, other):
if isinstance(other, Job):
return len(self.url) < len(other.url)
async def main(args):
session = aiohttp.ClientSession()
try:
links = Counter()
queue = asyncio.Queue()
# queue = asyncio.LifoQueue()
# queue = asyncio.PriorityQueue()
tasks = [
asyncio.create_task(
worker(
f"Worker-{i + 1}",
session,
queue,
links,
args.max_depth,
)
)
for i in range(args.num_workers)
]
await queue.put(Job(args.url))
await queue.join()
for task in tasks:
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
display(links)
finally:
await session.close()
async def worker(worker_id, session, queue, links, max_depth):
print(f"[{worker_id} starting]", file=sys.stderr)
while True:
url, depth = await queue.get()
links[url] += 1
try:
if depth <= max_depth:
print(f"[{worker_id} {depth=} {url=}]", file=sys.stderr)
if html := await fetch_html(session, url):
for link_url in parse_links(url, html):
await queue.put(Job(link_url, depth + 1))
except aiohttp.ClientError:
print(f"[{worker_id} failed at {url=}]", file=sys.stderr)
finally:
queue.task_done()
async def fetch_html(session, url):
async with session.get(url) as response:
if response.ok and response.content_type == "text/html":
return await response.text()
def parse_links(url, html):
soup = BeautifulSoup(html, features="html.parser")
for anchor in soup.select("a[href]"):
href = anchor.get("href").lower()
if not href.startswith("javascript:"):
yield urljoin(url, href)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("url")
parser.add_argument("-d", "--max-depth", type=int, default=2)
parser.add_argument("-w", "--num-workers", type=int, default=3)
return parser.parse_args()
def display(links):
for url, count in links.most_common():
print(f"{count:>3} {url}")
if __name__ == "__main__":
asyncio.run(main(parse_args()))
|
Job
|
python
|
pypa__setuptools
|
setuptools/_vendor/backports/tarfile/__init__.py
|
{
"start": 24427,
"end": 24606
}
|
class ____(FilterError):
def __init__(self, tarinfo):
self.tarinfo = tarinfo
super().__init__(f'{tarinfo.name!r} is a link to an absolute path')
|
AbsoluteLinkError
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/cpp_wrapper_mps.py
|
{
"start": 325,
"end": 12297
}
|
class ____(CppWrapperGpu):
"""
Generates cpp wrapper for running on MPS and calls metal kernels
"""
def __init__(self) -> None:
super().__init__()
self._used_kernel_names: OrderedSet[str] = OrderedSet()
self._lambda_counter: int = 0
@staticmethod
def create(
is_subgraph: bool,
subgraph_name: Optional[str],
parent_wrapper: Optional[PythonWrapperCodegen],
partition_signatures: Optional[GraphPartitionSignature] = None,
) -> "CppWrapperMps":
return CppWrapperMps()
def _generate_kernel_call_helper(
self,
kernel_name: str,
call_args: list[str],
*,
device: Optional[torch.device] = None,
triton: bool = True,
arg_types: Optional[tuple[Any, ...]] = None,
raw_keys: Optional[tuple[Any, ...]] = None,
raw_args: Optional[tuple[Any, ...]] = None,
triton_meta: Optional[dict[str, Any]] = None,
graph_name: str = "",
original_fxnode_name: Optional[str] = None,
) -> None:
"""
Generates MPS kernel call code. It should look something like:
```
auto mps_lib_0_lambda = [&](AOTIMetalKernelFunctionHandle handle) {
aoti_torch_mps_start_encoding(handle);
aoti_torch_mps_set_arg_tensor(handle, 0, buf0);
aoti_torch_mps_set_arg_tensor(handle, 1, arg0_1);
aoti_torch_mps_set_arg_tensor(handle, 2, arg1_1);
aoti_torch_mps_dispatch_single(handle, static_cast<uint64_t>(10LL));
};
std::function<void(AOTIMetalKernelFunctionHandle)> mps_lib_0_func_wrapper = mps_lib_0_lambda;
aoti_torch_mps_run_command_block(get_mps_lib_0_handle(), aoti_torch_mps_shared_callback, &mps_lib_0_func_wrapper);
```
"""
device = device or V.graph.get_current_device_or_throw()
if device.type == "cpu":
# Even in CppWrapperGpu, we may see cpp kernels
return CppWrapperCpu._generate_kernel_call_helper(
self,
kernel_name,
call_args,
device=device,
triton=triton,
arg_types=arg_types,
raw_keys=raw_keys,
raw_args=raw_args,
triton_meta=triton_meta,
)
assert device.type == "mps"
assert arg_types is not None
new_args = []
for idx, (arg, arg_type) in enumerate(zip(call_args[:-2], arg_types[:-2])):
if isinstance(arg_type, torch.dtype):
new_args.append(f"aoti_torch_mps_set_arg_tensor(handle, {idx}, {arg});")
elif arg_type in (int, sympy.core.symbol.Symbol):
new_args.append(f"aoti_torch_mps_set_arg_int(handle, {idx}, {arg});")
else:
raise NotImplementedError(
f"Unsupported arg type {arg_type} for arg {arg} for kernel {kernel_name}"
)
threads, group_size = call_args[-2], call_args[-1]
if threads is None:
raise NotImplementedError("No threads or group_size provided")
# Check if threads is a single value or an array-like structure
threads_str = str(threads)
is_single_value = (
threads_str.startswith("{")
and threads_str.endswith("}")
and threads_str.count(",") == 0
) or not threads_str.startswith(("{", "["))
if is_single_value:
# Extract single value from braces if present
if threads_str.startswith("{") and threads_str.endswith("}"):
single_value = threads_str[1:-1].strip() # Remove braces
else:
single_value = threads_str
if group_size is None:
new_args.append(
f"aoti_torch_mps_dispatch_single(handle, {single_value});"
)
else:
# Extract group size value if it's also in braces
group_size_str = str(group_size)
if group_size_str.startswith("{") and group_size_str.endswith("}"):
group_size_value = group_size_str[1:-1].strip()
else:
group_size_value = group_size_str
new_args.append(
f"aoti_torch_mps_dispatch_single_with_group_size(handle, {single_value}, {group_size_value});"
)
else:
# Handle array case - need to convert initializer list to array
# Use kernel name to make variable names unique
threads_var = f"{kernel_name}_threads_array"
group_size_var = f"{kernel_name}_group_size_array"
# Extract array size from the initializer list string
def get_array_size(array_str: str) -> int:
# Remove braces and whitespace
content = array_str.strip()
if content.startswith("{") and content.endswith("}"):
content = content[1:-1].strip()
if not content: # Empty array
return 0
# Count elements by counting commas, accounting for nested structures
depth = 0
comma_count = 0
for char in content:
if char in "({[<":
depth += 1
elif char in ")}]>":
depth -= 1
elif char == "," and depth == 0:
comma_count += 1
return comma_count + 1 # Number of elements = commas + 1
threads_size = get_array_size(threads_str)
if group_size is None:
new_args.append("{")
new_args.append(f" uint64_t {threads_var}[] = {threads};")
new_args.append(
f" aoti_torch_mps_dispatch_array(handle, {threads_var}, {threads_size});"
)
new_args.append("}")
else:
group_size_str = str(group_size)
group_size_size = get_array_size(group_size_str)
new_args.append("{")
new_args.append(f" uint64_t {threads_var}[] = {threads};")
new_args.append(f" uint64_t {group_size_var}[] = {group_size};")
dispatch_args = f"handle, {threads_var}, {threads_size}, {group_size_var}, {group_size_size}"
new_args.append(
f" aoti_torch_mps_dispatch_array_with_group_size({dispatch_args});"
)
new_args.append("}")
# debug printer related logic for cpp kernel type.
debug_printer_manager = V.graph.wrapper_code.debug_printer
debug_printer_manager.set_printer_args(
call_args[:-2],
kernel_name,
None,
None,
"cpp",
)
with debug_printer_manager:
self.write_mps_kernel_call(kernel_name, new_args)
def write_mps_kernel_call(self, name: str, call_args: list[str]) -> None:
# Generate unique variable names to avoid duplicate declarations
# when the same MPS lib is used multiple times
unique_suffix = self._lambda_counter
self._lambda_counter += 1
lambda_name = f"{name}_lambda_{unique_suffix}"
wrapper_name = f"{name}_func_wrapper_{unique_suffix}"
# Generate the function call code (in current location)
# Create lambda that captures by reference and pass its pointer through void*
self.writeline(
f"auto {lambda_name} = [&](AOTIMetalKernelFunctionHandle handle) {{"
)
self.writeline(" aoti_torch_mps_start_encoding(handle);")
# Output call args directly since we're capturing by reference
for call_arg in call_args:
self.writeline(f" {call_arg}")
self.writeline("};")
self.writeline("")
# Pass lambda pointer through void*
self.writeline(
f"std::function<void(AOTIMetalKernelFunctionHandle)> {wrapper_name} = {lambda_name};"
)
self.writeline(
f"aoti_torch_mps_run_command_block(get_{name}_handle(), aoti_torch_mps_shared_callback, &{wrapper_name});"
)
@staticmethod
def get_device_include_path(device: str) -> str:
assert V.graph.aot_mode
return (
"#include <torch/csrc/inductor/aoti_include/mps.h>\n"
"#include <torch/csrc/inductor/aoti_torch/c/shim_mps.h>"
)
def codegen_additional_funcs(self) -> None:
"""
Generate thread-safe lazy singleton pattern for MPS shader libraries with RAII cleanup.
The generated code will look like:
```
AOTIMetalKernelFunctionHandle get_mps_lib_0_handle() {
static auto kernel_handle = []() {
AOTIMetalShaderLibraryHandle lib_handle = nullptr;
AOTIMetalKernelFunctionHandle kern_handle = nullptr;
aoti_torch_mps_create_shader_library(mps_lib_0_source, &lib_handle);
aoti_torch_mps_get_kernel_function(lib_handle, "generated_kernel", &kern_handle);
// RAII wrapper with custom deleter
auto lib_deleter = [](AOTIMetalShaderLibraryHandle h) {
if (h) aoti_torch_mps_delete_shader_library(h);
};
using LibDeleter = decltype(lib_deleter);
using LibPtr = std::unique_ptr<AOTIMetalShaderLibraryOpaque, LibDeleter>;
// Return pair of kernel handle and library smart pointer for cleanup
return std::make_pair(kern_handle, LibPtr(lib_handle, lib_deleter));
}();
return kernel_handle.first;
}
```
"""
# Add shimified handles and functions
shader_libraries: OrderedSet[str] = OrderedSet()
for line in self.lines:
if not isinstance(line, KernelCallLine):
continue
if line.device.type != "mps":
continue
# Extract library name from kernel name (e.g., "mps_lib_0" from kernel calls)
if line.kernel_name not in self._used_kernel_names:
self._used_kernel_names.add(line.kernel_name)
shader_libraries.add(line.kernel_name)
# NOTE: For shimified version, we expect the shader source constant to be generated
# by the existing MPS shader generation process, but instead of instantiating the
# DynamicMetalShaderLibrary directly, we'll use our shim functions.
# The existing codegen should produce something like:
# const char* mps_lib_0_source = R"MTL(...shader_source...)MTL";
# instead of:
# at::native::mps::DynamicMetalShaderLibrary mps_lib_0(R"MTL(...shader_source...)MTL");
# Generate thread-safe lazy singleton with RAII for each library
for lib_name in shader_libraries:
self.prefix.splice(f"""
AOTIMetalKernelFunctionHandle get_{lib_name}_handle() {{
static auto kernel_handle = []() {{
AOTIMetalShaderLibraryHandle lib_handle = nullptr;
AOTIMetalKernelFunctionHandle kern_handle = nullptr;
aoti_torch_mps_create_shader_library({lib_name}_source, &lib_handle);
aoti_torch_mps_get_kernel_function(lib_handle, "generated_kernel", &kern_handle);
// RAII wrapper with custom deleter
auto lib_deleter = [](AOTIMetalShaderLibraryHandle h) {{
if (h) aoti_torch_mps_delete_shader_library(h);
}};
using LibDeleter = decltype(lib_deleter);
using LibPtr = std::unique_ptr<AOTIMetalShaderLibraryOpaque, LibDeleter>;
// Return pair of kernel handle and library smart pointer for cleanup
return std::make_pair(kern_handle, LibPtr(lib_handle, lib_deleter));
}}();
return kernel_handle.first;
}}
""")
|
CppWrapperMps
|
python
|
PyCQA__pylint
|
tests/functional/ext/docparams/raise/missing_raises_doc_Google.py
|
{
"start": 3866,
"end": 4551
}
|
class ____:
"""test_finds_missing_raises_from_setter_google_2
Example of a setter having missing raises documentation in
its own Google style docstring of the property.
"""
@property
def foo_method(self):
"""int: docstring ...
Raises:
RuntimeError: Always
"""
raise RuntimeError()
return 10 # [unreachable]
@foo_method.setter
def foo_method(self, value): # [missing-raises-doc]
"""setter docstring ...
Raises:
RuntimeError: Never
"""
print(self)
if True: # [using-constant-test]
raise AttributeError()
raise RuntimeError()
|
Foo
|
python
|
ansible__ansible
|
lib/ansible/_internal/_templating/_engine.py
|
{
"start": 2261,
"end": 2734
}
|
class ____:
DEFAULT: t.ClassVar[t.Self]
value_for_omit: object = Omit
escape_backslashes: bool = True
preserve_trailing_newlines: bool = True
# DTFIX-FUTURE: these aren't really overrides anymore, rename the dataclass and this field
# also mention in docstring this has no effect unless used to template a string
overrides: TemplateOverrides = TemplateOverrides.DEFAULT
TemplateOptions.DEFAULT = TemplateOptions()
|
TemplateOptions
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/managed_kafka.py
|
{
"start": 22505,
"end": 25978
}
|
class ____(ManagedKafkaBaseOperator):
"""
Create a new topic in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster in which to create the topic.
:param topic_id: Required. The ID to use for the topic, which will become the final component of the
topic's name.
:param topic: Required. Configuration of the topic to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"cluster_id", "topic_id", "topic"} | set(ManagedKafkaBaseOperator.template_fields)
)
operator_extra_links = (ApacheKafkaTopicLink(),)
def __init__(
self,
cluster_id: str,
topic_id: str,
topic: types.Topic | dict,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.cluster_id = cluster_id
self.topic_id = topic_id
self.topic = topic
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"location": self.location,
"cluster_id": self.cluster_id,
"topic_id": self.topic_id,
"project_id": self.project_id,
}
def execute(self, context: Context):
self.log.info("Creating an Apache Kafka topic.")
ApacheKafkaTopicLink.persist(context=context)
try:
topic_obj = self.hook.create_topic(
project_id=self.project_id,
location=self.location,
cluster_id=self.cluster_id,
topic_id=self.topic_id,
topic=self.topic,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Apache Kafka topic for %s cluster was created.", self.cluster_id)
return types.Topic.to_dict(topic_obj)
except AlreadyExists:
self.log.info("Apache Kafka topic %s already exists.", self.topic_id)
topic_obj = self.hook.get_topic(
project_id=self.project_id,
location=self.location,
cluster_id=self.cluster_id,
topic_id=self.topic_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return types.Topic.to_dict(topic_obj)
|
ManagedKafkaCreateTopicOperator
|
python
|
google__pytype
|
pytype/abstract/abstract_test.py
|
{
"start": 48689,
"end": 50462
}
|
class ____(AbstractTestBase):
"""Tests for abstract.function.Signature."""
def test_prepend_to_paramspec(self):
paramspec = abstract.ParamSpec("P", self._ctx)
# Callable[P, Any]
in_sig = function.Signature(
name="f",
param_names=("x",),
posonly_count=0,
varargs_name=None,
kwonly_params=(),
kwargs_name=None,
defaults={},
annotations={"x": paramspec},
)
# Callable[Concatenate[int, P], Any]
out_sig = in_sig.prepend_parameter("_", self._ctx.convert.int_type)
self.assertEqual(out_sig.param_names, ("x",))
x_type = out_sig.annotations["x"]
self.assertIsInstance(x_type, abstract.Concatenate)
self.assertEqual(x_type.args, [self._ctx.convert.int_type])
self.assertEqual(x_type.paramspec, paramspec)
def test_prepend_to_concatenate(self):
paramspec = abstract.ParamSpec("P", self._ctx)
concatenate = abstract.Concatenate(
[self._ctx.convert.str_type, paramspec], self._ctx
)
# Callable[Concatenate[str, P], Any]
in_sig = function.Signature(
name="f",
param_names=("x",),
posonly_count=0,
varargs_name=None,
kwonly_params=(),
kwargs_name=None,
defaults={},
annotations={"x": concatenate},
)
# Callable[Concatenate[int, str, P], Any]
out_sig = in_sig.prepend_parameter("_", self._ctx.convert.int_type)
self.assertEqual(out_sig.param_names, ("x",))
x_type = out_sig.annotations["x"]
self.assertIsInstance(x_type, abstract.Concatenate)
self.assertEqual(
x_type.args, [self._ctx.convert.int_type, self._ctx.convert.str_type]
)
self.assertEqual(x_type.paramspec, paramspec)
if __name__ == "__main__":
unittest.main()
|
SignatureTest
|
python
|
jazzband__django-oauth-toolkit
|
tests/test_authorization_code.py
|
{
"start": 28637,
"end": 71967
}
|
class ____(BaseAuthorizationCodeTokenView):
def test_basic_auth(self):
"""
Request an access token using basic authentication for client authentication
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], self.oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_refresh(self):
"""
Request an access token using a refresh token
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
# make a second token request to be sure the previous refresh token remains valid, see #65
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("access_token" in content)
# check refresh token cannot be used twice
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("invalid_grant" in content.values())
def test_refresh_with_grace_period(self):
"""
Request an access token using a refresh token
"""
self.oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS = 120
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
# make a second token request to be sure the previous refresh token remains valid, see #65
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("access_token" in content)
first_access_token = content["access_token"]
first_refresh_token = content["refresh_token"]
# check access token returns same data if used twice, see #497
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("access_token" in content)
self.assertEqual(content["access_token"], first_access_token)
# refresh token should be the same as well
self.assertTrue("refresh_token" in content)
self.assertEqual(content["refresh_token"], first_refresh_token)
def test_refresh_invalidates_old_tokens(self):
"""
Ensure existing refresh tokens are cleaned up when issuing new ones
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
rt = content["refresh_token"]
at = content["access_token"]
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": rt,
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
refresh_token = RefreshToken.objects.filter(token=rt).first()
self.assertIsNotNone(refresh_token.revoked)
self.assertFalse(AccessToken.objects.filter(token=at).exists())
def test_refresh_no_scopes(self):
"""
Request an access token using a refresh token without passing any scope
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("access_token" in content)
def test_refresh_bad_scopes(self):
"""
Request an access token using a refresh token and wrong scopes
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": "read write nuke",
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_refresh_fail_repeating_requests(self):
"""
Try refreshing an access token with the same refresh token more than once
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_refresh_repeating_requests_revokes_old_token(self):
"""
If a refresh token is reused, the server should invalidate *all* access tokens that have a relation
to the reused token. This forces a malicious actor to be logged out.
The server can't determine whether the first or the second client was legitimate, so it needs to
revoke both.
See https://datatracker.ietf.org/doc/html/draft-ietf-oauth-security-topics-29#name-recommendations
"""
self.oauth2_settings.REFRESH_TOKEN_REUSE_PROTECTION = True
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
# First response works as usual
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
new_tokens = json.loads(response.content.decode("utf-8"))
# Second request fails
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
# Previously returned tokens are now invalid as well
new_token_request_data = {
"grant_type": "refresh_token",
"refresh_token": new_tokens["refresh_token"],
"scope": new_tokens["scope"],
}
response = self.client.post(
reverse("oauth2_provider:token"), data=new_token_request_data, **auth_headers
)
self.assertEqual(response.status_code, 400)
def test_refresh_repeating_requests(self):
"""
Trying to refresh an access token with the same refresh token more than
once succeeds in the grace period and fails outside
"""
self.oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS = 120
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
# try refreshing outside the refresh window, see #497
rt = RefreshToken.objects.get(token=content["refresh_token"])
self.assertIsNotNone(rt.revoked)
rt.revoked = timezone.now() - datetime.timedelta(minutes=10) # instead of mocking out datetime
rt.save()
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_refresh_repeating_requests_grace_period_with_reuse_protection(self):
"""
Trying to refresh an access token with the same refresh token more than
once succeeds. Should work within the grace period, but should revoke previous tokens
"""
self.oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS = 120
self.oauth2_settings.REFRESH_TOKEN_REUSE_PROTECTION = True
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
refresh_token_1 = content["refresh_token"]
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": refresh_token_1,
"scope": content["scope"],
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
refresh_token_2 = json.loads(response.content.decode("utf-8"))["refresh_token"]
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
refresh_token_3 = json.loads(response.content.decode("utf-8"))["refresh_token"]
self.assertEqual(refresh_token_2, refresh_token_3)
# Let the first refresh token expire
rt = RefreshToken.objects.get(token=refresh_token_1)
rt.revoked = timezone.now() - datetime.timedelta(minutes=10)
rt.save()
# Using the expired token fails
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
# Because we used the expired token, the recently issued token is also revoked
new_token_request_data = {
"grant_type": "refresh_token",
"refresh_token": refresh_token_2,
"scope": content["scope"],
}
response = self.client.post(
reverse("oauth2_provider:token"), data=new_token_request_data, **auth_headers
)
self.assertEqual(response.status_code, 400)
def test_refresh_repeating_requests_non_rotating_tokens(self):
"""
Try refreshing an access token with the same refresh token more than once when not rotating tokens.
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue("refresh_token" in content)
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": content["refresh_token"],
"scope": content["scope"],
}
self.oauth2_settings.ROTATE_REFRESH_TOKEN = False
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
def test_refresh_with_deleted_token(self):
"""
Ensure that using a deleted refresh token returns 400
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"scope": "read write",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
# get a refresh token
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
rt = content["refresh_token"]
token_request_data = {
"grant_type": "refresh_token",
"refresh_token": rt,
"scope": "read write",
}
# delete the access token
AccessToken.objects.filter(token=content["access_token"]).delete()
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_basic_auth_bad_authcode(self):
"""
Request an access token using a bad authorization code
"""
self.client.login(username="test_user", password="123456")
token_request_data = {
"grant_type": "authorization_code",
"code": "BLAH",
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_basic_auth_bad_granttype(self):
"""
Request an access token using a bad grant_type string
"""
self.client.login(username="test_user", password="123456")
token_request_data = {"grant_type": "UNKNOWN", "code": "BLAH", "redirect_uri": "http://example.org"}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_basic_auth_grant_expired(self):
"""
Request an access token using an expired grant token
"""
self.client.login(username="test_user", password="123456")
g = Grant(
application=self.application,
user=self.test_user,
code="BLAH",
expires=timezone.now(),
redirect_uri="",
scope="",
)
g.save()
token_request_data = {
"grant_type": "authorization_code",
"code": "BLAH",
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_basic_auth_bad_secret(self):
"""
Request an access token using basic authentication for client authentication
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, "BOOM!")
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_basic_auth_wrong_auth_type(self):
"""
Request an access token using basic authentication for client authentication
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
user_pass = "{0}:{1}".format(self.application.client_id, CLEARTEXT_SECRET)
auth_string = base64.b64encode(user_pass.encode("utf-8"))
auth_headers = {
"HTTP_AUTHORIZATION": "Wrong " + auth_string.decode("utf-8"),
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_request_body_params(self):
"""
Request an access token using client_type: public
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
"client_secret": CLEARTEXT_SECRET,
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], self.oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_request_body_params_client_typo(self):
"""
Verify that using incorrect parameter name (client instead of client_id) returns invalid_client error
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client": self.application.client_id,
"client_secret": CLEARTEXT_SECRET,
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 401)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["error"], "invalid_client")
def test_public(self):
"""
Request an access token using client_type: public
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
authorization_code = self.get_auth()
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], self.oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_public_pkce_S256_authorize_get(self):
"""
Request an access token using client_type: public
and PKCE enabled. Tests if the authorize get is successful
for the S256 algorithm and form data are properly passed.
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
query_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge": code_challenge,
"code_challenge_method": "S256",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertContains(response, 'value="S256"', count=1, status_code=200)
self.assertContains(response, 'value="{0}"'.format(code_challenge), count=1, status_code=200)
def test_public_pkce_plain_authorize_get(self):
"""
Request an access token using client_type: public
and PKCE enabled. Tests if the authorize get is successful
for the plain algorithm and form data are properly passed.
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("plain")
query_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge": code_challenge,
"code_challenge_method": "plain",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertContains(response, 'value="plain"', count=1, status_code=200)
self.assertContains(response, 'value="{0}"'.format(code_challenge), count=1, status_code=200)
def test_public_pkce_S256(self):
"""
Request an access token using client_type: public
and PKCE enabled with the S256 algorithm
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
authorization_code = self.get_pkce_auth(code_challenge, "S256")
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
"code_verifier": code_verifier,
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], self.oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_public_pkce_plain(self):
"""
Request an access token using client_type: public
and PKCE enabled with the plain algorithm
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("plain")
authorization_code = self.get_pkce_auth(code_challenge, "plain")
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
"code_verifier": code_verifier,
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "read write")
self.assertEqual(content["expires_in"], self.oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_public_pkce_invalid_algorithm(self):
"""
Request an access token using client_type: public
and PKCE enabled with an invalid algorithm
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("invalid")
query_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge": code_challenge,
"code_challenge_method": "invalid",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 302)
self.assertIn("error=invalid_request", response["Location"])
def test_public_pkce_missing_code_challenge(self):
"""
Request an access token using client_type: public
and PKCE enabled but with the code_challenge missing
"""
self.oauth2_settings.PKCE_REQUIRED = True
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.skip_authorization = True
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
query_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge_method": "S256",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 302)
self.assertIn("error=invalid_request", response["Location"])
def test_public_pkce_missing_code_challenge_method(self):
"""
Request an access token using client_type: public
and PKCE enabled but with the code_challenge_method missing
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
query_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge": code_challenge,
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 200)
def test_public_pkce_S256_invalid_code_verifier(self):
"""
Request an access token using client_type: public
and PKCE enabled with the S256 algorithm and an invalid code_verifier
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
code_verifier, code_challenge = self.generate_pkce_codes("S256")
authorization_code = self.get_pkce_auth(code_challenge, "S256")
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
"code_verifier": "invalid",
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 400)
def test_public_pkce_plain_invalid_code_verifier(self):
    """Exchange a plain-PKCE authorization code with a wrong
    code_verifier; the token endpoint must reject it (400)."""
    self.client.login(username="test_user", password="123456")
    self.application.client_type = Application.CLIENT_PUBLIC
    self.application.save()
    _, code_challenge = self.generate_pkce_codes("plain")
    auth_code = self.get_pkce_auth(code_challenge, "plain")
    payload = {
        "grant_type": "authorization_code",
        "code": auth_code,
        "redirect_uri": "http://example.org",
        "client_id": self.application.client_id,
        "code_verifier": "invalid",
    }
    resp = self.client.post(reverse("oauth2_provider:token"), data=payload)
    self.assertEqual(resp.status_code, 400)
def test_public_pkce_S256_missing_code_verifier(self):
    """Exchange an S256-PKCE authorization code without sending any
    code_verifier at all; the token endpoint must reject it (400)."""
    self.client.login(username="test_user", password="123456")
    self.application.client_type = Application.CLIENT_PUBLIC
    self.application.save()
    _, code_challenge = self.generate_pkce_codes("S256")
    auth_code = self.get_pkce_auth(code_challenge, "S256")
    # Note: no "code_verifier" key in the payload.
    payload = {
        "grant_type": "authorization_code",
        "code": auth_code,
        "redirect_uri": "http://example.org",
        "client_id": self.application.client_id,
    }
    resp = self.client.post(reverse("oauth2_provider:token"), data=payload)
    self.assertEqual(resp.status_code, 400)
def test_public_pkce_plain_missing_code_verifier(self):
    """Exchange a plain-PKCE authorization code without sending any
    code_verifier at all; the token endpoint must reject it (400)."""
    self.client.login(username="test_user", password="123456")
    self.application.client_type = Application.CLIENT_PUBLIC
    self.application.save()
    _, code_challenge = self.generate_pkce_codes("plain")
    auth_code = self.get_pkce_auth(code_challenge, "plain")
    # Note: no "code_verifier" key in the payload.
    payload = {
        "grant_type": "authorization_code",
        "code": auth_code,
        "redirect_uri": "http://example.org",
        "client_id": self.application.client_id,
    }
    resp = self.client.post(reverse("oauth2_provider:token"), data=payload)
    self.assertEqual(resp.status_code, 400)
def test_malicious_redirect_uri(self):
    """A path-traversal redirect_uri ("/../") must be rejected during the
    code exchange with a mismatching-redirect-URI error."""
    self.client.login(username="test_user", password="123456")
    self.application.client_type = Application.CLIENT_PUBLIC
    self.application.save()
    auth_code = self.get_auth()
    payload = {
        "grant_type": "authorization_code",
        "code": auth_code,
        "redirect_uri": "/../",
        "client_id": self.application.client_id,
    }
    resp = self.client.post(reverse("oauth2_provider:token"), data=payload)
    self.assertEqual(resp.status_code, 400)
    body = resp.json()
    self.assertEqual(body["error"], "invalid_request")
    self.assertEqual(
        body["error_description"],
        oauthlib_errors.MismatchingRedirectURIError.description,
    )
def test_code_exchange_succeed_when_redirect_uri_match(self):
    """Code exchange succeeds when the token request's redirect_uri is
    identical to the one used to obtain the authorization code."""
    self.client.login(username="test_user", password="123456")
    # First leg: obtain an authorization code.
    resp = self.client.post(
        reverse("oauth2_provider:authorize"),
        data={
            "client_id": self.application.client_id,
            "state": "random_state_string",
            "scope": "read write",
            "redirect_uri": "http://example.org?foo=bar",
            "response_type": "code",
            "allow": True,
        },
    )
    auth_code = parse_qs(urlparse(resp["Location"]).query)["code"].pop()
    # Second leg: exchange the code for an access token.
    headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
    resp = self.client.post(
        reverse("oauth2_provider:token"),
        data={
            "grant_type": "authorization_code",
            "code": auth_code,
            "redirect_uri": "http://example.org?foo=bar",
        },
        **headers,
    )
    self.assertEqual(resp.status_code, 200)
    content = json.loads(resp.content.decode("utf-8"))
    self.assertEqual(content["token_type"], "Bearer")
    self.assertEqual(content["scope"], "read write")
    self.assertEqual(content["expires_in"], self.oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_code_exchange_fails_when_redirect_uri_does_not_match(self):
    """Code exchange fails when the token request's redirect_uri differs
    from the one used to obtain the authorization code."""
    self.client.login(username="test_user", password="123456")
    # First leg: obtain an authorization code.
    resp = self.client.post(
        reverse("oauth2_provider:authorize"),
        data={
            "client_id": self.application.client_id,
            "state": "random_state_string",
            "scope": "read write",
            "redirect_uri": "http://example.org?foo=bar",
            "response_type": "code",
            "allow": True,
        },
    )
    auth_code = parse_qs(urlparse(resp["Location"]).query)["code"].pop()
    # Second leg: attempt the exchange with a mismatching redirect_uri.
    headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
    resp = self.client.post(
        reverse("oauth2_provider:token"),
        data={
            "grant_type": "authorization_code",
            "code": auth_code,
            "redirect_uri": "http://example.org?foo=baraa",
        },
        **headers,
    )
    self.assertEqual(resp.status_code, 400)
    body = resp.json()
    self.assertEqual(body["error"], "invalid_request")
    self.assertEqual(
        body["error_description"],
        oauthlib_errors.MismatchingRedirectURIError.description,
    )
def test_code_exchange_succeed_when_redirect_uri_match_with_multiple_query_params(
    self,
):
    """Code exchange succeeds when the redirect_uri carries several query
    parameters and matches the registered URI's parameters."""
    self.client.login(username="test_user", password="123456")
    self.application.redirect_uris = "http://localhost http://example.com?foo=bar"
    self.application.save()
    # First leg: obtain an authorization code.
    resp = self.client.post(
        reverse("oauth2_provider:authorize"),
        data={
            "client_id": self.application.client_id,
            "state": "random_state_string",
            "scope": "read write",
            "redirect_uri": "http://example.com?bar=baz&foo=bar",
            "response_type": "code",
            "allow": True,
        },
    )
    auth_code = parse_qs(urlparse(resp["Location"]).query)["code"].pop()
    # Second leg: exchange the code for an access token.
    headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
    resp = self.client.post(
        reverse("oauth2_provider:token"),
        data={
            "grant_type": "authorization_code",
            "code": auth_code,
            "redirect_uri": "http://example.com?bar=baz&foo=bar",
        },
        **headers,
    )
    self.assertEqual(resp.status_code, 200)
    content = json.loads(resp.content.decode("utf-8"))
    self.assertEqual(content["token_type"], "Bearer")
    self.assertEqual(content["scope"], "read write")
    self.assertEqual(content["expires_in"], self.oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RW)
|
TestAuthorizationCodeTokenView
|
python
|
conda__conda
|
conda/common/configuration.py
|
{
"start": 32937,
"end": 35992
}
|
class ____(metaclass=ABCMeta):
    # (type) the type of value this parameter holds
    _type = None
    # (Parameter or type) for collection parameters, the element type held in
    # the collection; for scalar parameters, the primitive type itself.
    _element_type = None

    def __init__(self, default, validation=None):
        """Represent an unloaded configuration parameter.

        Holds the type, default, and validation information until the
        parameter is loaded with a configuration.

        Args:
            default (Any): typed python default value used when the parameter
                is not found in a Configuration.
            validation (callable): given a candidate value, returns a boolean
                indicating validity, or alternately a string describing why
                the value is invalid.
        """
        self._default = default
        self._validation = validation

    @property
    def default(self):
        """Return the default, loaded via a DefaultValueRawParameter wrapper."""
        raw = DefaultValueRawParameter("default", "default", self._default)
        return self.load("default", raw)

    def get_all_matches(self, name, names, instance):
        """Find every occurrence of this parameter in a Configuration.

        Args:
            name (str): canonical name of the parameter to search for.
            names (tuple(str)): alternative aliases of the parameter.
            instance (Configuration): configuration instance to search within.

        Returns:
            tuple(list(RawParameter), list): matches found in the
            configuration, plus any multiple-key errors encountered.
        """
        found = []
        errors = []
        for source_path, raw_parameters in instance.raw_data.items():
            hit, err = ParameterLoader.raw_parameters_from_single_source(
                name, names, raw_parameters
            )
            if hit is not None:
                found.append(hit)
            if err:
                errors.append(err)
        return found, errors

    @abstractmethod
    def load(self, name, match):
        """Load this Parameter with the value carried by a RawParameter.

        Args:
            name (str): name of the parameter to pass through.
            match (RawParameter): the value of the RawParameter match.

        Returns a LoadedParameter.
        """
        raise NotImplementedError()

    def typify(self, name, source, value):
        # Coerce ``value`` to this parameter's element type, translating
        # coercion failures into CustomValidationError with a helpful message.
        element_type = self._element_type
        try:
            return typify_data_structure(value, element_type)
        except TypeCoercionError as e:
            msg = str(e)
            if issubclass(element_type, Enum):
                # List the enum's valid members so the error is actionable.
                valid = ", ".join(map("'{}'".format, element_type.__members__.values()))
                msg += f"\nValid choices for {name}: {valid}"
            raise CustomValidationError(name, e.value, source, msg)
|
Parameter
|
python
|
pallets__itsdangerous
|
src/itsdangerous/exc.py
|
{
"start": 1788,
"end": 2561
}
|
class ____(BadSignature):
    """Raised when the signed header attached to a payload is invalid in
    some form. Only serializers that carry a header alongside the
    signature can raise this.

    .. versionadded:: 0.24
    """

    def __init__(
        self,
        message: str,
        payload: t.Any | None = None,
        header: t.Any | None = None,
        original_error: Exception | None = None,
    ):
        super().__init__(message, payload)
        #: The underlying error explaining why the payload was not
        #: valid, when one is available. May be ``None``.
        self.original_error: Exception | None = original_error
        #: The header itself, when it was available but merely
        #: malformed.
        self.header: t.Any | None = header
|
BadHeader
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/test_util_test.py
|
{
"start": 3124,
"end": 4343
}
|
class ____(test.TestCase):
    # Exercises test_util.assert_sequential_execution against a small
    # hand-built graph with both data and control dependencies.

    def test1(self):
        @def_function.function
        def f():
            # d depends (via data edges) on a and c; e is only
            # control-dependent on b (= a + 1); f depends on c and e.
            a = array_ops.identity(1., name='a')
            b = a + 1
            c = array_ops.identity(2., name='c')
            d = array_ops.identity(a + c, name='d')
            with ops.control_dependencies([b]):
                e = array_ops.identity(3., name='e')
            f = array_ops.identity(c + e, name='f')
            return d, f

        graph = f.get_concrete_function().graph
        # Topologically sort all ops, then check which orderings the
        # helper accepts (dependency-consistent) or rejects.
        order = test_util.topological_sort_operations(graph.get_operations())
        a = graph.get_operation_by_name('a')
        c = graph.get_operation_by_name('c')
        d = graph.get_operation_by_name('d')
        e = graph.get_operation_by_name('e')
        f = graph.get_operation_by_name('f')
        # Sequences consistent with the dependency structure pass.
        test_util.assert_sequential_execution(order, [a, d])
        test_util.assert_sequential_execution(order, [e, a, f])
        # a and c are independent, so sequences containing both (without a
        # connecting dependency) must be rejected.
        with self.assertRaises(AssertionError):
            test_util.assert_sequential_execution(order, [a, c])
        with self.assertRaises(AssertionError):
            test_util.assert_sequential_execution(order, [f, a, c])
        with self.assertRaises(AssertionError):
            test_util.assert_sequential_execution(order, [d, e, a, c])
if __name__ == '__main__':
    # Delegate to the shared test-runner entry point in test_util.
    test_util.main()
|
AssertSequentailExecutionTest
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/beta/beta_container.py
|
{
"start": 250,
"end": 522
}
|
class ____(BaseModel):
    """Container attached to a request: its identifier, expiry time, and
    the skills loaded into it."""

    id: str
    """Identifier for the container used in this request"""

    expires_at: datetime
    """The time at which the container will expire."""

    skills: Optional[List[BetaSkill]] = None
    """Skills loaded in the container"""
|
BetaContainer
|
python
|
getsentry__sentry
|
tests/sentry/api/serializers/test_release.py
|
{
"start": 1240,
"end": 36915
}
|
class ____(TestCase, SnubaTestCase):
def test_simple(self) -> None:
    """End-to-end serialization of a release spanning two projects.

    Verifies aggregated newGroups, first/last event timestamps sourced
    from the tag store, commit/author data, version parsing for a hex
    version, and currentProjectMeta passthrough when serializing for a
    single project.
    """
    user = self.create_user()
    project = self.create_project()
    project2 = self.create_project(organization=project.organization)
    release_version = uuid4().hex
    release = Release.objects.create(
        organization_id=project.organization_id, version=release_version
    )
    release.add_project(project)
    release.add_project(project2)
    ReleaseProject.objects.filter(release=release, project=project).update(new_groups=1)
    ReleaseProject.objects.filter(release=release, project=project2).update(new_groups=1)
    # Store one event so the tag store has first/last-seen data for the release.
    self.store_event(
        data={
            "timestamp": before_now(seconds=1).isoformat(),
            "release": release_version,
            "environment": "prod",
        },
        project_id=project.id,
    )
    release = Release.objects.get(version=release_version)
    commit_author = CommitAuthor.objects.create(
        name="stebe", email="stebe@sentry.io", organization_id=project.organization_id
    )
    commit = Commit.objects.create(
        organization_id=project.organization_id,
        repository_id=1,
        key="abc",
        author=commit_author,
        message="waddap",
    )
    ReleaseCommit.objects.create(
        organization_id=project.organization_id,
        project_id=project.id,
        release=release,
        commit=commit,
        order=1,
    )
    release.update(authors=[str(commit_author.id)], commit_count=1, last_commit_id=commit.id)
    result = serialize(release, user)
    assert result["version"] == release.version
    # should be sum of all projects
    assert result["newGroups"] == 2
    (tagvalue1,) = tagstore.backend.get_release_tags(
        1, [project.id], environment_id=None, versions=[release_version]
    )
    assert result["lastEvent"] == tagvalue1.last_seen
    assert result["commitCount"] == 1
    assert result["authors"] == [{"name": "stebe", "email": "stebe@sentry.io"}]
    assert result["version"] == release.version
    # A raw hex version is treated as a build hash with no package/semver parts.
    assert result["versionInfo"]["package"] is None
    assert result["versionInfo"]["version"]["raw"] == release_version
    assert result["versionInfo"]["buildHash"] == release_version
    assert result["versionInfo"]["description"] == release_version[:12]
    current_formatted_datetime = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%S+00:00")
    current_project_meta = {
        "prev_release_version": "foobar@1.0.0",
        "next_release_version": "foobar@2.0.0",
        "sessions_lower_bound": current_formatted_datetime,
        "sessions_upper_bound": current_formatted_datetime,
        "first_release_version": "foobar@1.0.0",
        "last_release_version": "foobar@2.0.0",
    }
    # Serializing for a single project scopes newGroups to that project and
    # exposes current_project_meta under camelCase keys.
    result = serialize(
        release, user, project=project, current_project_meta=current_project_meta
    )
    assert result["newGroups"] == 1
    assert result["firstEvent"] == tagvalue1.first_seen
    assert result["lastEvent"] == tagvalue1.last_seen
    assert (
        result["currentProjectMeta"]["prevReleaseVersion"]
        == current_project_meta["prev_release_version"]
    )
    assert (
        result["currentProjectMeta"]["nextReleaseVersion"]
        == current_project_meta["next_release_version"]
    )
    assert (
        result["currentProjectMeta"]["sessionsLowerBound"]
        == current_project_meta["sessions_lower_bound"]
    )
    assert (
        result["currentProjectMeta"]["sessionsUpperBound"]
        == current_project_meta["sessions_upper_bound"]
    )
    assert (
        result["currentProjectMeta"]["firstReleaseVersion"]
        == current_project_meta["first_release_version"]
    )
    assert (
        result["currentProjectMeta"]["lastReleaseVersion"]
        == current_project_meta["last_release_version"]
    )
def test_authors_is_none(self) -> None:
    """A release whose authors column is NULL serializes to an empty list."""
    null_author_release = Release.objects.create(
        organization_id=self.organization.id, version="1", authors=None
    )
    null_author_release.add_project(self.project)
    serialized = serialize(null_author_release, self.user)
    assert serialized["authors"] == []
def test_mobile_version(self) -> None:
    """A mobile-style "package@version+build" release string is parsed
    into discrete versionInfo fields (package, semver parts, build code)."""
    user = self.create_user()
    project = self.create_project()
    release_version = "foo.bar.BazApp@1.0a+20200101100"
    release = Release.objects.create(
        organization_id=project.organization_id, version=release_version
    )
    release.add_project(project)
    ReleaseProject.objects.filter(release=release, project=project).update(new_groups=1)
    # Store an event so the release has tag data like a real release would.
    self.store_event(
        data={
            "timestamp": before_now(seconds=1).isoformat(),
            "release": release_version,
            "environment": "prod",
        },
        project_id=project.id,
    )
    release = Release.objects.get(version=release_version)
    result = serialize(release, user)
    assert result["version"] == release.version
    assert result["versionInfo"]["package"] == "foo.bar.BazApp"
    assert result["versionInfo"]["version"]["raw"] == "1.0a+20200101100"
    # "1.0a" parses as major=1, minor=0, patch=0 with pre-release "a".
    assert result["versionInfo"]["version"]["major"] == 1
    assert result["versionInfo"]["version"]["minor"] == 0
    assert result["versionInfo"]["version"]["patch"] == 0
    assert result["versionInfo"]["version"]["pre"] == "a"
    assert result["versionInfo"]["version"]["buildCode"] == "20200101100"
    # A parsed semver version means there is no plain build hash.
    assert result["versionInfo"]["buildHash"] is None
    assert result["versionInfo"]["description"] == "1.0a (20200101100)"
    assert result["versionInfo"]["version"]["components"] == 2
def test_no_tag_data(self) -> None:
    """With no stored events, firstEvent/lastEvent serialize as empty."""
    owner = self.create_user()
    proj = self.create_project()
    rel = Release.objects.create(
        organization_id=proj.organization_id, version=uuid4().hex
    )
    rel.add_project(proj)
    author = CommitAuthor.objects.create(
        name="stebe", email="stebe@sentry.io", organization_id=proj.organization_id
    )
    linked_commit = Commit.objects.create(
        organization_id=proj.organization_id,
        repository_id=1,
        key="abc",
        author=author,
        message="waddap",
    )
    ReleaseCommit.objects.create(
        organization_id=proj.organization_id,
        project_id=proj.id,
        release=rel,
        commit=linked_commit,
        order=1,
    )
    serialized = serialize(rel, owner)
    assert serialized["version"] == rel.version
    assert not serialized["firstEvent"]
    assert not serialized["lastEvent"]
def test_get_user_from_email(self) -> None:
    """A commit author's email resolves to the org member with that email,
    matching case-insensitively."""
    # upper case so we can test case sensitivity
    user = self.create_user(email="Stebe@sentry.io")
    project = self.create_project()
    self.create_member(user=user, organization=project.organization)
    release = Release.objects.create(
        organization_id=project.organization_id, version=uuid4().hex
    )
    release.add_project(project)
    # Author email is lower case on purpose; it should still match the user.
    commit_author = CommitAuthor.objects.create(
        name="stebe", email="stebe@sentry.io", organization_id=project.organization_id
    )
    commit = Commit.objects.create(
        organization_id=project.organization_id,
        repository_id=1,
        key="abc",
        author=commit_author,
        message="waddap",
    )
    ReleaseCommit.objects.create(
        organization_id=project.organization_id,
        project_id=project.id,
        release=release,
        commit=commit,
        order=1,
    )
    release.update(authors=[str(commit_author.id)], commit_count=1, last_commit_id=commit.id)
    result = serialize(release, user)
    result_author = result["authors"][0]
    assert int(result_author["id"]) == user.id
    assert result_author["email"] == user.email
    assert result_author["username"] == user.username
def test_get_single_user_from_email(self) -> None:
    """
    If 1 commit author email links to 2 users - prefer user with this as their primary email.
    """
    # `user` owns stebe@sentry.io as primary; `otheruser` has it only as a
    # secondary email, so `user` should win the author attribution.
    user = self.create_user(email="stebe@sentry.io")
    otheruser = self.create_user(email="adifferentstebe@sentry.io")
    self.create_useremail(email="stebe@sentry.io", user=otheruser)
    project = self.create_project()
    self.create_member(user=user, organization=project.organization)
    self.create_member(user=otheruser, organization=project.organization)
    release = Release.objects.create(
        organization_id=project.organization_id, version=uuid4().hex
    )
    release.add_project(project)
    commit_author = CommitAuthor.objects.create(
        name="stebe", email="stebe@sentry.io", organization_id=project.organization_id
    )
    commit = Commit.objects.create(
        organization_id=project.organization_id,
        repository_id=1,
        key="abc",
        author=commit_author,
        message="waddap",
    )
    ReleaseCommit.objects.create(
        organization_id=project.organization_id,
        project_id=project.id,
        release=release,
        commit=commit,
        order=1,
    )
    release.update(authors=[str(commit_author.id)], commit_count=1, last_commit_id=commit.id)
    result = serialize(release, user)
    # Exactly one author, and it is the primary-email user.
    assert len(result["authors"]) == 1
    result_author = result["authors"][0]
    assert int(result_author["id"]) == user.id
    assert result_author["email"] == user.email
    assert result_author["username"] == user.username
def test_select_user_from_appropriate_org(self) -> None:
    """
    Tests that a user not belonging to the organization
    is not returned as the author
    """
    # `user` matches the author email but is NOT an org member;
    # `otheruser` is a member via a secondary email and should win.
    user = self.create_user(email="stebe@sentry.io")
    with assume_test_silo_mode(SiloMode.CONTROL):
        email = UserEmail.objects.get(user=user, email="stebe@sentry.io")
    otheruser = self.create_user(email="adifferentstebe@sentry.io")
    otheremail = self.create_useremail(email="stebe@sentry.io", user=otheruser)
    project = self.create_project()
    self.create_member(user=otheruser, organization=project.organization)
    release = Release.objects.create(
        organization_id=project.organization_id, version=uuid4().hex
    )
    release.add_project(project)
    commit_author = CommitAuthor.objects.create(
        name="stebe", email="stebe@sentry.io", organization_id=project.organization_id
    )
    commit = Commit.objects.create(
        organization_id=project.organization_id,
        repository_id=1,
        key="abc",
        author=commit_author,
        message="waddap",
    )
    ReleaseCommit.objects.create(
        organization_id=project.organization_id,
        project_id=project.id,
        release=release,
        commit=commit,
        order=1,
    )
    release.update(authors=[str(commit_author.id)], commit_count=1, last_commit_id=commit.id)
    # Sanity check: the non-member's email record is older, so membership
    # (not record age) must be what decides the attribution.
    assert email.id < otheremail.id
    result = serialize(release, user)
    assert len(result["authors"]) == 1
    result_author = result["authors"][0]
    assert int(result_author["id"]) == otheruser.id
    assert result_author["email"] == otheruser.email
    assert result_author["username"] == otheruser.username
def test_no_commit_author(self) -> None:
    """A release commit without an author yields an empty authors list."""
    viewer = self.create_user(email="stebe@sentry.io")
    member = self.create_user(email="adifferentstebe@sentry.io")
    proj = self.create_project()
    self.create_member(user=member, organization=proj.organization)
    rel = Release.objects.create(
        organization_id=proj.organization_id, version=uuid4().hex
    )
    rel.add_project(proj)
    # No `author=` on the commit.
    authorless_commit = Commit.objects.create(
        organization_id=proj.organization_id, repository_id=1, key="abc", message="waddap"
    )
    ReleaseCommit.objects.create(
        organization_id=proj.organization_id,
        project_id=proj.id,
        release=rel,
        commit=authorless_commit,
        order=1,
    )
    serialized = serialize(rel, viewer)
    assert serialized["authors"] == []
def test_deduplicate_users(self) -> None:
    """
    Tests that the same user is not returned more than once
    if there are commits associated with multiple of their emails.
    """
    # One user with two emails; one commit author per email.
    email = "stebe@sentry.io"
    user = self.create_user(email=email)
    new_useremail = self.create_useremail(email="alsostebe@sentry.io", user=user)
    project = self.create_project()
    self.create_member(user=user, organization=project.organization)
    release = Release.objects.create(
        organization_id=project.organization_id, version=uuid4().hex
    )
    release.add_project(project)
    commit_author1 = CommitAuthor.objects.create(
        name="stebe", email=email, organization_id=project.organization_id, external_id=None
    )
    commit_author2 = CommitAuthor.objects.create(
        name="stebe",
        email=new_useremail.email,
        organization_id=project.organization_id,
        external_id=None,
    )
    commit1 = Commit.objects.create(
        organization_id=project.organization_id,
        repository_id=1,
        key="abc",
        author=commit_author1,
        message="waddap",
    )
    commit2 = Commit.objects.create(
        organization_id=project.organization_id,
        repository_id=1,
        key="cde",
        author=commit_author2,
        message="oh hi",
    )
    ReleaseCommit.objects.create(
        organization_id=project.organization_id,
        project_id=project.id,
        release=release,
        commit=commit1,
        order=1,
    )
    ReleaseCommit.objects.create(
        organization_id=project.organization_id,
        project_id=project.id,
        release=release,
        commit=commit2,
        order=2,
    )
    ReleaseProject.objects.filter(release=release, project=project).update(new_groups=1)
    # Both authors are recorded on the release, but they resolve to the
    # same user and must be collapsed into a single authors entry.
    release.update(
        authors=[str(commit_author1.id), str(commit_author2.id)],
        commit_count=2,
        last_commit_id=commit2.id,
    )
    result = serialize(release, user)
    assert len(result["authors"]) == 1
    assert result["authors"][0]["email"] == "stebe@sentry.io"
    assert result["newGroups"] == 1
def test_with_deploy(self) -> None:
    """Deploy count and last deploy appear on the serialized release."""
    owner = self.create_user()
    proj = self.create_project()
    rel = Release.objects.create(
        organization_id=proj.organization_id, version=uuid4().hex
    )
    rel.add_project(proj)
    environment = Environment.objects.create(
        organization_id=proj.organization_id, name="production"
    )
    environment.add_project(proj)
    ReleaseProjectEnvironment.objects.create(
        project_id=proj.id,
        release_id=rel.id,
        environment_id=environment.id,
        new_issues_count=1,
    )
    deploy = Deploy.objects.create(
        organization_id=proj.organization_id, release=rel, environment_id=environment.id
    )
    rel.update(total_deploys=1, last_deploy_id=deploy.id)
    serialized = serialize(rel, owner)
    assert serialized["version"] == rel.version
    assert serialized["deployCount"] == 1
    assert serialized["lastDeploy"]["id"] == str(deploy.id)
def test_release_no_users(self) -> None:
    """Serializer must tolerate dangling last_commit_id / author ids
    (e.g. left behind after a repository deletion) rather than crash."""
    missing_commit_id = 9999999
    missing_author_id = 9999999
    proj = self.create_project()
    rel = Release.objects.create(
        organization_id=proj.organization_id,
        version=uuid4().hex,
        authors=[str(missing_author_id)],
        commit_count=1,
        last_commit_id=missing_commit_id,
    )
    rel.add_project(proj)
    # Must not raise despite the dangling references.
    serialize(rel)
def test_get_user_for_authors_simple(self) -> None:
    """An author email belonging to an org member resolves to that user."""
    member = self.create_user(email="chrib@sentry.io")
    proj = self.create_project()
    self.create_member(user=member, organization=proj.organization)
    commit_author = CommitAuthor(
        email="chrib@sentry.io", name="Chrib", organization_id=proj.organization_id
    )
    resolved = get_users_for_authors(
        organization_id=proj.organization_id, authors=[commit_author]
    )
    assert len(resolved) == 1
    assert resolved[str(commit_author.id)]["email"] == commit_author.email
def test_get_user_for_authors_no_user(self) -> None:
    """An author email with no matching user falls back to the raw email."""
    ghost_author = CommitAuthor(email="notactuallyauser@sentry.io")
    proj = self.create_project()
    resolved = get_users_for_authors(
        organization_id=proj.organization_id, authors=[ghost_author]
    )
    assert len(resolved) == 1
    assert resolved[str(ghost_author.id)]["email"] == ghost_author.email
@patch("sentry.api.serializers.models.release.serialize")
def test_get_user_for_authors_caching(self, patched_serialize_base: MagicMock) -> None:
    # Ensure the fetched/miss caching logic works: each previously-unseen
    # author costs one serialize() call; repeat lookups are served from
    # the cache and must not serialize again.
    user = self.create_user(email="chrib@sentry.io")
    user2 = self.create_user(email="alsochrib@sentry.io")
    project = self.create_project()
    self.create_member(user=user, organization=project.organization)
    self.create_member(user=user2, organization=project.organization)
    commit_author = CommitAuthor.objects.create(
        email="chrib@sentry.io", name="Chrib", organization_id=project.organization_id
    )
    commit_author2 = CommitAuthor.objects.create(
        email="alsochrib@sentry.io", name="Also Chrib", organization_id=project.organization_id
    )
    users = get_users_for_authors(
        organization_id=project.organization_id, authors=[commit_author]
    )
    assert len(users) == 1
    assert users[str(commit_author.id)]["email"] == user.email
    # BUG FIX: the original test *assigned* `call_count = 1` (a no-op for
    # verification) instead of asserting it, so the caching behavior was
    # never actually checked. First lookup is a miss: one serialize call.
    assert patched_serialize_base.call_count == 1
    users = get_users_for_authors(
        organization_id=project.organization_id, authors=[commit_author]
    )
    assert len(users) == 1
    assert users[str(commit_author.id)]["email"] == user.email
    # Cache hit: no additional serialize() call.
    assert patched_serialize_base.call_count == 1
    users = get_users_for_authors(
        organization_id=project.organization_id, authors=[commit_author, commit_author2]
    )
    assert len(users) == 2
    assert users[str(commit_author.id)]["email"] == user.email
    assert users[str(commit_author2.id)]["email"] == user2.email
    # Only the new (second) author triggers a serialize() call.
    assert patched_serialize_base.call_count == 2
    users = get_users_for_authors(
        organization_id=project.organization_id, authors=[commit_author, commit_author2]
    )
    assert len(users) == 2
    assert users[str(commit_author.id)]["email"] == user.email
    assert users[str(commit_author2.id)]["email"] == user2.email
    # Both authors now cached: count unchanged.
    assert patched_serialize_base.call_count == 2
def test_adoption_stages(self) -> None:
    """Adoption stage transitions: LOW_ADOPTION -> ADOPTED -> REPLACED,
    per project, and only present when with_adoption_stages=True."""
    user = self.create_user()
    project = self.create_project()
    release = Release.objects.create(
        organization_id=project.organization_id, version=uuid4().hex
    )
    release.add_project(project)
    env = Environment.objects.create(organization_id=project.organization_id, name="staging")
    env.add_project(project)
    ReleaseProjectEnvironment.objects.create(
        project_id=project.id, release_id=release.id, environment_id=env.id, new_issues_count=1
    )
    # Without the flag, adoption stages are omitted entirely.
    result = serialize(release, user)
    assert "adoptionStages" not in result
    result = serialize(release, user)
    assert "adoptionStages" not in result
    # Not adopted in any environment yet -> LOW_ADOPTION.
    result = serialize(release, user, with_adoption_stages=True)
    assert result["adoptionStages"][project.slug]["stage"] == ReleaseStages.LOW_ADOPTION
    assert result["adoptionStages"][project.slug]["unadopted"] is None
    assert result["adoptionStages"][project.slug]["adopted"] is None
    env2 = Environment.objects.create(
        organization_id=project.organization_id, name="production"
    )
    # Adoption in a second environment promotes the project to ADOPTED.
    rpe = ReleaseProjectEnvironment.objects.create(
        project_id=project.id,
        release_id=release.id,
        environment_id=env2.id,
        new_issues_count=1,
        adopted=datetime.now(UTC),
    )
    result = serialize(release, user, with_adoption_stages=True)
    assert result["adoptionStages"][project.slug]["stage"] == ReleaseStages.ADOPTED
    assert result["adoptionStages"][project.slug]["unadopted"] is None
    assert result["adoptionStages"][project.slug]["adopted"] is not None
    # A second project on the same release is staged independently.
    project2 = self.create_project()
    ReleaseProjectEnvironment.objects.create(
        project_id=project2.id,
        release_id=release.id,
        environment_id=env2.id,
        new_issues_count=1,
    )
    result = serialize(release, user, with_adoption_stages=True)
    assert result["adoptionStages"][project.slug]["stage"] == ReleaseStages.ADOPTED
    assert result["adoptionStages"][project2.slug]["stage"] == ReleaseStages.LOW_ADOPTION
    ReleaseProjectEnvironment.objects.create(
        project_id=project2.id,
        release_id=release.id,
        environment_id=env.id,
        new_issues_count=1,
        adopted=datetime.now(UTC),
    )
    result = serialize(release, user, with_adoption_stages=True)
    assert result["adoptionStages"][project.slug]["stage"] == ReleaseStages.ADOPTED
    assert result["adoptionStages"][project2.slug]["stage"] == ReleaseStages.ADOPTED
    # Marking the adopted environment unadopted moves project 1 to REPLACED.
    rpe.update(unadopted=datetime.now(UTC))
    result = serialize(release, user, with_adoption_stages=True)
    assert result["adoptionStages"][project.slug]["stage"] == ReleaseStages.REPLACED
    assert result["adoptionStages"][project2.slug]["stage"] == ReleaseStages.ADOPTED
def test_with_none_new_groups(self) -> None:
    """A NULL new_groups value must serialize as 0, not crash or emit None."""
    proj = self.create_project()
    rel = Release.objects.create(
        organization_id=proj.organization_id,
        version="0.1",
    )
    rel.add_project(proj)
    ReleaseProject.objects.filter(release=rel, project=proj).update(new_groups=None)
    serialized = serialize(rel, user=self.user, project=proj)
    assert serialized["version"] == "0.1"
    assert serialized["newGroups"] == 0  # Should default to 0 when None
def test_new_groups_single_release(self) -> None:
    """
    Test new groups counts for one release with multiple projects, each having different issue counts.
    """
    project_a = self.create_project(name="Project A", slug="project-a")
    project_b = self.create_project(
        name="Project B", slug="project-b", organization=project_a.organization
    )
    release_version = "1.0.0"
    release = Release.objects.create(
        organization_id=project_a.organization_id, version=release_version
    )
    release.add_project(project_a)
    release.add_project(project_b)
    # 3 new groups for project A, 2 new groups for project B
    ReleaseProject.objects.filter(release=release, project=project_a).update(new_groups=3)
    ReleaseProject.objects.filter(release=release, project=project_b).update(new_groups=2)
    result = serialize(release, self.user)
    # Top-level newGroups is the sum across projects; each project entry
    # keeps its own per-project count.
    assert result["newGroups"] == 5
    projects = {p["id"]: p for p in result["projects"]}
    assert projects[project_a.id]["newGroups"] == 3
    assert projects[project_b.id]["newGroups"] == 2
    assert projects[project_a.id]["name"] == "Project A"
    assert projects[project_a.id]["slug"] == "project-a"
    assert projects[project_b.id]["name"] == "Project B"
    assert projects[project_b.id]["slug"] == "project-b"
    def test_new_groups_multiple_releases(self) -> None:
        """
        Test new groups count for multiple releases per project.

        Covers serializing each release alone and both together; counts must
        stay attributed to the correct (release, project) pair.
        """
        project_a = self.create_project(name="Project A", slug="project-a")
        project_b = self.create_project(
            name="Project B", slug="project-b", organization=project_a.organization
        )
        release_1 = Release.objects.create(
            organization_id=project_a.organization_id, version="1.0.0"
        )
        release_1.add_project(project_a)
        release_1.add_project(project_b)
        release_2 = Release.objects.create(
            organization_id=project_a.organization_id, version="2.0.0"
        )
        release_2.add_project(project_a)
        release_2.add_project(project_b)
        # Release 1.0.0 has 3 new groups for project A, 2 new groups for project B
        ReleaseProject.objects.filter(release=release_1, project=project_a).update(new_groups=3)
        ReleaseProject.objects.filter(release=release_1, project=project_b).update(new_groups=2)
        # Release 2.0.0 has 1 new groups for project A, 4 new groups for project B
        ReleaseProject.objects.filter(release=release_2, project=project_a).update(new_groups=1)
        ReleaseProject.objects.filter(release=release_2, project=project_b).update(new_groups=4)
        # 1. Serialize Release 1.0.0
        result = serialize(release_1, self.user)
        assert result["version"] == "1.0.0"
        assert result["newGroups"] == 5
        projects = {p["id"]: p for p in result["projects"]}
        assert projects[project_a.id]["newGroups"] == 3
        assert projects[project_b.id]["newGroups"] == 2
        # 2. Serialize Release 2.0.0
        result = serialize(release_2, self.user)
        assert result["version"] == "2.0.0"
        assert result["newGroups"] == 5
        projects = {p["id"]: p for p in result["projects"]}
        assert projects[project_a.id]["newGroups"] == 1
        assert projects[project_b.id]["newGroups"] == 4
        # 3. Serialize both releases together
        result = serialize([release_1, release_2], self.user)
        assert len(result) == 2
        serialized_releases = {r["version"]: r for r in result}
        serialized_release_1 = serialized_releases["1.0.0"]
        serialized_release_2 = serialized_releases["2.0.0"]
        # Both totals are 5 by construction (3+2 and 1+4), so the per-project
        # assertions below are what actually distinguish the two releases.
        assert serialized_release_1["newGroups"] == 5
        assert serialized_release_2["newGroups"] == 5
        projects_1 = {p["id"]: p for p in serialized_release_1["projects"]}
        projects_2 = {p["id"]: p for p in serialized_release_2["projects"]}
        assert projects_1[project_a.id]["newGroups"] == 3
        assert projects_1[project_b.id]["newGroups"] == 2
        assert projects_2[project_a.id]["newGroups"] == 1
        assert projects_2[project_b.id]["newGroups"] == 4
    def test_new_groups_environment_filtering(self) -> None:
        """
        Test new group counts for a single release with environment filtering.

        Without a filter the per-project ``ReleaseProject.new_groups`` totals
        are used; with a filter the per-environment ``new_issues_count`` rows
        are summed instead.
        """
        project_a = self.create_project(name="Project A", slug="project-a")
        project_b = self.create_project(
            name="Project B", slug="project-b", organization=project_a.organization
        )
        production = self.create_environment(name="production", organization=project_a.organization)
        staging = self.create_environment(name="staging", organization=project_a.organization)
        release = Release.objects.create(organization_id=project_a.organization_id, version="1.0.0")
        release.add_project(project_a)
        release.add_project(project_b)
        # 4 new groups for project A, 2 new groups for project B
        ReleaseProject.objects.filter(release=release, project=project_a).update(new_groups=4)
        ReleaseProject.objects.filter(release=release, project=project_b).update(new_groups=2)
        # Project A: 3 issues in production, 1 issue in staging (total = 4)
        ReleaseProjectEnvironment.objects.create(
            release=release, project=project_a, environment=production, new_issues_count=3
        )
        ReleaseProjectEnvironment.objects.create(
            release=release, project=project_a, environment=staging, new_issues_count=1
        )
        # Project B: 2 issues in production, 0 issues in staging (total = 2)
        ReleaseProjectEnvironment.objects.create(
            release=release, project=project_b, environment=production, new_issues_count=2
        )
        ReleaseProjectEnvironment.objects.create(
            release=release, project=project_b, environment=staging, new_issues_count=0
        )
        # 1. No environment filter
        result = serialize(release, self.user)
        projects = {p["id"]: p for p in result["projects"]}
        assert projects[project_a.id]["newGroups"] == 4
        assert projects[project_b.id]["newGroups"] == 2
        assert result["newGroups"] == 6
        # 2. Filter by production environment
        result = serialize(release, self.user, environments=["production"])
        projects = {p["id"]: p for p in result["projects"]}
        assert projects[project_a.id]["newGroups"] == 3
        assert projects[project_b.id]["newGroups"] == 2
        assert result["newGroups"] == 5
        # 3. Filter by staging environment
        result = serialize(release, self.user, environments=["staging"])
        projects = {p["id"]: p for p in result["projects"]}
        assert projects[project_a.id]["newGroups"] == 1
        assert projects[project_b.id]["newGroups"] == 0
        assert result["newGroups"] == 1
        # 4. Filter by both environments
        result = serialize(release, self.user, environments=["production", "staging"])
        projects = {p["id"]: p for p in result["projects"]}
        assert projects[project_a.id]["newGroups"] == 4
        assert projects[project_b.id]["newGroups"] == 2
        assert result["newGroups"] == 6
    def test_new_groups_multiple_releases_environment_filtering(self) -> None:
        """
        Test new group counts for multiple releases with different environments.

        Combines the multi-release and environment-filter cases: filtered
        counts come from ``ReleaseProjectEnvironment`` rows scoped to the
        matching (release, project, environment) triple.
        """
        project_a = self.create_project(name="Project A", slug="project-a")
        project_b = self.create_project(
            name="Project B", slug="project-b", organization=project_a.organization
        )
        production = self.create_environment(name="production", organization=project_a.organization)
        staging = self.create_environment(name="staging", organization=project_a.organization)
        release_1 = Release.objects.create(
            organization_id=project_a.organization_id, version="1.0.0"
        )
        release_1.add_project(project_a)
        release_1.add_project(project_b)
        release_2 = Release.objects.create(
            organization_id=project_a.organization_id, version="2.0.0"
        )
        release_2.add_project(project_a)
        release_2.add_project(project_b)
        # Release 1.0.0: Project A = 4 (3+1), Project B = 2 (2+0)
        ReleaseProject.objects.filter(release=release_1, project=project_a).update(new_groups=4)
        ReleaseProject.objects.filter(release=release_1, project=project_b).update(new_groups=2)
        # Release 2.0.0: Project A = 3 (1+2), Project B = 5 (4+1)
        ReleaseProject.objects.filter(release=release_2, project=project_a).update(new_groups=3)
        ReleaseProject.objects.filter(release=release_2, project=project_b).update(new_groups=5)
        # Release 1.0.0 - Project A: 3 in production, 1 in staging
        ReleaseProjectEnvironment.objects.create(
            release=release_1, project=project_a, environment=production, new_issues_count=3
        )
        ReleaseProjectEnvironment.objects.create(
            release=release_1, project=project_a, environment=staging, new_issues_count=1
        )
        # Release 1.0.0 - Project B: 2 in production, 0 in staging (no staging record)
        ReleaseProjectEnvironment.objects.create(
            release=release_1, project=project_b, environment=production, new_issues_count=2
        )
        # Release 2.0.0 - Project A: 1 in production, 2 in staging
        ReleaseProjectEnvironment.objects.create(
            release=release_2, project=project_a, environment=production, new_issues_count=1
        )
        ReleaseProjectEnvironment.objects.create(
            release=release_2, project=project_a, environment=staging, new_issues_count=2
        )
        # Release 2.0.0 - Project B: 4 in production, 1 in staging
        ReleaseProjectEnvironment.objects.create(
            release=release_2, project=project_b, environment=production, new_issues_count=4
        )
        ReleaseProjectEnvironment.objects.create(
            release=release_2, project=project_b, environment=staging, new_issues_count=1
        )
        # 1. Serialize Release 1.0.0 with no environment filter
        result = serialize(release_1, self.user)
        assert result["newGroups"] == 6
        projects = {p["id"]: p for p in result["projects"]}
        assert projects[project_a.id]["newGroups"] == 4
        assert projects[project_b.id]["newGroups"] == 2
        # 2. Serialize Release 1.0.0 with production filter
        result = serialize(release_1, self.user, environments=["production"])
        assert result["version"] == "1.0.0"
        assert result["newGroups"] == 5
        projects = {p["id"]: p for p in result["projects"]}
        assert projects[project_a.id]["newGroups"] == 3
        assert projects[project_b.id]["newGroups"] == 2
        # 3. Serialize Release 2.0.0 with production filter
        result = serialize(release_2, self.user, environments=["production"])
        assert result["version"] == "2.0.0"
        assert result["newGroups"] == 5
        projects = {p["id"]: p for p in result["projects"]}
        assert projects[project_a.id]["newGroups"] == 1
        assert projects[project_b.id]["newGroups"] == 4
        # 4. Serialize both releases with production filter
        result = serialize([release_1, release_2], self.user, environments=["production"])
        assert len(result) == 2
        serialized_releases = {r["version"]: r for r in result}
        serialized_release_1 = serialized_releases["1.0.0"]
        serialized_release_2 = serialized_releases["2.0.0"]
        assert serialized_release_1["newGroups"] == 5
        assert serialized_release_2["newGroups"] == 5
        projects_1 = {p["id"]: p for p in serialized_release_1["projects"]}
        projects_2 = {p["id"]: p for p in serialized_release_2["projects"]}
        assert projects_1[project_a.id]["newGroups"] == 3
        assert projects_1[project_b.id]["newGroups"] == 2
        assert projects_2[project_a.id]["newGroups"] == 1
        assert projects_2[project_b.id]["newGroups"] == 4
|
ReleaseSerializerTest
|
python
|
pallets__click
|
src/click/core.py
|
{
"start": 57885,
"end": 74961
}
|
class ____(Command):
    """A group is a command that nests other commands (or more groups).
    :param name: The name of the group command.
    :param commands: Map names to :class:`Command` objects. Can be a list, which
        will use :attr:`Command.name` as the keys.
    :param invoke_without_command: Invoke the group's callback even if a
        subcommand is not given.
    :param no_args_is_help: If no arguments are given, show the group's help and
        exit. Defaults to the opposite of ``invoke_without_command``.
    :param subcommand_metavar: How to represent the subcommand argument in help.
        The default will represent whether ``chain`` is set or not.
    :param chain: Allow passing more than one subcommand argument. After parsing
        a command's arguments, if any arguments remain another command will be
        matched, and so on.
    :param result_callback: A function to call after the group's and
        subcommand's callbacks. The value returned by the subcommand is passed.
        If ``chain`` is enabled, the value will be a list of values returned by
        all the commands. If ``invoke_without_command`` is enabled, the value
        will be the value returned by the group's callback, or an empty list if
        ``chain`` is enabled.
    :param kwargs: Other arguments passed to :class:`Command`.
    .. versionchanged:: 8.0
        The ``commands`` argument can be a list of command objects.
    .. versionchanged:: 8.2
        Merged with and replaces the ``MultiCommand`` base class.
    """
    # Leftover tokens are kept rather than rejected: they are resolved to
    # subcommands in :meth:`parse_args` / :meth:`invoke`.
    allow_extra_args = True
    allow_interspersed_args = False
    #: If set, this is used by the group's :meth:`command` decorator
    #: as the default :class:`Command` class. This is useful to make all
    #: subcommands use a custom command class.
    #:
    #: .. versionadded:: 8.0
    command_class: type[Command] | None = None
    #: If set, this is used by the group's :meth:`group` decorator
    #: as the default :class:`Group` class. This is useful to make all
    #: subgroups use a custom group class.
    #:
    #: If set to the special value :class:`type` (literally
    #: ``group_class = type``), this group's class will be used as the
    #: default class. This makes a custom group class continue to make
    #: custom groups.
    #:
    #: .. versionadded:: 8.0
    group_class: type[Group] | type[type] | None = None
    # Literal[type] isn't valid, so use Type[type]
    def __init__(
        self,
        name: str | None = None,
        commands: cabc.MutableMapping[str, Command]
        | cabc.Sequence[Command]
        | None = None,
        invoke_without_command: bool = False,
        no_args_is_help: bool | None = None,
        subcommand_metavar: str | None = None,
        chain: bool = False,
        result_callback: t.Callable[..., t.Any] | None = None,
        **kwargs: t.Any,
    ) -> None:
        super().__init__(name, **kwargs)
        if commands is None:
            commands = {}
        elif isinstance(commands, abc.Sequence):
            # A list of commands is keyed by each command's own name;
            # unnamed commands are silently dropped.
            commands = {c.name: c for c in commands if c.name is not None}
        #: The registered subcommands by their exported names.
        self.commands: cabc.MutableMapping[str, Command] = commands
        if no_args_is_help is None:
            no_args_is_help = not invoke_without_command
        self.no_args_is_help = no_args_is_help
        self.invoke_without_command = invoke_without_command
        if subcommand_metavar is None:
            if chain:
                subcommand_metavar = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
            else:
                subcommand_metavar = "COMMAND [ARGS]..."
        self.subcommand_metavar = subcommand_metavar
        self.chain = chain
        # The result callback that is stored. This can be set or
        # overridden with the :func:`result_callback` decorator.
        self._result_callback = result_callback
        if self.chain:
            # An optional argument on the group would make it ambiguous
            # where the group's args end and the first subcommand begins.
            for param in self.params:
                if isinstance(param, Argument) and not param.required:
                    raise RuntimeError(
                        "A group in chain mode cannot have optional arguments."
                    )
    def to_info_dict(self, ctx: Context) -> dict[str, t.Any]:
        """Extend the base info dict with each subcommand's info dict."""
        info_dict = super().to_info_dict(ctx)
        commands = {}
        for name in self.list_commands(ctx):
            command = self.get_command(ctx, name)
            if command is None:
                continue
            sub_ctx = ctx._make_sub_context(command)
            with sub_ctx.scope(cleanup=False):
                commands[name] = command.to_info_dict(sub_ctx)
        info_dict.update(commands=commands, chain=self.chain)
        return info_dict
    def add_command(self, cmd: Command, name: str | None = None) -> None:
        """Registers another :class:`Command` with this group. If the name
        is not provided, the name of the command is used.
        """
        name = name or cmd.name
        if name is None:
            raise TypeError("Command has no name.")
        _check_nested_chain(self, name, cmd, register=True)
        self.commands[name] = cmd
    @t.overload
    def command(self, __func: t.Callable[..., t.Any]) -> Command: ...
    @t.overload
    def command(
        self, *args: t.Any, **kwargs: t.Any
    ) -> t.Callable[[t.Callable[..., t.Any]], Command]: ...
    def command(
        self, *args: t.Any, **kwargs: t.Any
    ) -> t.Callable[[t.Callable[..., t.Any]], Command] | Command:
        """A shortcut decorator for declaring and attaching a command to
        the group. This takes the same arguments as :func:`command` and
        immediately registers the created command with this group by
        calling :meth:`add_command`.
        To customize the command class used, set the
        :attr:`command_class` attribute.
        .. versionchanged:: 8.1
            This decorator can be applied without parentheses.
        .. versionchanged:: 8.0
            Added the :attr:`command_class` attribute.
        """
        from .decorators import command
        func: t.Callable[..., t.Any] | None = None
        if args and callable(args[0]):
            # Bare usage: ``@group.command`` with no parentheses.
            assert len(args) == 1 and not kwargs, (
                "Use 'command(**kwargs)(callable)' to provide arguments."
            )
            (func,) = args
            args = ()
        if self.command_class and kwargs.get("cls") is None:
            kwargs["cls"] = self.command_class
        def decorator(f: t.Callable[..., t.Any]) -> Command:
            cmd: Command = command(*args, **kwargs)(f)
            self.add_command(cmd)
            return cmd
        if func is not None:
            return decorator(func)
        return decorator
    @t.overload
    def group(self, __func: t.Callable[..., t.Any]) -> Group: ...
    @t.overload
    def group(
        self, *args: t.Any, **kwargs: t.Any
    ) -> t.Callable[[t.Callable[..., t.Any]], Group]: ...
    def group(
        self, *args: t.Any, **kwargs: t.Any
    ) -> t.Callable[[t.Callable[..., t.Any]], Group] | Group:
        """A shortcut decorator for declaring and attaching a group to
        the group. This takes the same arguments as :func:`group` and
        immediately registers the created group with this group by
        calling :meth:`add_command`.
        To customize the group class used, set the :attr:`group_class`
        attribute.
        .. versionchanged:: 8.1
            This decorator can be applied without parentheses.
        .. versionchanged:: 8.0
            Added the :attr:`group_class` attribute.
        """
        from .decorators import group
        func: t.Callable[..., t.Any] | None = None
        if args and callable(args[0]):
            # Bare usage: ``@group.group`` with no parentheses.
            assert len(args) == 1 and not kwargs, (
                "Use 'group(**kwargs)(callable)' to provide arguments."
            )
            (func,) = args
            args = ()
        if self.group_class is not None and kwargs.get("cls") is None:
            if self.group_class is type:
                # ``group_class = type`` means "use my own class".
                kwargs["cls"] = type(self)
            else:
                kwargs["cls"] = self.group_class
        def decorator(f: t.Callable[..., t.Any]) -> Group:
            cmd: Group = group(*args, **kwargs)(f)
            self.add_command(cmd)
            return cmd
        if func is not None:
            return decorator(func)
        return decorator
    def result_callback(self, replace: bool = False) -> t.Callable[[F], F]:
        """Adds a result callback to the command. By default if a
        result callback is already registered this will chain them but
        this can be disabled with the `replace` parameter. The result
        callback is invoked with the return value of the subcommand
        (or the list of return values from all subcommands if chaining
        is enabled) as well as the parameters as they would be passed
        to the main callback.
        Example::
            @click.group()
            @click.option('-i', '--input', default=23)
            def cli(input):
                return 42
            @cli.result_callback()
            def process_result(result, input):
                return result + input
        :param replace: if set to `True` an already existing result
            callback will be removed.
        .. versionchanged:: 8.0
            Renamed from ``resultcallback``.
        .. versionadded:: 3.0
        """
        def decorator(f: F) -> F:
            old_callback = self._result_callback
            if old_callback is None or replace:
                self._result_callback = f
                return f
            # Chain: run the previously registered callback first, then
            # feed its result into the new one.
            def function(value: t.Any, /, *args: t.Any, **kwargs: t.Any) -> t.Any:
                inner = old_callback(value, *args, **kwargs)
                return f(inner, *args, **kwargs)
            self._result_callback = rv = update_wrapper(t.cast(F, function), f)
            return rv  # type: ignore[return-value]
        return decorator
    def get_command(self, ctx: Context, cmd_name: str) -> Command | None:
        """Given a context and a command name, this returns a :class:`Command`
        object if it exists or returns ``None``.
        """
        return self.commands.get(cmd_name)
    def list_commands(self, ctx: Context) -> list[str]:
        """Returns a list of subcommand names in the order they should appear."""
        return sorted(self.commands)
    def collect_usage_pieces(self, ctx: Context) -> list[str]:
        """Append the subcommand metavar to the base usage pieces."""
        rv = super().collect_usage_pieces(ctx)
        rv.append(self.subcommand_metavar)
        return rv
    def format_options(self, ctx: Context, formatter: HelpFormatter) -> None:
        """Write the options section, then the list of subcommands."""
        super().format_options(ctx, formatter)
        self.format_commands(ctx, formatter)
    def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None:
        """Extra format methods for multi methods that adds all the commands
        after the options.
        """
        commands = []
        for subcommand in self.list_commands(ctx):
            cmd = self.get_command(ctx, subcommand)
            # What is this, the tool lied about a command.  Ignore it
            if cmd is None:
                continue
            if cmd.hidden:
                continue
            commands.append((subcommand, cmd))
        # allow for 3 times the default spacing
        if len(commands):
            limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
            rows = []
            for subcommand, cmd in commands:
                help = cmd.get_short_help_str(limit)
                rows.append((subcommand, help))
            if rows:
                with formatter.section(_("Commands")):
                    formatter.write_dl(rows)
    def parse_args(self, ctx: Context, args: list[str]) -> list[str]:
        """Parse the group's own args, stashing leftover tokens for
        subcommand resolution in :meth:`invoke`.
        """
        if not args and self.no_args_is_help and not ctx.resilient_parsing:
            raise NoArgsIsHelpError(ctx)
        rest = super().parse_args(ctx, args)
        if self.chain:
            # Chain mode: every remaining token may start another subcommand.
            ctx._protected_args = rest
            ctx.args = []
        elif rest:
            # Single-command mode: only the first token names the subcommand.
            ctx._protected_args, ctx.args = rest[:1], rest[1:]
        return ctx.args
    def invoke(self, ctx: Context) -> t.Any:
        def _process_result(value: t.Any) -> t.Any:
            if self._result_callback is not None:
                value = ctx.invoke(self._result_callback, value, **ctx.params)
            return value
        if not ctx._protected_args:
            if self.invoke_without_command:
                # No subcommand was invoked, so the result callback is
                # invoked with the group return value for regular
                # groups, or an empty list for chained groups.
                with ctx:
                    rv = super().invoke(ctx)
                    return _process_result([] if self.chain else rv)
            ctx.fail(_("Missing command."))
        # Fetch args back out
        args = [*ctx._protected_args, *ctx.args]
        ctx.args = []
        ctx._protected_args = []
        # If we're not in chain mode, we only allow the invocation of a
        # single command but we also inform the current context about the
        # name of the command to invoke.
        if not self.chain:
            # Make sure the context is entered so we do not clean up
            # resources until the result processor has worked.
            with ctx:
                cmd_name, cmd, args = self.resolve_command(ctx, args)
                assert cmd is not None
                ctx.invoked_subcommand = cmd_name
                super().invoke(ctx)
                sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
                with sub_ctx:
                    return _process_result(sub_ctx.command.invoke(sub_ctx))
        # In chain mode we create the contexts step by step, but after the
        # base command has been invoked.  Because at that point we do not
        # know the subcommands yet, the invoked subcommand attribute is
        # set to ``*`` to inform the command that subcommands are executed
        # but nothing else.
        with ctx:
            ctx.invoked_subcommand = "*" if args else None
            super().invoke(ctx)
            # Otherwise we make every single context and invoke them in a
            # chain.  In that case the return value to the result processor
            # is the list of all invoked subcommand's results.
            contexts = []
            while args:
                cmd_name, cmd, args = self.resolve_command(ctx, args)
                assert cmd is not None
                sub_ctx = cmd.make_context(
                    cmd_name,
                    args,
                    parent=ctx,
                    allow_extra_args=True,
                    allow_interspersed_args=False,
                )
                contexts.append(sub_ctx)
                args, sub_ctx.args = sub_ctx.args, []
            rv = []
            for sub_ctx in contexts:
                with sub_ctx:
                    rv.append(sub_ctx.command.invoke(sub_ctx))
            return _process_result(rv)
    def resolve_command(
        self, ctx: Context, args: list[str]
    ) -> tuple[str | None, Command | None, list[str]]:
        """Resolve ``args[0]`` to a subcommand; returns (name, command, rest)."""
        cmd_name = make_str(args[0])
        original_cmd_name = cmd_name
        # Get the command
        cmd = self.get_command(ctx, cmd_name)
        # If we can't find the command but there is a normalization
        # function available, we try with that one.
        if cmd is None and ctx.token_normalize_func is not None:
            cmd_name = ctx.token_normalize_func(cmd_name)
            cmd = self.get_command(ctx, cmd_name)
        # If we don't find the command we want to show an error message
        # to the user that it was not provided.  However, there is
        # something else we should do: if the first argument looks like
        # an option we want to kick off parsing again for arguments to
        # resolve things like --help which now should go to the main
        # place.
        if cmd is None and not ctx.resilient_parsing:
            if _split_opt(cmd_name)[0]:
                self.parse_args(ctx, args)
            ctx.fail(_("No such command {name!r}.").format(name=original_cmd_name))
        return cmd_name if cmd else None, cmd, args[1:]
    def shell_complete(self, ctx: Context, incomplete: str) -> list[CompletionItem]:
        """Return a list of completions for the incomplete value. Looks
        at the names of options, subcommands, and chained
        multi-commands.
        :param ctx: Invocation context for this command.
        :param incomplete: Value being completed. May be empty.
        .. versionadded:: 8.0
        """
        from click.shell_completion import CompletionItem
        results = [
            CompletionItem(name, help=command.get_short_help_str())
            for name, command in _complete_visible_commands(ctx, incomplete)
        ]
        results.extend(super().shell_complete(ctx, incomplete))
        return results
|
Group
|
python
|
walkccc__LeetCode
|
solutions/3332. Maximum Points Tourist Can Earn/3332.py
|
{
"start": 0,
"end": 657
}
|
class ____:
  def maxScore(
      self,
      n: int,
      k: int,
      stayScore: list[list[int]],
      travelScore: list[list[int]]
  ) -> int:
    """Best total score after exactly k days, ending in any of the n cities.

    Rolling 1-D DP: prev[city] is the best score achievable when the
    tourist is at `city` after the days processed so far.
    """
    prev = [0] * n
    for day in range(k):
      nxt = []
      for city in range(n):
        # Option 1: spend this day in `city`.
        best = prev[city] + stayScore[day][city]
        # Option 2: travel into `city` from any other city.
        for src in range(n):
          if src != city:
            best = max(best, prev[src] + travelScore[src][city])
        nxt.append(best)
      prev = nxt
    return max(prev)
|
Solution
|
python
|
tiangolo__fastapi
|
scripts/mkdocs_hooks.py
|
{
"start": 1034,
"end": 5721
}
|
class ____(File):
    """Marker ``File`` subclass for docs served from the English fallback tree."""

    pass
def on_config(config: MkDocsConfig, **kwargs: Any) -> MkDocsConfig:
    """Set the theme language from the docs dir and suffix the site URL."""
    known_langs = get_mkdocs_material_langs()
    lang = Path(config.docs_dir).parent.name
    if lang in known_langs:
        config.theme["language"] = lang
    if lang != "en" and not (config.site_url or "").endswith(f"{lang}/"):
        config.site_url = f"{config.site_url}{lang}/"
    return config
def resolve_file(*, item: str, files: Files, config: MkDocsConfig) -> None:
    """Register the English copy of *item* when the translation lacks it."""
    local_path = Path(config.docs_dir) / item
    if local_path.is_file():
        return
    en_src_dir = (Path(config.docs_dir) / "../../en/docs").resolve()
    if (en_src_dir / item).is_file():
        files.append(
            EnFile(
                path=item,
                src_dir=str(en_src_dir),
                dest_dir=config.site_dir,
                use_directory_urls=config.use_directory_urls,
            )
        )
def resolve_files(*, items: List[Any], files: Files, config: MkDocsConfig) -> None:
    """Recursively resolve nav-style entries: strings or single-key dicts."""
    for entry in items:
        if isinstance(entry, str):
            resolve_file(item=entry, files=files, config=config)
        elif isinstance(entry, dict):
            assert len(entry) == 1
            values = list(entry.values())
            if not values:
                continue
            inner = values[0]
            if isinstance(inner, str):
                resolve_file(item=inner, files=files, config=config)
            elif isinstance(inner, list):
                resolve_files(items=inner, files=files, config=config)
            else:
                raise ValueError(f"Unexpected value: {values}")
def on_files(files: Files, *, config: MkDocsConfig) -> Files:
    """Resolve English fallbacks for nav entries, theme assets, CSS and JS."""
    resolve_files(items=config.nav or [], files=files, config=config)
    for theme_key in ("logo", "favicon"):
        if theme_key in config.theme:
            resolve_file(item=config.theme[theme_key], files=files, config=config)
    resolve_files(items=config.extra_css, files=files, config=config)
    resolve_files(items=config.extra_javascript, files=files, config=config)
    return files
def generate_renamed_section_items(
    items: List[Union[Page, Section, Link]], *, config: MkDocsConfig
) -> List[Union[Page, Section, Link]]:
    """Rename each section after its index page's parsed title, recursively."""
    renamed: List[Union[Page, Section, Link]] = []
    for item in items:
        if not isinstance(item, Section):
            renamed.append(item)
            continue
        children = generate_renamed_section_items(item.children, config=config)
        title = item.title
        head = children[0]
        if isinstance(head, Page) and head.file.src_path.endswith("index.md"):
            # Reading the source parses the page so its title becomes available.
            head.read_source(config=config)
            title = head.title or title
        # Building a fresh Section renders it collapsed by default (unclear
        # why), so mutate the existing one instead of replacing it.
        item.title = title.split("{ #")[0]
        item.children = children
        renamed.append(item)
    return renamed
def on_nav(
    nav: Navigation, *, config: MkDocsConfig, files: Files, **kwargs: Any
) -> Navigation:
    """Rebuild navigation with section titles taken from their index pages."""
    items = generate_renamed_section_items(nav.items, config=config)
    return Navigation(items=items, pages=nav.pages)
def on_pre_page(page: Page, *, config: MkDocsConfig, files: Files) -> Page:
    """No-op hook: every page passes through unchanged."""
    return page
def on_page_markdown(
    markdown: str, *, page: Page, config: MkDocsConfig, files: Files
) -> str:
    """Store a permalink-free title for social cards and inject the
    missing-translation notice into English fallback pages.
    """
    clean_title = page.title.split("{ #")[0]
    if clean_title:
        social = page.meta.setdefault("social", {})
        social.setdefault("cards_layout_options", {})
        social["cards_layout_options"]["title"] = clean_title
    if not isinstance(page.file, EnFile):
        return markdown
    if any(page.file.src_path.startswith(s) for s in non_translated_sections):
        return markdown
    missing_translation_content = get_missing_translation_content(config.docs_dir)
    header, body = "", markdown
    if markdown.startswith("#"):
        # Keep the leading heading above the injected notice.
        header, _, body = markdown.partition("\n\n")
    return f"{header}\n\n{missing_translation_content}\n\n{body}"
|
EnFile
|
python
|
pytorch__pytorch
|
test/jit/test_backends.py
|
{
"start": 5202,
"end": 6847
}
|
class ____(JitBackendTestCase):
    """
    Tests for BasicModule with a backend that is not available.
    Fundamentally:
        * _jit_to_backend is successful.
        * Execution fails with an exception.
        * Saving is successful.
        * Loading fails with an exception.
    """

    def setUp(self):
        super().setUp()
        # Create Python, JIT and backend versions of BasicModule.
        self.module = BasicModule()
        self.scripted_module = torch.jit.script(BasicModule())
        # Lower to a backend name that is registered but cannot run here;
        # the compile spec only covers "forward" (empty spec values).
        self.lowered_module = torch._C._jit_to_backend(
            "test_backend_unavailable",
            self.scripted_module,
            {"forward": {"": ""}},
        )

    def test_execution(self):
        # Test execution with backend fails because the backend that is not available.
        input = torch.randn(5)
        # Test exception is thrown.
        with self.assertRaisesRegexWithHighlight(
            Exception,
            r"Backend is not available.",
            'raise Exception("Backend is not available."',
        ):
            # Look up the lowered "forward" by name, then call it.
            backend_method = self.lowered_module.__getattr__("forward")
            backend_method(*(input, input))

    def test_save_load(self):
        # Test that saving the lowered module is OK but loading fails because the backend is not available.
        buffer = io.BytesIO()
        torch.jit.save(self.lowered_module, buffer)
        buffer.seek(0)
        with self.assertRaisesRegexWithHighlight(
            Exception,
            r"Backend is not available.",
            'raise Exception("Backend is not available."',
        ):
            torch.jit.load(buffer)
|
BasicModuleUnavailableTest
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/langchain/agents/middleware/tool_call_limit.py
|
{
"start": 3679,
"end": 4892
}
|
class ____(Exception):
    """Exception raised when tool call limits are exceeded.

    Raised when the configured exit behavior is `'error'` and either the
    thread or run tool call limit has been exceeded.
    """

    def __init__(
        self,
        thread_count: int,
        run_count: int,
        thread_limit: int | None,
        run_limit: int | None,
        tool_name: str | None = None,
    ) -> None:
        """Initialize the exception with call count information.

        Args:
            thread_count: Current thread tool call count.
            run_count: Current run tool call count.
            thread_limit: Thread tool call limit (if set).
            run_limit: Run tool call limit (if set).
            tool_name: Tool name being limited (if specific tool), or None for all tools.
        """
        self.tool_name = tool_name
        self.thread_limit = thread_limit
        self.run_limit = run_limit
        self.thread_count = thread_count
        self.run_count = run_count
        # The human-readable message mirrors the final AI message content.
        super().__init__(
            _build_final_ai_message_content(
                thread_count, run_count, thread_limit, run_limit, tool_name
            )
        )
|
ToolCallLimitExceededError
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/init_ops_v2.py
|
{
"start": 18448,
"end": 22952
}
|
class ____(Initializer):
  """Initializer capable of adapting its scale to the shape of weights tensors.
  Initializers allow you to pre-specify an initialization strategy, encoded in
  the Initializer object, without knowing the shape and dtype of the variable
  being initialized.
  With `distribution="truncated_normal" or "untruncated_normal"`, samples are
  drawn from a truncated/untruncated normal distribution with a mean of zero and
  a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`
  where n is:
  - number of input units in the weight tensor, if mode = "fan_in"
  - number of output units, if mode = "fan_out"
  - average of the numbers of input and output units, if mode = "fan_avg"
  With `distribution="uniform"`, samples are drawn from a uniform distribution
  within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
  Examples:
  >>> def make_variables(k, initializer):
  ...   return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
  ...           tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
  >>> v1, v2 = make_variables(3, tf.initializers.VarianceScaling(scale=1.))
  >>> v1
  <tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)>
  >>> v2
  <tf.Variable ... shape=(3, 3) ... numpy=
  ...
  >>> make_variables(4, tf.initializers.VarianceScaling(distribution='uniform'))
  (<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
  Args:
    scale: Scaling factor (positive float).
    mode: One of "fan_in", "fan_out", "fan_avg".
    distribution: Random distribution to use. One of "truncated_normal",
      "untruncated_normal" and "uniform".
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.
  Raises:
    ValueError: In case of an invalid value for the "scale", mode" or
      "distribution" arguments.
  """
  def __init__(self,
               scale=1.0,
               mode="fan_in",
               distribution="truncated_normal",
               seed=None):
    # Validate eagerly so misconfiguration fails at construction, not at call.
    if scale <= 0.:
      raise ValueError("Argument `scale` must be a positive float. Received: "
                       f"{scale}")
    if mode not in {"fan_in", "fan_out", "fan_avg"}:
      raise ValueError("Argument `mode` should be one of ('fan_in', 'fan_out', "
                       f"'fan_avg'). Received: {mode}")
    distribution = distribution.lower()
    # Compatibility with keras-team/keras.
    if distribution == "normal":
      distribution = "truncated_normal"
    if distribution not in {"uniform", "truncated_normal",
                            "untruncated_normal"}:
      raise ValueError("Argument `distribution` should be one of ('uniform', "
                       "'truncated_normal', 'untruncated_normal'). Received: "
                       f"{distribution}")
    self.scale = scale
    self.mode = mode
    self.distribution = distribution
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)
  def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.
    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported.
      **kwargs: Additional keyword arguments.
    Raises:
      ValueError: If the dtype is not floating point
    """
    self._validate_kwargs(kwargs)
    dtype = _assert_float_dtype(dtype)
    scale = self.scale
    # fan_in/fan_out are computed from the *full* shape, even when only a
    # partition of the variable is being initialized below.
    fan_in, fan_out = _compute_fans(shape)
    if _PARTITION_SHAPE in kwargs:
      shape = kwargs[_PARTITION_SHAPE]
    if self.mode == "fan_in":
      scale /= max(1., fan_in)
    elif self.mode == "fan_out":
      scale /= max(1., fan_out)
    else:
      scale /= max(1., (fan_in + fan_out) / 2.)
    if self.distribution == "truncated_normal":
      # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
      # Dividing by it restores unit std after truncation at +/-2 stddev.
      stddev = math.sqrt(scale) / .87962566103423978
      return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)
    elif self.distribution == "untruncated_normal":
      stddev = math.sqrt(scale)
      return self._random_generator.random_normal(shape, 0.0, stddev, dtype)
    else:
      # Uniform on [-limit, limit] has variance limit**2 / 3 == scale.
      limit = math.sqrt(3.0 * scale)
      return self._random_generator.random_uniform(shape, -limit, limit, dtype)
  def get_config(self):
    # Serializable constructor arguments, for Keras-style round-tripping.
    return {
        "scale": self.scale,
        "mode": self.mode,
        "distribution": self.distribution,
        "seed": self.seed
    }
|
VarianceScaling
|
python
|
scipy__scipy
|
scipy/spatial/tests/test_kdtree.py
|
{
"start": 5580,
"end": 5743
}
|
class ____(_Test_small):
    """Re-runs the small-dataset KDTree tests with ``leafsize=1``.

    A leaf size of 1 forces the tree to split all the way down, so the
    non-leaf (inner-node) traversal paths are exercised.
    """
    def setup_method(self):
        super().setup_method()
        # Rebuild the tree from the parent's data with the smallest leaf size.
        self.kdtree = self.kdtree_type(self.data, leafsize=1)
|
_Test_small_nonleaf
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/fastapi/app_testing/tutorial001_py310/main.py
|
{
"start": 267,
"end": 362
}
|
class ____(HeroBase, table=True):
    # Surrogate primary key; stays None until the row is persisted and the
    # database assigns an id.
    id: int | None = Field(default=None, primary_key=True)
|
Hero
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py
|
{
"start": 16757,
"end": 17195
}
|
class ____(RkiCovidStream, ABC):
    """Base stream for per-German-state RKI endpoints.

    The API responds with ``{"data": {"<abbrev>": {"name": ..., "history": [...]}}}``;
    each history entry is flattened into its own record, annotated with the
    state's name and abbreviation.
    """
    def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
        """Yield one record per history entry, tagged with state identifiers.

        Fix: the original parsed ``response.json()`` three times per response;
        the body is now decoded once and reused.
        """
        data = response.json().get("data")
        if data:
            for key, value in data.items():
                for record in value.get("history"):
                    # Keep state context on each flattened record.
                    record.update({"name": value.get("name"), "abbreviation": key})
                    yield record
        # Preserved from the original: in a generator this only sets
        # StopIteration.value, which Airbyte ignores.
        return [{}]
|
ByStateRkiCovidStream
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/ops/load_op.py
|
{
"start": 6094,
"end": 7087
}
|
class ____(dataset_ops.DatasetSource):
  """A dataset that loads previously saved dataset."""
  def __init__(
      self,
      path: str,
      element_spec: Any,
      compression: str,
      reader_func: Callable[[dataset_ops.Dataset], dataset_ops.Dataset]):
    # path: location of the saved dataset; element_spec: structure of its
    # elements; compression: codec used when the dataset was saved.
    self._path = path
    self._element_spec = element_spec
    self._compression = compression
    # Wrap the user callback so it can be serialized into the dataset graph.
    self._reader_func = structured_function.StructuredFunctionWrapper(
        reader_func,
        "load()",
        # Dataset of datasets of input elements
        input_structure=dataset_ops.DatasetSpec(
            dataset_ops.DatasetSpec(self._element_spec)))
    # Build the underlying op; captured inputs of the wrapped function are
    # forwarded as extra arguments.
    variant_tensor = ged_ops.load_dataset(
        path,
        reader_func_other_args=self._reader_func.function.captured_inputs,
        compression=compression,
        reader_func=self._reader_func.function,
        **self._flat_structure)
    super().__init__(variant_tensor)
  @property
  def element_spec(self) -> Any:
    # Type spec describing each element produced by this dataset.
    return self._element_spec
|
_LoadDataset
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/writeonly.py
|
{
"start": 18443,
"end": 22635
}
|
class ____(_AbstractCollectionWriter[_T]):
    """Write-only collection which can synchronize changes into the
    attribute event system.
    The :class:`.WriteOnlyCollection` is used in a mapping by
    using the ``"write_only"`` lazy loading strategy with
    :func:`_orm.relationship`. For background on this configuration,
    see :ref:`write_only_relationship`.
    .. versionadded:: 2.0
    .. seealso::
        :ref:`write_only_relationship`
    """
    __slots__ = (
        "instance",
        "attr",
        "_where_criteria",
        "_from_obj",
        "_order_by_clauses",
    )
    def __iter__(self) -> NoReturn:
        # Iteration is deliberately unsupported: the collection is never
        # loaded into memory; callers must emit SQL via select().
        raise TypeError(
            "WriteOnly collections don't support iteration in-place; "
            "to query for collection items, use the select() method to "
            "produce a SQL statement and execute it with session.scalars()."
        )
    def select(self) -> Select[_T]:
        """Produce a :class:`_sql.Select` construct that represents the
        rows within this instance-local :class:`_orm.WriteOnlyCollection`.
        """
        # Base SELECT against the related mapper, scoped by the relationship's
        # instance-local criteria captured at attribute access time.
        stmt = select(self.attr.target_mapper).where(*self._where_criteria)
        if self._from_obj:
            stmt = stmt.select_from(*self._from_obj)
        if self._order_by_clauses:
            stmt = stmt.order_by(*self._order_by_clauses)
        return stmt
    def insert(self) -> Insert:
        """For one-to-many collections, produce a :class:`_dml.Insert` which
        will insert new rows in terms of this this instance-local
        :class:`_orm.WriteOnlyCollection`.
        This construct is only supported for a :class:`_orm.Relationship`
        that does **not** include the :paramref:`_orm.relationship.secondary`
        parameter. For relationships that refer to a many-to-many table,
        use ordinary bulk insert techniques to produce new objects, then
        use :meth:`_orm.AbstractCollectionWriter.add_all` to associate them
        with the collection.
        """
        state = inspect(self.instance)
        mapper = state.mapper
        prop = mapper._props[self.attr.key]
        if prop.direction is not RelationshipDirection.ONETOMANY:
            raise exc.InvalidRequestError(
                "Write only bulk INSERT only supported for one-to-many "
                "collections; for many-to-many, use a separate bulk "
                "INSERT along with add_all()."
            )
        # Pre-populate the child-side foreign key columns from the parent's
        # synchronized attributes; values are resolved lazily via bindparam
        # callables so flush-time state is used.
        dict_: Dict[str, Any] = {}
        for l, r in prop.synchronize_pairs:
            fn = prop._get_attr_w_warn_on_none(
                mapper,
                state,
                state.dict,
                l,
            )
            dict_[r.key] = bindparam(None, callable_=fn)
        return insert(self.attr.target_mapper).values(**dict_)
    def update(self) -> Update:
        """Produce a :class:`_dml.Update` which will refer to rows in terms
        of this instance-local :class:`_orm.WriteOnlyCollection`.
        """
        return update(self.attr.target_mapper).where(*self._where_criteria)
    def delete(self) -> Delete:
        """Produce a :class:`_dml.Delete` which will refer to rows in terms
        of this instance-local :class:`_orm.WriteOnlyCollection`.
        """
        return delete(self.attr.target_mapper).where(*self._where_criteria)
    def add_all(self, iterator: Iterable[_T]) -> None:
        """Add an iterable of items to this :class:`_orm.WriteOnlyCollection`.
        The given items will be persisted to the database in terms of
        the parent instance's collection on the next flush.
        """
        self._add_all_impl(iterator)
    def add(self, item: _T) -> None:
        """Add an item to this :class:`_orm.WriteOnlyCollection`.
        The given item will be persisted to the database in terms of
        the parent instance's collection on the next flush.
        """
        self._add_all_impl([item])
    def remove(self, item: _T) -> None:
        """Remove an item from this :class:`_orm.WriteOnlyCollection`.
        The given item will be removed from the parent instance's collection on
        the next flush.
        """
        self._remove_impl(item)
|
WriteOnlyCollection
|
python
|
mlflow__mlflow
|
mlflow/genai/label_schemas/label_schemas.py
|
{
"start": 3875,
"end": 4720
}
|
class ____(InputType):
    """A free-form text box for collecting assessments from stakeholders.
    .. note::
        This functionality is only available in Databricks. Please run
        `pip install mlflow[databricks]` to use it.
    """
    max_length: int | None = None
    """Maximum character length for the text input. None means no limit."""
    def _to_databricks_input(self) -> "_InputText":
        """Convert to the internal Databricks input type."""
        # Imported lazily: the databricks extra is optional.
        from databricks.agents.review_app import label_schemas as _label_schemas
        return _label_schemas.InputText(max_length=self.max_length)
    @classmethod
    def _from_databricks_input(cls, input_obj: "_InputText") -> "InputText":
        """Create from the internal Databricks input type."""
        return cls(max_length=input_obj.max_length)
@dataclass
|
InputText
|
python
|
huggingface__transformers
|
src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
|
{
"start": 4439,
"end": 10176
}
|
class ____(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[BlenderbotSmallConfig] = None,
        layer_idx: Optional[int] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config
        # Heads must evenly partition the embedding dimension.
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # Standard 1/sqrt(d_head) attention scaling.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal
        self.layer_idx = layer_idx
        if layer_idx is None and self.is_decoder:
            logger.warning_once(
                f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
                "will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        cache_position: Optional[torch.Tensor] = None,
        # TODO: we need a refactor so that the different attention modules can get their specific kwargs
        # ATM, we have mixed things encoder, decoder, and encoder-decoder attn
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        # determine input shapes
        bsz, tgt_len = hidden_states.shape[:-1]
        src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
        # -1 lets view() infer the head count; last axis becomes head_dim.
        q_input_shape = (bsz, tgt_len, -1, self.head_dim)
        kv_input_shape = (bsz, src_len, -1, self.head_dim)
        # get query proj
        query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
        is_updated = False
        if past_key_values is not None:
            if isinstance(past_key_values, EncoderDecoderCache):
                is_updated = past_key_values.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    # after the first generated id, we can subsequently re-use all key/value_states from cache
                    curr_past_key_values = past_key_values.cross_attention_cache
                else:
                    curr_past_key_values = past_key_values.self_attention_cache
            else:
                curr_past_key_values = past_key_values
        current_states = key_value_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_values is not None and is_updated:
            # reuse k,v, cross_attentions
            key_states = curr_past_key_values.layers[self.layer_idx].keys
            value_states = curr_past_key_values.layers[self.layer_idx].values
        else:
            key_states = self.k_proj(current_states)
            value_states = self.v_proj(current_states)
            key_states = key_states.view(*kv_input_shape).transpose(1, 2)
            value_states = value_states.view(*kv_input_shape).transpose(1, 2)
            if past_key_values is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = curr_past_key_values.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )
                # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
                if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
                    past_key_values.is_updated[self.layer_idx] = True
        # Dispatch to the configured attention backend (eager by default).
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.dropout,
            scaling=self.scaling,
            output_attentions=output_attentions,
            **kwargs,
        )
        # Merge heads back into (batch, seq, embed_dim) and project out.
        attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL
|
BlenderbotSmallAttention
|
python
|
huggingface__transformers
|
tests/models/mobilebert/test_modeling_mobilebert.py
|
{
"start": 1554,
"end": 10667
}
|
class ____:
    """Builds MobileBert configs and dummy inputs, and runs shape-only checks
    for each task head (MLM, NSP, pretraining, QA, classification, token
    classification, multiple choice)."""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # parent is the unittest.TestCase-like object whose assertions we use.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels); optional pieces are None when disabled."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        # Small config so the tests run fast on CPU.
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported calling conventions.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        # NSP is a binary decision, hence 2 logits.
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each example across the choice dimension: (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() as (config, inputs_dict) for
        the shared model-tester mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
|
MobileBertModelTester
|
python
|
pydantic__pydantic
|
tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail.py
|
{
"start": 2326,
"end": 2599
}
|
class ____(BaseModel):  # NOTE: intentionally invalid config; the MYPY lines below are the expected checker output
    model_config = ConfigDict(from_attributes=list)  # type: ignore[typeddict-item]
    # MYPY: error: Invalid value for "Config.from_attributes" [pydantic-config]
    # MYPY: note: Error code "pydantic-config" not covered by "type: ignore" comment
|
BadConfig2
|
python
|
keras-team__keras
|
keras/src/layers/core/identity_test.py
|
{
"start": 145,
"end": 1125
}
|
class ____(testing.TestCase):
    """Checks that layers.Identity passes inputs through unchanged for both
    dense and sparse tensors."""
    @parameterized.named_parameters(
        [
            {"testcase_name": "dense", "sparse": False},
            {"testcase_name": "sparse", "sparse": True},
        ]
    )
    @pytest.mark.requires_trainable_backend
    def test_identity_basics(self, sparse):
        if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
            pytest.skip("Backend does not support sparse tensors.")
        # Identity must preserve shape/sparsity and carry no weights or losses.
        self.run_layer_test(
            layers.Identity,
            init_kwargs={},
            input_shape=(2, 3),
            input_sparse=sparse,
            expected_output_shape=(2, 3),
            expected_output_sparse=sparse,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            # Training on sparse inputs is not supported by the generic check.
            run_training_check=not sparse,
            supports_masking=True,
            assert_built_after_instantiation=True,
        )
|
IdentityTest
|
python
|
Netflix__metaflow
|
metaflow/tutorials/05-hello-cloud/hello-cloud.py
|
{
"start": 57,
"end": 1596
}
|
class ____(FlowSpec):
    """
    A flow where Metaflow prints 'Metaflow says Hi from the cloud!'
    Run this flow to validate your Kubernetes configuration.
    """
    @step
    def start(self):
        """
        The 'start' step is a regular step, so runs locally on the machine from
        which the flow is executed.
        """
        # Imported here so the module loads even without a configured provider.
        from metaflow import get_metadata
        print("HelloCloud is starting.")
        print("")
        print("Using metadata provider: %s" % get_metadata())
        print("")
        print("The start step is running locally. Next, the ")
        print("'hello' step will run remotely on Kubernetes. ")
        self.next(self.hello)
    @kubernetes(cpu=1, memory=500)
    @retry
    @step
    def hello(self):
        """
        This steps runs remotely on Kubernetes using 1 virtual CPU and 500Mb of
        memory. Since we are now using a remote metadata service and data
        store, the flow information and artifacts are available from
        anywhere. The step also uses the retry decorator, so that if something
        goes wrong, the step will be automatically retried.
        """
        # Stored as an artifact, so it is retrievable from any client.
        self.message = "Hi from the cloud!"
        print("Metaflow says: %s" % self.message)
        self.next(self.end)
    @step
    def end(self):
        """
        The 'end' step is a regular step, so runs locally on the machine from
        which the flow is executed.
        """
        print("HelloCloud is finished.")
if __name__ == "__main__":
    HelloCloudFlow()
|
HelloCloudFlow
|
python
|
pallets__flask
|
src/flask/testing.py
|
{
"start": 642,
"end": 3374
}
|
class ____(werkzeug.test.EnvironBuilder):
    """An :class:`~werkzeug.test.EnvironBuilder`, that takes defaults from the
    application.
    :param app: The Flask application to configure the environment from.
    :param path: URL path being requested.
    :param base_url: Base URL where the app is being served, which
        ``path`` is relative to. If not given, built from
        :data:`PREFERRED_URL_SCHEME`, ``subdomain``,
        :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.
    :param subdomain: Subdomain name to append to :data:`SERVER_NAME`.
    :param url_scheme: Scheme to use instead of
        :data:`PREFERRED_URL_SCHEME`.
    :param json: If given, this is serialized as JSON and passed as
        ``data``. Also defaults ``content_type`` to
        ``application/json``.
    :param args: other positional arguments passed to
        :class:`~werkzeug.test.EnvironBuilder`.
    :param kwargs: other keyword arguments passed to
        :class:`~werkzeug.test.EnvironBuilder`.
    """
    def __init__(
        self,
        app: Flask,
        path: str = "/",
        base_url: str | None = None,
        subdomain: str | None = None,
        url_scheme: str | None = None,
        *args: t.Any,
        **kwargs: t.Any,
    ) -> None:
        # base_url is mutually exclusive with subdomain/url_scheme: either the
        # caller gives the full base, or pieces for us to assemble.
        assert not (base_url or subdomain or url_scheme) or (
            base_url is not None
        ) != bool(subdomain or url_scheme), (
            'Cannot pass "subdomain" or "url_scheme" with "base_url".'
        )
        if base_url is None:
            # Assemble the base URL from the app's config, falling back to
            # localhost when SERVER_NAME is unset.
            http_host = app.config.get("SERVER_NAME") or "localhost"
            app_root = app.config["APPLICATION_ROOT"]
            if subdomain:
                http_host = f"{subdomain}.{http_host}"
            if url_scheme is None:
                url_scheme = app.config["PREFERRED_URL_SCHEME"]
            # A path given with its own scheme/netloc overrides the defaults.
            url = urlsplit(path)
            base_url = (
                f"{url.scheme or url_scheme}://{url.netloc or http_host}"
                f"/{app_root.lstrip('/')}"
            )
            path = url.path
            if url.query:
                path = f"{path}?{url.query}"
        self.app = app
        super().__init__(path, base_url, *args, **kwargs)
    def json_dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
        """Serialize ``obj`` to a JSON-formatted string.
        The serialization will be configured according to the config associated
        with this EnvironBuilder's ``app``.
        """
        return self.app.json.dumps(obj, **kwargs)
# Cached Werkzeug version string; empty until first looked up.
_werkzeug_version = ""
def _get_werkzeug_version() -> str:
    """Return the installed Werkzeug version, caching it after the first lookup."""
    global _werkzeug_version
    if _werkzeug_version:
        return _werkzeug_version
    _werkzeug_version = importlib.metadata.version("werkzeug")
    return _werkzeug_version
|
EnvironBuilder
|
python
|
numba__numba
|
setup.py
|
{
"start": 1938,
"end": 16996
}
|
class ____(build_ext):
    """`build_ext` subclass adding Numba-specific flags: --werror, --wall and
    --noopt (disable optimization), applied to every C extension."""
    user_options = build_ext.user_options + numba_be_user_options
    boolean_options = build_ext.boolean_options + ['werror', 'wall', 'noopt']
    def initialize_options(self):
        super().initialize_options()
        # All extra flags default to off; enabled from the command line.
        self.werror = 0
        self.wall = 0
        self.noopt = 0
    def run(self):
        extra_compile_args = []
        if self.noopt:
            # MSVC and GCC/Clang spell "no optimization" differently.
            if sys.platform == 'win32':
                extra_compile_args.append('/Od')
            else:
                extra_compile_args.append('-O0')
        if self.werror:
            extra_compile_args.append('-Werror')
        if self.wall:
            extra_compile_args.append('-Wall')
        # Propagate the collected flags to every extension, then build as usual.
        for ext in self.extensions:
            ext.extra_compile_args.extend(extra_compile_args)
        super().run()
cmdclass['build_ext'] = NumbaBuildExt
def is_building():
    """
    Parse the setup.py command and return whether a build is requested.
    If False is returned, only an informational command is run.
    If True is returned, information about C extensions will have to
    be passed to the setup() function.
    """
    if len(sys.argv) < 2:
        # User forgot to give an argument probably, let setuptools handle that.
        return True
    # Commands that require full C-extension information to be configured.
    build_commands = {
        'build', 'build_py', 'build_ext', 'build_clib', 'build_scripts',
        'install', 'install_lib', 'install_headers', 'install_scripts',
        'install_data', 'sdist', 'bdist', 'bdist_dumb', 'bdist_rpm',
        'bdist_wininst', 'check', 'build_doc', 'bdist_wheel', 'bdist_egg',
        'develop', 'easy_install', 'test', 'editable_wheel',
    }
    # True iff at least one command-line argument is a build command.
    return not build_commands.isdisjoint(sys.argv[1:])
def get_ext_modules():
"""
Return a list of Extension instances for the setup() call.
"""
# Note we don't import NumPy at the toplevel, since setup.py
# should be able to run without NumPy for pip to discover the
# build dependencies. Need NumPy headers and libm linkage.
import numpy as np
np_compile_args = {'include_dirs': [np.get_include(),],}
if sys.platform != 'win32':
np_compile_args['libraries'] = ['m',]
ext_devicearray = Extension(name='numba._devicearray',
sources=['numba/_devicearray.cpp'],
depends=['numba/_pymodule.h',
'numba/_devicearray.h'],
include_dirs=['numba'],
extra_compile_args=['-std=c++11'],
)
ext_dynfunc = Extension(name='numba._dynfunc',
sources=['numba/_dynfuncmod.c'],
depends=['numba/_pymodule.h',
'numba/_dynfunc.c'])
dispatcher_sources = [
'numba/_dispatcher.cpp',
'numba/_typeof.cpp',
'numba/_hashtable.cpp',
'numba/core/typeconv/typeconv.cpp',
]
ext_dispatcher = Extension(name="numba._dispatcher",
sources=dispatcher_sources,
depends=["numba/_pymodule.h",
"numba/_typeof.h",
"numba/_hashtable.h"],
extra_compile_args=['-std=c++11'],
**np_compile_args)
ext_helperlib = Extension(name="numba._helperlib",
sources=["numba/_helpermod.c",
"numba/cext/utils.c",
"numba/cext/dictobject.c",
"numba/cext/listobject.c",
],
# numba/_random.c needs pthreads
extra_link_args=install_name_tool_fixer +
extra_link_args,
depends=["numba/_pymodule.h",
"numba/_helperlib.c",
"numba/_lapack.c",
"numba/_random.c",
"numba/mathnames.inc",
],
**np_compile_args)
ext_typeconv = Extension(name="numba.core.typeconv._typeconv",
sources=["numba/core/typeconv/typeconv.cpp",
"numba/core/typeconv/_typeconv.cpp"],
depends=["numba/_pymodule.h"],
extra_compile_args=['-std=c++11'],
)
ext_np_ufunc = Extension(name="numba.np.ufunc._internal",
sources=["numba/np/ufunc/_internal.c"],
depends=["numba/np/ufunc/_ufunc.c",
"numba/np/ufunc/_internal.h",
"numba/_pymodule.h"],
**np_compile_args)
ext_npyufunc_num_threads = Extension(name="numba.np.ufunc._num_threads",
sources=[
"numba/np/ufunc/_num_threads.c"],
depends=["numba/_pymodule.h"],
)
ext_np_ufunc_backends = []
def check_file_at_path(path2file):
"""
Takes a list as a path, a single glob (*) is permitted as an entry which
indicates that expansion at this location is required (i.e. version
might not be known).
"""
found = None
path2check = [os.path.split(os.path.split(sys.executable)[0])[0]]
path2check += [os.path.dirname(sys.executable)] # for GHA win toolcache: ...\Python\<ver>\x64
path2check += [os.getenv(n, '') for n in ['CONDA_PREFIX', 'PREFIX']]
if sys.platform.startswith('win'):
path2check += [os.path.join(p, 'Library') for p in path2check]
for p in path2check:
if p:
if '*' in path2file:
globloc = path2file.index('*')
searchroot = os.path.join(*path2file[:globloc])
try:
potential_locs = os.listdir(os.path.join(p, searchroot))
except BaseException:
continue
searchfor = path2file[globloc + 1:]
for x in potential_locs:
potpath = os.path.join(p, searchroot, x, *searchfor)
if os.path.isfile(potpath):
found = p # the latest is used
elif os.path.isfile(os.path.join(p, *path2file)):
found = p # the latest is used
return found
# Set various flags for use in TBB and openmp. On OSX, also find OpenMP!
have_openmp = True
if sys.platform.startswith('win'):
if 'MSC' in sys.version:
cpp11flags = []
ompcompileflags = ['-openmp']
omplinkflags = []
else:
# For non-MSVC toolchain e.g. gcc and clang with mingw
cpp11flags = ['-std=c++11']
ompcompileflags = ['-fopenmp']
omplinkflags = ['-fopenmp']
elif sys.platform.startswith('darwin'):
cpp11flags = ['-std=c++11']
# This is a bit unusual but necessary...
# llvm (clang) OpenMP is used for headers etc at compile time
# Intel OpenMP (libiomp5) provides the link library.
# They are binary compatible and may not safely coexist in a process, as
# libiomp5 is more prevalent and often linked in for NumPy it is used
# here!
# Apple clang requires -Xclang -fopenmp, conda clang uses -fopenmp
try:
is_apple_clang = b'Apple' in subprocess.check_output(['clang', '--version'])
except:
is_apple_clang = False
if is_apple_clang:
ompcompileflags = ['-Xclang', '-fopenmp']
omplinkflags = ['-Xclang', '-fopenmp', '-liomp5']
else:
ompcompileflags = ['-fopenmp']
omplinkflags = ['-fopenmp=libiomp5']
omppath = ['lib', 'clang', '*', 'include', 'omp.h']
have_openmp = check_file_at_path(omppath)
else:
cpp11flags = ['-std=c++11']
ompcompileflags = ['-fopenmp']
if platform.machine() == 'ppc64le':
omplinkflags = ['-fopenmp']
else:
omplinkflags = ['-fopenmp']
# Disable tbb if forced by user with NUMBA_DISABLE_TBB=1
if os.getenv("NUMBA_DISABLE_TBB"):
print("TBB disabled")
else:
# Search for Intel TBB, first check env var TBBROOT then conda locations
tbb_root = os.getenv('TBBROOT')
if not tbb_root:
tbb_root = check_file_at_path(['include', 'tbb', 'tbb.h'])
if tbb_root:
print("Using Intel TBB from:", tbb_root)
ext_np_ufunc_tbb_backend = Extension(
name='numba.np.ufunc.tbbpool',
sources=[
'numba/np/ufunc/tbbpool.cpp',
'numba/np/ufunc/gufunc_scheduler.cpp',
],
depends=['numba/np/ufunc/workqueue.h'],
include_dirs=[os.path.join(tbb_root, 'include')],
extra_compile_args=cpp11flags,
extra_link_args=extra_link_args,
libraries=['tbb'], # TODO: if --debug or -g, use 'tbb_debug'
library_dirs=[
# for Linux
os.path.join(tbb_root, 'lib', 'intel64', 'gcc4.4'),
# for MacOS
os.path.join(tbb_root, 'lib'),
# for Windows
os.path.join(tbb_root, 'lib', 'intel64', 'vc_mt'),
],
)
ext_np_ufunc_backends.append(ext_np_ufunc_tbb_backend)
else:
print("TBB not found")
# Disable OpenMP if forced by user with NUMBA_DISABLE_OPENMP=1
if os.getenv('NUMBA_DISABLE_OPENMP'):
print("OpenMP disabled")
elif have_openmp:
print("Using OpenMP from:", have_openmp)
# OpenMP backed work queue
ext_np_ufunc_omppool_backend = Extension(
name='numba.np.ufunc.omppool',
sources=[
'numba/np/ufunc/omppool.cpp',
'numba/np/ufunc/gufunc_scheduler.cpp',
],
depends=['numba/np/ufunc/workqueue.h'],
extra_compile_args=ompcompileflags + cpp11flags,
extra_link_args=omplinkflags,
)
ext_np_ufunc_backends.append(ext_np_ufunc_omppool_backend)
else:
print("OpenMP not found")
# Build the Numba workqueue implementation irrespective of whether the TBB
# version is built. Users can select a backend via env vars.
ext_np_ufunc_workqueue_backend = Extension(
name='numba.np.ufunc.workqueue',
sources=['numba/np/ufunc/workqueue.c',
'numba/np/ufunc/gufunc_scheduler.cpp'],
depends=['numba/np/ufunc/workqueue.h'],
extra_link_args=extra_link_args)
ext_np_ufunc_backends.append(ext_np_ufunc_workqueue_backend)
ext_mviewbuf = Extension(name='numba.mviewbuf',
extra_link_args=install_name_tool_fixer,
sources=['numba/mviewbuf.c'])
ext_nrt_python = Extension(name='numba.core.runtime._nrt_python',
sources=['numba/core/runtime/_nrt_pythonmod.c',
'numba/core/runtime/nrt.cpp'],
depends=['numba/core/runtime/nrt.h',
'numba/_pymodule.h',
'numba/core/runtime/_nrt_python.c'],
**np_compile_args)
ext_jitclass_box = Extension(name='numba.experimental.jitclass._box',
sources=['numba/experimental/jitclass/_box.c'],
depends=['numba/experimental/_pymodule.h'],
)
ext_cuda_extras = Extension(name='numba.cuda.cudadrv._extras',
sources=['numba/cuda/cudadrv/_extras.c'],
depends=['numba/_pymodule.h'],
include_dirs=["numba"])
ext_modules = [ext_dynfunc, ext_dispatcher, ext_helperlib,
ext_typeconv, ext_np_ufunc, ext_npyufunc_num_threads,
ext_mviewbuf, ext_nrt_python, ext_jitclass_box,
ext_cuda_extras, ext_devicearray]
ext_modules += ext_np_ufunc_backends
return ext_modules
packages = find_packages(include=["numba", "numba.*"])
build_requires = ['numpy >={}'.format(min_numpy_build_version)]
install_requires = [
'llvmlite >={},<{}'.format(min_llvmlite_version, max_llvmlite_version),
'numpy >={}'.format(min_numpy_run_version),
]
metadata = dict(
name='numba',
description="compiling Python code using LLVM",
version=versioneer.get_version(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
"Topic :: Software Development :: Compilers",
],
package_data={
# HTML templates for type annotations
"numba.core.annotations": ["*.html"],
# Various test data
"numba.cuda.tests.data": ["*.ptx", "*.cu"],
"numba.cuda.tests.doc_examples.ffi": ["*.cu"],
"numba.tests": ["pycc_distutils_usecase/*.py"],
# Some C files are needed by pycc
"numba": ["*.c", "*.h"],
"numba.pycc": ["*.c", "*.h"],
"numba.core.runtime": ["*.cpp", "*.c", "*.h"],
"numba.cext": ["*.c", "*.h"],
# numba gdb hook init command language file
"numba.misc": ["cmdlang.gdb"],
"numba.typed": ["py.typed"],
"numba.cuda" : ["cpp_function_wrappers.cu", "cuda_fp16.h",
"cuda_fp16.hpp"]
},
scripts=["bin/numba"],
url="https://numba.pydata.org",
packages=packages,
setup_requires=build_requires,
install_requires=install_requires,
python_requires=">={}".format(min_python_version),
license="BSD",
cmdclass=cmdclass,
)
with open('README.rst') as f:
metadata['long_description'] = f.read()
if is_building():
metadata['ext_modules'] = get_ext_modules()
setup(**metadata)
|
NumbaBuildExt
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 68724,
"end": 68913
}
|
class ____(BaseModel, extra="forbid"):
"""
Full-text phrase match of the string.
"""
phrase: str = Field(..., description="Full-text phrase match of the string.")
|
MatchPhrase
|
python
|
django__django
|
tests/migrations/test_migrations_no_changes/0003_third.py
|
{
"start": 43,
"end": 1256
}
|
class ____(migrations.Migration):
dependencies = [
("migrations", "0002_second"),
]
operations = [
migrations.CreateModel(
name="ModelWithCustomBase",
fields=[
(
"id",
models.BigAutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
],
options={},
bases=(models.Model,),
),
migrations.CreateModel(
name="UnmigratedModel",
fields=[
(
"id",
models.BigAutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
],
options={},
bases=(models.Model,),
),
migrations.DeleteModel(
name="Author",
),
migrations.DeleteModel(
name="Book",
),
]
|
Migration
|
python
|
crytic__slither
|
slither/detectors/abstract_detector.py
|
{
"start": 652,
"end": 1921
}
|
class ____(ComparableEnum):
HIGH = 0
MEDIUM = 1
LOW = 2
INFORMATIONAL = 3
OPTIMIZATION = 4
UNIMPLEMENTED = 999
classification_colors: Dict[DetectorClassification, Callable[[str], str]] = {
DetectorClassification.INFORMATIONAL: green,
DetectorClassification.OPTIMIZATION: green,
DetectorClassification.LOW: green,
DetectorClassification.MEDIUM: yellow,
DetectorClassification.HIGH: red,
}
classification_txt = {
DetectorClassification.INFORMATIONAL: "Informational",
DetectorClassification.OPTIMIZATION: "Optimization",
DetectorClassification.LOW: "Low",
DetectorClassification.MEDIUM: "Medium",
DetectorClassification.HIGH: "High",
}
def make_solc_versions(minor: int, patch_min: int, patch_max: int) -> List[str]:
"""
Create a list of solc version: [0.minor.patch_min .... 0.minor.patch_max]
"""
return [f"0.{minor}.{x}" for x in range(patch_min, patch_max + 1)]
ALL_SOLC_VERSIONS_04 = make_solc_versions(4, 0, 26)
ALL_SOLC_VERSIONS_05 = make_solc_versions(5, 0, 17)
ALL_SOLC_VERSIONS_06 = make_solc_versions(6, 0, 12)
ALL_SOLC_VERSIONS_07 = make_solc_versions(7, 0, 6)
# No VERSIONS_08 as it is still in dev
DETECTOR_INFO = List[Union[str, SupportedOutput]]
|
DetectorClassification
|
python
|
ray-project__ray
|
python/ray/util/client/server/server.py
|
{
"start": 3431,
"end": 38964
}
|
class ____(ray_client_pb2_grpc.RayletDriverServicer):
def __init__(self, ray_connect_handler: Callable):
"""Construct a raylet service
Args:
ray_connect_handler: Function to connect to ray cluster
"""
# Stores client_id -> (ref_id -> ObjectRef)
self.object_refs: Dict[str, Dict[bytes, ray.ObjectRef]] = defaultdict(dict)
# Stores client_id -> (client_ref_id -> ref_id (in self.object_refs))
self.client_side_ref_map: Dict[str, Dict[bytes, bytes]] = defaultdict(dict)
self.function_refs = {}
self.actor_refs: Dict[bytes, ray.ActorHandle] = {}
self.actor_owners: Dict[str, Set[bytes]] = defaultdict(set)
self.registered_actor_classes = {}
self.named_actors = set()
self.state_lock = threading.Lock()
self.ray_connect_handler = ray_connect_handler
self.response_caches: Dict[str, ResponseCache] = defaultdict(ResponseCache)
def Init(
self, request: ray_client_pb2.InitRequest, context=None
) -> ray_client_pb2.InitResponse:
if request.job_config:
job_config = pickle.loads(request.job_config)
job_config._client_job = True
else:
job_config = None
current_job_config = None
with disable_client_hook():
if ray.is_initialized():
worker = ray._private.worker.global_worker
current_job_config = worker.core_worker.get_job_config()
else:
extra_kwargs = json.loads(request.ray_init_kwargs or "{}")
try:
self.ray_connect_handler(job_config, **extra_kwargs)
except Exception as e:
logger.exception("Running Ray Init failed:")
return ray_client_pb2.InitResponse(
ok=False,
msg=f"Call to `ray.init()` on the server failed with: {e}",
)
if job_config is None:
return ray_client_pb2.InitResponse(ok=True)
# NOTE(edoakes): this code should not be necessary anymore because we
# only allow a single client/job per server. There is an existing test
# that tests the behavior of multiple clients with the same job config
# connecting to one server (test_client_init.py::test_num_clients),
# so I'm leaving it here for now.
job_config = job_config._get_proto_job_config()
# If the server has been initialized, we need to compare whether the
# runtime env is compatible.
if current_job_config:
job_uris = set(job_config.runtime_env_info.uris.working_dir_uri)
job_uris.update(job_config.runtime_env_info.uris.py_modules_uris)
current_job_uris = set(
current_job_config.runtime_env_info.uris.working_dir_uri
)
current_job_uris.update(
current_job_config.runtime_env_info.uris.py_modules_uris
)
if job_uris != current_job_uris and len(job_uris) > 0:
return ray_client_pb2.InitResponse(
ok=False,
msg="Runtime environment doesn't match "
f"request one {job_config.runtime_env_info.uris} "
f"current one {current_job_config.runtime_env_info.uris}",
)
return ray_client_pb2.InitResponse(ok=True)
@_use_response_cache
def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
try:
with disable_client_hook():
already_exists = ray.experimental.internal_kv._internal_kv_put(
request.key,
request.value,
overwrite=request.overwrite,
namespace=request.namespace,
)
except Exception as e:
return_exception_in_context(e, context)
already_exists = False
return ray_client_pb2.KVPutResponse(already_exists=already_exists)
def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
try:
with disable_client_hook():
value = ray.experimental.internal_kv._internal_kv_get(
request.key, namespace=request.namespace
)
except Exception as e:
return_exception_in_context(e, context)
value = b""
return ray_client_pb2.KVGetResponse(value=value)
@_use_response_cache
def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
try:
with disable_client_hook():
deleted_num = ray.experimental.internal_kv._internal_kv_del(
request.key,
del_by_prefix=request.del_by_prefix,
namespace=request.namespace,
)
except Exception as e:
return_exception_in_context(e, context)
deleted_num = 0
return ray_client_pb2.KVDelResponse(deleted_num=deleted_num)
def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
try:
with disable_client_hook():
keys = ray.experimental.internal_kv._internal_kv_list(
request.prefix, namespace=request.namespace
)
except Exception as e:
return_exception_in_context(e, context)
keys = []
return ray_client_pb2.KVListResponse(keys=keys)
def KVExists(self, request, context=None) -> ray_client_pb2.KVExistsResponse:
try:
with disable_client_hook():
exists = ray.experimental.internal_kv._internal_kv_exists(
request.key, namespace=request.namespace
)
except Exception as e:
return_exception_in_context(e, context)
exists = False
return ray_client_pb2.KVExistsResponse(exists=exists)
def ListNamedActors(
self, request, context=None
) -> ray_client_pb2.ClientListNamedActorsResponse:
with disable_client_hook():
actors = ray.util.list_named_actors(all_namespaces=request.all_namespaces)
return ray_client_pb2.ClientListNamedActorsResponse(
actors_json=json.dumps(actors)
)
def ClusterInfo(self, request, context=None) -> ray_client_pb2.ClusterInfoResponse:
resp = ray_client_pb2.ClusterInfoResponse()
resp.type = request.type
if request.type == ray_client_pb2.ClusterInfoType.CLUSTER_RESOURCES:
with disable_client_hook():
resources = ray.cluster_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(table=float_resources)
)
elif request.type == ray_client_pb2.ClusterInfoType.AVAILABLE_RESOURCES:
with disable_client_hook():
resources = ray.available_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(table=float_resources)
)
elif request.type == ray_client_pb2.ClusterInfoType.RUNTIME_CONTEXT:
ctx = ray_client_pb2.ClusterInfoResponse.RuntimeContext()
with disable_client_hook():
rtc = ray.get_runtime_context()
ctx.job_id = ray._common.utils.hex_to_binary(rtc.get_job_id())
ctx.node_id = ray._common.utils.hex_to_binary(rtc.get_node_id())
ctx.namespace = rtc.namespace
ctx.capture_client_tasks = (
rtc.should_capture_child_tasks_in_placement_group
)
ctx.gcs_address = rtc.gcs_address
ctx.runtime_env = rtc.get_runtime_env_string()
resp.runtime_context.CopyFrom(ctx)
else:
with disable_client_hook():
resp.json = self._return_debug_cluster_info(request, context)
return resp
def _return_debug_cluster_info(self, request, context=None) -> str:
"""Handle ClusterInfo requests that only return a json blob."""
data = None
if request.type == ray_client_pb2.ClusterInfoType.NODES:
data = ray.nodes()
elif request.type == ray_client_pb2.ClusterInfoType.IS_INITIALIZED:
data = ray.is_initialized()
elif request.type == ray_client_pb2.ClusterInfoType.TIMELINE:
data = ray.timeline()
elif request.type == ray_client_pb2.ClusterInfoType.PING:
data = {}
elif request.type == ray_client_pb2.ClusterInfoType.DASHBOARD_URL:
data = {"dashboard_url": ray._private.worker.get_dashboard_url()}
else:
raise TypeError("Unsupported cluster info type")
return json.dumps(data)
def release(self, client_id: str, id: bytes) -> bool:
with self.state_lock:
if client_id in self.object_refs:
if id in self.object_refs[client_id]:
logger.debug(f"Releasing object {id.hex()} for {client_id}")
del self.object_refs[client_id][id]
return True
if client_id in self.actor_owners:
if id in self.actor_owners[client_id]:
logger.debug(f"Releasing actor {id.hex()} for {client_id}")
self.actor_owners[client_id].remove(id)
if self._can_remove_actor_ref(id):
logger.debug(f"Deleting reference to actor {id.hex()}")
del self.actor_refs[id]
return True
return False
def release_all(self, client_id):
with self.state_lock:
self._release_objects(client_id)
self._release_actors(client_id)
# NOTE: Try to actually dereference the object and actor refs.
# Otherwise dereferencing will happen later, which may run concurrently
# with ray.shutdown() and will crash the process. The crash is a bug
# that should be fixed eventually.
gc.collect()
def _can_remove_actor_ref(self, actor_id_bytes):
no_owner = not any(
actor_id_bytes in actor_list for actor_list in self.actor_owners.values()
)
return no_owner and actor_id_bytes not in self.named_actors
def _release_objects(self, client_id):
if client_id not in self.object_refs:
logger.debug(f"Releasing client with no references: {client_id}")
return
count = len(self.object_refs[client_id])
del self.object_refs[client_id]
if client_id in self.client_side_ref_map:
del self.client_side_ref_map[client_id]
if client_id in self.response_caches:
del self.response_caches[client_id]
logger.debug(f"Released all {count} objects for client {client_id}")
def _release_actors(self, client_id):
if client_id not in self.actor_owners:
logger.debug(f"Releasing client with no actors: {client_id}")
return
count = 0
actors_to_remove = self.actor_owners.pop(client_id)
for id_bytes in actors_to_remove:
count += 1
if self._can_remove_actor_ref(id_bytes):
logger.debug(f"Deleting reference to actor {id_bytes.hex()}")
del self.actor_refs[id_bytes]
logger.debug(f"Released all {count} actors for client: {client_id}")
@_use_response_cache
def Terminate(self, req, context=None):
if req.WhichOneof("terminate_type") == "task_object":
try:
object_ref = self.object_refs[req.client_id][req.task_object.id]
with disable_client_hook():
ray.cancel(
object_ref,
force=req.task_object.force,
recursive=req.task_object.recursive,
)
except Exception as e:
return_exception_in_context(e, context)
elif req.WhichOneof("terminate_type") == "actor":
try:
actor_ref = self.actor_refs[req.actor.id]
with disable_client_hook():
ray.kill(actor_ref, no_restart=req.actor.no_restart)
except Exception as e:
return_exception_in_context(e, context)
else:
raise RuntimeError(
"Client requested termination without providing a valid terminate_type"
)
return ray_client_pb2.TerminateResponse(ok=True)
def _async_get_object(
self,
request: ray_client_pb2.GetRequest,
client_id: str,
req_id: int,
result_queue: queue.Queue,
context=None,
) -> Optional[ray_client_pb2.GetResponse]:
"""Attempts to schedule a callback to push the GetResponse to the
main loop when the desired object is ready. If there is some failure
in scheduling, a GetResponse will be immediately returned.
"""
if len(request.ids) != 1:
raise ValueError(
f"Async get() must have exactly 1 Object ID. Actual: {request}"
)
rid = request.ids[0]
ref = self.object_refs[client_id].get(rid, None)
if not ref:
return ray_client_pb2.GetResponse(
valid=False,
error=cloudpickle.dumps(
ValueError(
f"ClientObjectRef with id {rid} not found for "
f"client {client_id}"
)
),
)
try:
logger.debug("async get: %s" % ref)
with disable_client_hook():
def send_get_response(result: Any) -> None:
"""Pushes GetResponses to the main DataPath loop to send
to the client. This is called when the object is ready
on the server side."""
try:
serialized = dumps_from_server(result, client_id, self)
total_size = len(serialized)
assert total_size > 0, "Serialized object cannot be zero bytes"
total_chunks = math.ceil(
total_size / OBJECT_TRANSFER_CHUNK_SIZE
)
for chunk_id in range(request.start_chunk_id, total_chunks):
start = chunk_id * OBJECT_TRANSFER_CHUNK_SIZE
end = min(
total_size, (chunk_id + 1) * OBJECT_TRANSFER_CHUNK_SIZE
)
get_resp = ray_client_pb2.GetResponse(
valid=True,
data=serialized[start:end],
chunk_id=chunk_id,
total_chunks=total_chunks,
total_size=total_size,
)
chunk_resp = ray_client_pb2.DataResponse(
get=get_resp, req_id=req_id
)
result_queue.put(chunk_resp)
except Exception as exc:
get_resp = ray_client_pb2.GetResponse(
valid=False, error=cloudpickle.dumps(exc)
)
resp = ray_client_pb2.DataResponse(get=get_resp, req_id=req_id)
result_queue.put(resp)
ref._on_completed(send_get_response)
return None
except Exception as e:
return ray_client_pb2.GetResponse(valid=False, error=cloudpickle.dumps(e))
def GetObject(self, request: ray_client_pb2.GetRequest, context):
metadata = dict(context.invocation_metadata())
client_id = metadata.get("client_id")
if client_id is None:
yield ray_client_pb2.GetResponse(
valid=False,
error=cloudpickle.dumps(
ValueError("client_id is not specified in request metadata")
),
)
else:
yield from self._get_object(request, client_id)
def _get_object(self, request: ray_client_pb2.GetRequest, client_id: str):
objectrefs = []
for rid in request.ids:
ref = self.object_refs[client_id].get(rid, None)
if ref:
objectrefs.append(ref)
else:
yield ray_client_pb2.GetResponse(
valid=False,
error=cloudpickle.dumps(
ValueError(
f"ClientObjectRef {rid} is not found for client {client_id}"
)
),
)
return
try:
logger.debug("get: %s" % objectrefs)
with disable_client_hook():
items = ray.get(objectrefs, timeout=request.timeout)
except Exception as e:
yield ray_client_pb2.GetResponse(valid=False, error=cloudpickle.dumps(e))
return
serialized = dumps_from_server(items, client_id, self)
total_size = len(serialized)
assert total_size > 0, "Serialized object cannot be zero bytes"
total_chunks = math.ceil(total_size / OBJECT_TRANSFER_CHUNK_SIZE)
for chunk_id in range(request.start_chunk_id, total_chunks):
start = chunk_id * OBJECT_TRANSFER_CHUNK_SIZE
end = min(total_size, (chunk_id + 1) * OBJECT_TRANSFER_CHUNK_SIZE)
yield ray_client_pb2.GetResponse(
valid=True,
data=serialized[start:end],
chunk_id=chunk_id,
total_chunks=total_chunks,
total_size=total_size,
)
def PutObject(
self, request: ray_client_pb2.PutRequest, context=None
) -> ray_client_pb2.PutResponse:
"""gRPC entrypoint for unary PutObject"""
return self._put_object(
request.data, request.client_ref_id, "", request.owner_id, context
)
def _put_object(
self,
data: Union[bytes, bytearray],
client_ref_id: bytes,
client_id: str,
owner_id: bytes,
context=None,
):
"""Put an object in the cluster with ray.put() via gRPC.
Args:
data: Pickled data. Can either be bytearray if this is called
from the dataservicer, or bytes if called from PutObject.
client_ref_id: The id associated with this object on the client.
client_id: The client who owns this data, for tracking when to
delete this reference.
owner_id: The owner id of the object.
context: gRPC context.
"""
try:
obj = loads_from_client(data, self)
if owner_id:
owner = self.actor_refs[owner_id]
else:
owner = None
with disable_client_hook():
objectref = ray.put(obj, _owner=owner)
except Exception as e:
logger.exception("Put failed:")
return ray_client_pb2.PutResponse(
id=b"", valid=False, error=cloudpickle.dumps(e)
)
self.object_refs[client_id][objectref.binary()] = objectref
if len(client_ref_id) > 0:
self.client_side_ref_map[client_id][client_ref_id] = objectref.binary()
logger.debug("put: %s" % objectref)
return ray_client_pb2.PutResponse(id=objectref.binary(), valid=True)
def WaitObject(self, request, context=None) -> ray_client_pb2.WaitResponse:
object_refs = []
for rid in request.object_ids:
if rid not in self.object_refs[request.client_id]:
raise Exception(
"Asking for a ref not associated with this client: %s" % str(rid)
)
object_refs.append(self.object_refs[request.client_id][rid])
num_returns = request.num_returns
timeout = request.timeout
try:
with disable_client_hook():
ready_object_refs, remaining_object_refs = ray.wait(
object_refs,
num_returns=num_returns,
timeout=timeout if timeout != -1 else None,
)
except Exception as e:
# TODO(ameer): improve exception messages.
logger.error(f"Exception {e}")
return ray_client_pb2.WaitResponse(valid=False)
logger.debug(
"wait: %s %s" % (str(ready_object_refs), str(remaining_object_refs))
)
ready_object_ids = [
ready_object_ref.binary() for ready_object_ref in ready_object_refs
]
remaining_object_ids = [
remaining_object_ref.binary()
for remaining_object_ref in remaining_object_refs
]
return ray_client_pb2.WaitResponse(
valid=True,
ready_object_ids=ready_object_ids,
remaining_object_ids=remaining_object_ids,
)
def Schedule(
self,
task: ray_client_pb2.ClientTask,
arglist: List[Any],
kwargs: Dict[str, Any],
context=None,
) -> ray_client_pb2.ClientTaskTicket:
logger.debug(
"schedule: %s %s"
% (task.name, ray_client_pb2.ClientTask.RemoteExecType.Name(task.type))
)
try:
with disable_client_hook():
if task.type == ray_client_pb2.ClientTask.FUNCTION:
result = self._schedule_function(task, arglist, kwargs, context)
elif task.type == ray_client_pb2.ClientTask.ACTOR:
result = self._schedule_actor(task, arglist, kwargs, context)
elif task.type == ray_client_pb2.ClientTask.METHOD:
result = self._schedule_method(task, arglist, kwargs, context)
elif task.type == ray_client_pb2.ClientTask.NAMED_ACTOR:
result = self._schedule_named_actor(task, context)
else:
raise NotImplementedError(
"Unimplemented Schedule task type: %s"
% ray_client_pb2.ClientTask.RemoteExecType.Name(task.type)
)
result.valid = True
return result
except Exception as e:
logger.debug("Caught schedule exception", exc_info=True)
return ray_client_pb2.ClientTaskTicket(
valid=False, error=cloudpickle.dumps(e)
)
def _schedule_method(
self,
task: ray_client_pb2.ClientTask,
arglist: List[Any],
kwargs: Dict[str, Any],
context=None,
) -> ray_client_pb2.ClientTaskTicket:
actor_handle = self.actor_refs.get(task.payload_id)
if actor_handle is None:
raise Exception("Can't run an actor the server doesn't have a handle for")
method = getattr(actor_handle, task.name)
opts = decode_options(task.options)
if opts is not None:
method = method.options(**opts)
output = method.remote(*arglist, **kwargs)
ids = self.unify_and_track_outputs(output, task.client_id)
return ray_client_pb2.ClientTaskTicket(return_ids=ids)
def _schedule_actor(
self,
task: ray_client_pb2.ClientTask,
arglist: List[Any],
kwargs: Dict[str, Any],
context=None,
) -> ray_client_pb2.ClientTaskTicket:
remote_class = self.lookup_or_register_actor(
task.payload_id, task.client_id, decode_options(task.baseline_options)
)
opts = decode_options(task.options)
if opts is not None:
remote_class = remote_class.options(**opts)
with current_server(self):
actor = remote_class.remote(*arglist, **kwargs)
self.actor_refs[actor._actor_id.binary()] = actor
self.actor_owners[task.client_id].add(actor._actor_id.binary())
return ray_client_pb2.ClientTaskTicket(return_ids=[actor._actor_id.binary()])
def _schedule_function(
self,
task: ray_client_pb2.ClientTask,
arglist: List[Any],
kwargs: Dict[str, Any],
context=None,
) -> ray_client_pb2.ClientTaskTicket:
remote_func = self.lookup_or_register_func(
task.payload_id, task.client_id, decode_options(task.baseline_options)
)
opts = decode_options(task.options)
if opts is not None:
remote_func = remote_func.options(**opts)
with current_server(self):
output = remote_func.remote(*arglist, **kwargs)
ids = self.unify_and_track_outputs(output, task.client_id)
return ray_client_pb2.ClientTaskTicket(return_ids=ids)
def _schedule_named_actor(
self, task: ray_client_pb2.ClientTask, context=None
) -> ray_client_pb2.ClientTaskTicket:
assert len(task.payload_id) == 0
# Convert empty string back to None.
actor = ray.get_actor(task.name, task.namespace or None)
bin_actor_id = actor._actor_id.binary()
if bin_actor_id not in self.actor_refs:
self.actor_refs[bin_actor_id] = actor
self.actor_owners[task.client_id].add(bin_actor_id)
self.named_actors.add(bin_actor_id)
return ray_client_pb2.ClientTaskTicket(return_ids=[actor._actor_id.binary()])
def lookup_or_register_func(
self, id: bytes, client_id: str, options: Optional[Dict]
) -> ray.remote_function.RemoteFunction:
with disable_client_hook():
if id not in self.function_refs:
funcref = self.object_refs[client_id][id]
func = ray.get(funcref)
if not inspect.isfunction(func):
raise Exception(
"Attempting to register function that isn't a function."
)
if options is None or len(options) == 0:
self.function_refs[id] = ray.remote(func)
else:
self.function_refs[id] = ray.remote(**options)(func)
return self.function_refs[id]
def lookup_or_register_actor(
self, id: bytes, client_id: str, options: Optional[Dict]
):
with disable_client_hook():
if id not in self.registered_actor_classes:
actor_class_ref = self.object_refs[client_id][id]
actor_class = ray.get(actor_class_ref)
if not inspect.isclass(actor_class):
raise Exception("Attempting to schedule actor that isn't a class.")
if options is None or len(options) == 0:
reg_class = ray.remote(actor_class)
else:
reg_class = ray.remote(**options)(actor_class)
self.registered_actor_classes[id] = reg_class
return self.registered_actor_classes[id]
def unify_and_track_outputs(self, output, client_id):
if output is None:
outputs = []
elif isinstance(output, list):
outputs = output
else:
outputs = [output]
for out in outputs:
if out.binary() in self.object_refs[client_id]:
logger.warning(f"Already saw object_ref {out}")
self.object_refs[client_id][out.binary()] = out
return [out.binary() for out in outputs]
def return_exception_in_context(err, context):
if context is not None:
context.set_details(encode_exception(err))
# Note: https://grpc.github.io/grpc/core/md_doc_statuscodes.html
# ABORTED used here since it should never be generated by the
# grpc lib -- this way we know the error was generated by ray logic
context.set_code(grpc.StatusCode.ABORTED)
def encode_exception(exception) -> str:
data = cloudpickle.dumps(exception)
return base64.standard_b64encode(data).decode()
def decode_options(options: ray_client_pb2.TaskOptions) -> Optional[Dict[str, Any]]:
if not options.pickled_options:
return None
opts = pickle.loads(options.pickled_options)
assert isinstance(opts, dict)
return opts
def serve(host: str, port: int, ray_connect_handler=None):
def default_connect_handler(
job_config: JobConfig = None, **ray_init_kwargs: Dict[str, Any]
):
with disable_client_hook():
if not ray.is_initialized():
return ray.init(job_config=job_config, **ray_init_kwargs)
from ray._private.grpc_utils import create_grpc_server_with_interceptors
ray_connect_handler = ray_connect_handler or default_connect_handler
server = create_grpc_server_with_interceptors(
max_workers=CLIENT_SERVER_MAX_THREADS,
thread_name_prefix="ray_client_server",
options=GRPC_OPTIONS,
asynchronous=False,
)
task_servicer = RayletServicer(ray_connect_handler)
data_servicer = DataServicer(task_servicer)
logs_servicer = LogstreamServicer()
ray_client_pb2_grpc.add_RayletDriverServicer_to_server(task_servicer, server)
ray_client_pb2_grpc.add_RayletDataStreamerServicer_to_server(data_servicer, server)
ray_client_pb2_grpc.add_RayletLogStreamerServicer_to_server(logs_servicer, server)
if not is_localhost(host):
add_port_to_grpc_server(server, f"127.0.0.1:{port}")
add_port_to_grpc_server(server, f"{host}:{port}")
current_handle = ClientServerHandle(
task_servicer=task_servicer,
data_servicer=data_servicer,
logs_servicer=logs_servicer,
grpc_server=server,
)
server.start()
return current_handle
def init_and_serve(host: str, port: int, *args, **kwargs):
    """Initialize Ray in this process, then start a client server.

    Returns a (server_handle, ray_init_info) tuple.
    """
    with disable_client_hook():
        # Disable client mode inside the worker's environment
        info = ray.init(*args, **kwargs)

    def ray_connect_handler(job_config=None, **ray_init_kwargs):
        # Ray client disconnects from ray when num_clients == 0, so
        # re-initialize with the original arguments if that happened.
        if ray.is_initialized():
            return info
        return ray.init(job_config=job_config, *args, **kwargs)

    handle = serve(host, port, ray_connect_handler=ray_connect_handler)
    return handle, info
def shutdown_with_server(server, _exiting_interpreter=False):
    """Stop the gRPC server (1 second grace period) and shut Ray down."""
    server.stop(1)
    # Leave client mode so the real ray.shutdown is invoked.
    with disable_client_hook():
        ray.shutdown(_exiting_interpreter)
def create_ray_handler(address, redis_password, redis_username=None):
    """Build a connect handler that initializes Ray against *address*.

    Redis credentials are forwarded only when a password is supplied.
    """

    def ray_connect_handler(job_config: JobConfig = None, **ray_init_kwargs):
        if not address:
            # No address given: start/attach to a local Ray instance.
            ray.init(job_config=job_config, **ray_init_kwargs)
        elif redis_password:
            ray.init(
                address=address,
                _redis_username=redis_username,
                _redis_password=redis_password,
                job_config=job_config,
                **ray_init_kwargs,
            )
        else:
            ray.init(address=address, job_config=job_config, **ray_init_kwargs)

    return ray_connect_handler
def try_create_gcs_client(address: Optional[str]) -> Optional[GcsClient]:
    """Create a GCS client from *address*, or by autodetecting a running
    Ray cluster when no address is given.
    """
    bootstrap_address = canonicalize_bootstrap_address_or_die(address)
    return GcsClient(address=bootstrap_address)
def main():
    """CLI entry point: parse args, start a Ray client server, and loop
    publishing health checks until interrupted (or idle, in
    specific-server mode)."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--host", type=str, default="0.0.0.0", help="Host IP to bind to"
    )
    parser.add_argument("-p", "--port", type=int, default=10001, help="Port to bind to")
    parser.add_argument(
        "--mode",
        type=str,
        choices=["proxy", "legacy", "specific-server"],
        default="proxy",
    )
    parser.add_argument(
        "--address", required=False, type=str, help="Address to use to connect to Ray"
    )
    parser.add_argument(
        "--redis-username",
        required=False,
        type=str,
        help="username for connecting to Redis",
    )
    parser.add_argument(
        "--redis-password",
        required=False,
        type=str,
        help="Password for connecting to Redis",
    )
    parser.add_argument(
        "--runtime-env-agent-address",
        required=False,
        type=str,
        default=None,
        help="The port to use for connecting to the runtime_env_agent.",
    )
    args, _ = parser.parse_known_args()
    setup_logger(ray_constants.LOGGER_LEVEL, ray_constants.LOGGER_FORMAT)
    ray_connect_handler = create_ray_handler(
        args.address, args.redis_password, args.redis_username
    )
    hostport = build_address(args.host, args.port)
    # Redact the Redis password before logging the parsed args.
    args_str = str(args)
    if args.redis_password:
        args_str = args_str.replace(args.redis_password, "****")
    logger.info(f"Starting Ray Client server on {hostport}, args {args_str}")
    if args.mode == "proxy":
        server = serve_proxier(
            args.host,
            args.port,
            args.address,
            redis_username=args.redis_username,
            redis_password=args.redis_password,
            runtime_env_agent_address=args.runtime_env_agent_address,
        )
    else:
        server = serve(args.host, args.port, ray_connect_handler)
    try:
        # Countdown of idle 1-second checks before a specific-server
        # instance shuts itself down.
        idle_checks_remaining = TIMEOUT_FOR_SPECIFIC_SERVER_S
        while True:
            health_report = {
                "time": time.time(),
            }
            try:
                # Lazily initialize internal KV on first iteration (or after
                # a failure), then publish the health-check timestamp.
                if not ray.experimental.internal_kv._internal_kv_initialized():
                    gcs_client = try_create_gcs_client(args.address)
                    ray.experimental.internal_kv._initialize_internal_kv(gcs_client)
                ray.experimental.internal_kv._internal_kv_put(
                    "ray_client_server",
                    json.dumps(health_report),
                    namespace=ray_constants.KV_NAMESPACE_HEALTHCHECK,
                )
            except Exception as e:
                # Best-effort: keep serving even if the health check fails.
                logger.error(
                    f"[{args.mode}] Failed to put health check on {args.address}"
                )
                logger.exception(e)
            time.sleep(1)
            if args.mode == "specific-server":
                if server.data_servicer.num_clients > 0:
                    # Active clients reset the idle countdown.
                    idle_checks_remaining = TIMEOUT_FOR_SPECIFIC_SERVER_S
                else:
                    idle_checks_remaining -= 1
                if idle_checks_remaining == 0:
                    # Idle too long: trigger the shutdown path below.
                    raise KeyboardInterrupt()
                if (
                    idle_checks_remaining % 5 == 0
                    and idle_checks_remaining != TIMEOUT_FOR_SPECIFIC_SERVER_S
                ):
                    logger.info(f"{idle_checks_remaining} idle checks before shutdown.")
    except KeyboardInterrupt:
        # Ctrl-C or idle timeout: stop the server immediately.
        server.stop(0)


if __name__ == "__main__":
    main()
|
RayletServicer
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/core/property/test_alias.py
|
{
"start": 1470,
"end": 2304
}
|
class ____:
def test_create_default(self) -> None:
alias = bcpa.DeprecatedAlias("width", since=(3, 1, 0), help="Object's width")
assert alias.aliased_name == "width"
assert alias.since == (3, 1, 0)
assert alias.help == "Object's width"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpa, ALL)
|
Test_DeprecatedAlias
|
python
|
ray-project__ray
|
doc/source/tune/doc_code/trial_checkpoint.py
|
{
"start": 2142,
"end": 4409
}
|
class ____:
def state_dict(self) -> dict:
return {}
def load_state_dict(self, state_dict):
pass
# __function_api_checkpointing_from_dir_start__
import os
import tempfile
from ray import tune
from ray.tune import Checkpoint
def train_func(config):
start = 1
my_model = MyModel()
checkpoint = tune.get_checkpoint()
if checkpoint:
with checkpoint.as_directory() as checkpoint_dir:
checkpoint_dict = torch.load(os.path.join(checkpoint_dir, "checkpoint.pt"))
start = checkpoint_dict["epoch"] + 1
my_model.load_state_dict(checkpoint_dict["model_state"])
for epoch in range(start, config["epochs"] + 1):
# Model training here
# ...
metrics = {"metric": 1}
with tempfile.TemporaryDirectory() as tempdir:
torch.save(
{"epoch": epoch, "model_state": my_model.state_dict()},
os.path.join(tempdir, "checkpoint.pt"),
)
tune.report(metrics=metrics, checkpoint=Checkpoint.from_directory(tempdir))
tuner = tune.Tuner(train_func, param_space={"epochs": 5})
result_grid = tuner.fit()
# __function_api_checkpointing_from_dir_end__
assert not result_grid.errors
# __function_api_checkpointing_periodic_start__
NUM_EPOCHS = 12
# checkpoint every three epochs.
CHECKPOINT_FREQ = 3
def train_func(config):
for epoch in range(1, config["epochs"] + 1):
# Model training here
# ...
# Report metrics and save a checkpoint
metrics = {"metric": "my_metric"}
if epoch % CHECKPOINT_FREQ == 0:
with tempfile.TemporaryDirectory() as tempdir:
# Save a checkpoint in tempdir.
tune.report(metrics, checkpoint=Checkpoint.from_directory(tempdir))
else:
tune.report(metrics)
tuner = tune.Tuner(train_func, param_space={"epochs": NUM_EPOCHS})
result_grid = tuner.fit()
# __function_api_checkpointing_periodic_end__
assert not result_grid.errors
assert len(result_grid[0].best_checkpoints) == NUM_EPOCHS // CHECKPOINT_FREQ
# __callback_api_checkpointing_start__
from ray import tune
from ray.tune.experiment import Trial
from ray.tune.result import SHOULD_CHECKPOINT, TRAINING_ITERATION
|
MyModel
|
python
|
django__django
|
tests/admin_docs/models.py
|
{
"start": 312,
"end": 773
}
|
class ____(models.Model):
"""
Links with different link text.
This is a line with tag :tag:`extends <built_in-extends>`
This is a line with model :model:`Family <myapp.Family>`
This is a line with view :view:`Index <myapp.views.Index>`
This is a line with template :template:`index template <Index.html>`
This is a line with filter :filter:`example filter <filtername>`
"""
last_name = models.CharField(max_length=200)
|
Family
|
python
|
doocs__leetcode
|
solution/1700-1799/1730.Shortest Path to Get Food/Solution.py
|
{
"start": 0,
"end": 752
}
|
class ____:
def getFood(self, grid: List[List[str]]) -> int:
m, n = len(grid), len(grid[0])
i, j = next((i, j) for i in range(m) for j in range(n) if grid[i][j] == '*')
q = deque([(i, j)])
dirs = (-1, 0, 1, 0, -1)
ans = 0
while q:
ans += 1
for _ in range(len(q)):
i, j = q.popleft()
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n:
if grid[x][y] == '#':
return ans
if grid[x][y] == 'O':
grid[x][y] = 'X'
q.append((x, y))
return -1
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/efficientnet/modeling_efficientnet.py
|
{
"start": 15647,
"end": 17997
}
|
class ____(EfficientNetPreTrainedModel):
def __init__(self, config: EfficientNetConfig):
super().__init__(config)
self.config = config
self.embeddings = EfficientNetEmbeddings(config)
self.encoder = EfficientNetEncoder(config)
# Final pooling layer
if config.pooling_type == "mean":
self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
elif config.pooling_type == "max":
self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
else:
raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}")
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
embedding_output,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# Apply pooling
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
# Reshape (batch_size, 1280, 1 , 1) -> (batch_size, 1280)
pooled_output = pooled_output.reshape(pooled_output.shape[:2])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g.
for ImageNet.
"""
)
|
EfficientNetModel
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/models.py
|
{
"start": 59548,
"end": 61400
}
|
class ____(Response):
"""
Response of models.edit endpoint.
:param updated: Number of models updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "models"
_action = "edit"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of models updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(EditResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
|
EditResponse
|
python
|
joke2k__faker
|
tests/providers/test_date_time.py
|
{
"start": 31280,
"end": 31393
}
|
class ____(TestFilPh):
def setup_faker(self):
self.fake = Faker("tl_PH")
Faker.seed(0)
|
TestTlPh
|
python
|
pydantic__pydantic
|
pydantic-core/tests/validators/test_uuid.py
|
{
"start": 183,
"end": 12986
}
|
class ____(str): ...
@pytest.mark.parametrize(
'input_value,expected',
[
# Valid UUIDs
('12345678-1234-1234-1234-567812345678', UUID('12345678-1234-1234-1234-567812345678')),
('550e8400-e29b-41d4-a716-446655440000', UUID('550e8400-e29b-41d4-a716-446655440000')),
('f47ac10b-58cc-4372-a567-0e02b2c3d479', UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')),
('123e4567-e89b-12d3-a456-426655440000', UUID('123e4567-e89b-12d3-a456-426655440000')),
('de305d54-75b4-431b-adb2-eb6b9e546014', UUID('de305d54-75b4-431b-adb2-eb6b9e546014')),
('00000000-0000-0000-0000-000000000000', UUID('00000000-0000-0000-0000-000000000000')),
('1b4e28ba-2fa1-11d2-883f-0016d3cca427', UUID('1b4e28ba-2fa1-11d2-883f-0016d3cca427')),
('6ba7b810-9dad-11d1-80b4-00c04fd430c8', UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')),
('886313e1-3b8a-5372-9b90-0c9aee199e5d', UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')),
('c0a8f9a8-aa5e-482b-a067-9cb3a51f5c11', UUID('c0a8f9a8-aa5e-482b-a067-9cb3a51f5c11')),
('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05', UUID('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05')),
('0194fdc2-5d6a-733c-97f9-2feeb9d2a609', UUID('0194fdc2-5d6a-733c-97f9-2feeb9d2a609')),
('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11', UUID('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11')),
('00000000-8000-4000-8000-000000000000', UUID('00000000-8000-4000-8000-000000000000')),
('00000000-0000-4000-0000-000000000000', UUID('00000000-0000-4000-0000-000000000000')),
(MyStr('00000000-0000-4000-0000-000000000000'), UUID('00000000-0000-4000-0000-000000000000')),
(b'\x12\x34\x56\x78' * 4, UUID('12345678-1234-5678-1234-567812345678')),
(b'\x00\x00\x00\x00' * 4, UUID('00000000-0000-0000-0000-000000000000')),
(b'ebcdab58-6eb8-46fb-a190-d07a33e9eac8', UUID('ebcdab58-6eb8-46fb-a190-d07a33e9eac8')),
(UUID('12345678-1234-5678-1234-567812345678'), UUID('12345678-1234-5678-1234-567812345678')),
(UUID('550e8400-e29b-41d4-a716-446655440000'), UUID('550e8400-e29b-41d4-a716-446655440000')),
# Invalid UUIDs
(
'not-a-valid-uuid',
Err(
'Input should be a valid UUID, invalid character: expected an optional prefix of'
+ ' `urn:uuid:` followed by [0-9a-fA-F-], found `n` at 1'
),
),
(
'12345678-1234-5678-1234-5678123456789',
Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 13'),
),
(
'12345678-1234-1234-1234-1234567890123',
Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 13'),
),
(b'\x00\x00\x00\x000' * 4, Err('Input should be a valid UUID, invalid length: expected 16 bytes, found 20')),
('550e8400-e29b-41d4-a716', Err('Input should be a valid UUID, invalid group count: expected 5, found 4')),
(
'f47ac10b-58cc-4372-a567-0e02b2c3d47',
Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
),
(
'de305d54-75b4-431b-adb2-eb6b9e54601',
Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
),
(
'1b4e28ba-2fa1-11d2-883f-0016d3cca42',
Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
),
(
'6ba7b810-9dad-11d1-80b4-00c04fd430c',
Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
),
(
'886313e1-3b8a-5372-9b90-0c9aee199e5',
Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
),
(
'c0a8f9a8-aa5e-482b-a067-9cb3a51f5c1',
Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
),
(0xA1A2A3A4B1B2C1C2D1D2D3D4D5D6D7D8, Err('UUID input should be a string, bytes or UUID object')),
(00000000000000000000000000, Err('UUID input should be a string, bytes or UUID object')),
],
)
def test_uuid(input_value, expected):
v = SchemaValidator(core_schema.uuid_schema())
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
result = v.validate_python(input_value)
print(f'input_value={input_value} result={result}')
else:
output = v.validate_python(input_value)
assert output == expected
assert isinstance(output, UUID)
@pytest.mark.parametrize(
'input_value,expected',
[
(UUID('12345678-1234-5678-1234-567812345678'), UUID('12345678-1234-5678-1234-567812345678')),
('12345678-1234-5678-1234-567812345678', Err('Input should be an instance of UUID [type=is_instance_of,')),
(b'12345678-1234-5678-1234-567812345678', Err('Input should be an instance of UUID [type=is_instance_of,')),
(1654646400, Err('Input should be an instance of UUID [type=is_instance_of')),
],
)
def test_uuid_strict(input_value, expected):
v = SchemaValidator(core_schema.uuid_schema(strict=True))
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_python(input_value)
else:
output = v.validate_python(input_value)
assert output == expected
assert isinstance(output, UUID)
@pytest.mark.parametrize(
'input_value, version, expected',
[
# Valid UUIDs
('a6cc5730-2261-11ee-9c43-2eb5a363657c', 1, UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')),
(UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c'), 1, UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')),
('04e4aeb3-8f20-30d0-8852-d295e1265eed', 3, UUID('04e4aeb3-8f20-30d0-8852-d295e1265eed')),
(UUID('04e4aeb3-8f20-30d0-8852-d295e1265eed'), 3, UUID('04e4aeb3-8f20-30d0-8852-d295e1265eed')),
('0e7ac198-9acd-4c0c-b4b4-761974bf71d7', 4, UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7')),
(UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7'), 4, UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7')),
('0e7ac198-9acd-4c0c-b4b4-761974bf71d7', 4, UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7')),
(UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7'), 4, UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7')),
('0194fdc2-5d6a-733c-97f9-2feeb9d2a609', 7, UUID('0194fdc2-5d6a-733c-97f9-2feeb9d2a609')),
(UUID('0194fdc2-5d6a-733c-97f9-2feeb9d2a609'), 7, UUID('0194fdc2-5d6a-733c-97f9-2feeb9d2a609')),
('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05', 6, UUID('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05')),
(UUID('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05'), 6, UUID('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05')),
('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11', 8, UUID('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11')),
(UUID('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11'), 8, UUID('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11')),
# Cases from pydantic#7355 and pydantic#7537
# `UUID.version` makes sense for RFC 4122 UUIDs only. For non RFC 4122 UUIDs Python uses `UUID.version=None`
('00000000-8000-4000-8000-000000000000', 4, UUID('00000000-8000-4000-8000-000000000000')),
(UUID('00000000-8000-4000-8000-000000000000'), 4, UUID('00000000-8000-4000-8000-000000000000')),
('00000000-0000-4000-0000-000000000000', None, UUID('00000000-0000-4000-0000-000000000000')),
(UUID('00000000-0000-4000-0000-000000000000'), None, UUID('00000000-0000-4000-0000-000000000000')),
('00000000-7fff-4000-7fff-000000000000', None, UUID('00000000-7fff-4000-7fff-000000000000')),
(UUID('00000000-7fff-4000-7fff-000000000000'), None, UUID('00000000-7fff-4000-7fff-000000000000')),
(UUID('00000000-7fff-4000-7fff-000000000000'), 4, Err('UUID version 4 expected')),
('b34b6755-f49c-3bd2-6f06-131a708c2bf3', None, UUID('b34b6755-f49c-3bd2-6f06-131a708c2bf3')),
(UUID('b34b6755-f49c-3bd2-6f06-131a708c2bf3'), None, UUID('b34b6755-f49c-3bd2-6f06-131a708c2bf3')),
(UUID('b34b6755-f49c-3bd2-6f06-131a708c2bf3'), 4, Err('UUID version 4 expected')),
# Invalid UUIDs
('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05', 8, Err('UUID version 8 expected')),
(UUID('1efea93d-7bb8-6ea0-afdc-e76cbc0c8e05'), 8, Err('UUID version 8 expected')),
('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11', 6, Err('UUID version 6 expected')),
(UUID('c0a8f9a8-aa5e-882b-a067-9cb3a51f5c11'), 6, Err('UUID version 6 expected')),
('a6cc5730-2261-11ee-9c43-2eb5a363657c', 7, Err('UUID version 7 expected')),
(UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c'), 7, Err('UUID version 7 expected')),
('a6cc5730-2261-11ee-9c43-2eb5a363657c', 5, Err('UUID version 5 expected')),
(UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c'), 5, Err('UUID version 5 expected')),
('04e4aeb3-8f20-30d0-8852-d295e1265eed', 4, Err('UUID version 4 expected')),
(UUID('04e4aeb3-8f20-30d0-8852-d295e1265eed'), 4, Err('UUID version 4 expected')),
('0e7ac198-9acd-4c0c-b4b4-761974bf71d7', 3, Err('UUID version 3 expected')),
(UUID('0e7ac198-9acd-4c0c-b4b4-761974bf71d7'), 3, Err('UUID version 3 expected')),
('08ed0736-fb95-5cc5-85ed-37e4f3df9b29', 1, Err('UUID version 1 expected')),
(UUID('08ed0736-fb95-5cc5-85ed-37e4f3df9b29'), 1, Err('UUID version 1 expected')),
('00000000-0000-4000-0000-000000000000', 4, Err('UUID version 4 expected')),
(UUID('00000000-0000-4000-0000-000000000000'), 4, Err('UUID version 4 expected')),
],
)
def test_uuid_version(input_value, version, expected):
schema = core_schema.uuid_schema()
if version is not None:
schema = core_schema.uuid_schema(version=version)
v = SchemaValidator(schema)
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_python(input_value)
else:
output = v.validate_python(input_value)
assert output == expected
assert isinstance(output, UUID)
@pytest.mark.parametrize(
'input_value,expected',
[
('a6cc5730-2261-11ee-9c43-2eb5a363657c', UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')),
('12345678123456781234567812345678', UUID('12345678-1234-5678-1234-567812345678')),
(
'c0a8f9a8-aa5e-482b-a067-9cb3a51f5c1',
Err('Input should be a valid UUID, invalid group length in group 4: expected 12, found 11'),
),
(1e1, Err('input should be a string, bytes or UUID object')),
(None, Err('input should be a string, bytes or UUID object')),
(True, Err('input should be a string, bytes or UUID object')),
(0xA1A2A3A4B1B2C1C2D1D2D3D4D5D6D7D8, Err('input should be a string, bytes or UUID object')),
(0x12345678123456781234567812345678, Err('input should be a string, bytes or UUID object')),
],
)
def test_uuid_json(py_and_json: PyAndJson, input_value, expected):
v = py_and_json({'type': 'uuid'})
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_test(input_value)
else:
output = v.validate_test(input_value)
assert output == expected
assert isinstance(output, UUID)
def test_uuid_deepcopy():
output = SchemaValidator(core_schema.uuid_schema()).validate_python('a6cc5730-2261-11ee-9c43-2eb5a363657c')
c = copy.deepcopy(output)
assert repr(output) == "UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')"
assert c == output
assert isinstance(output, UUID)
def test_uuid_copy():
output = SchemaValidator(core_schema.uuid_schema()).validate_python('a6cc5730-2261-11ee-9c43-2eb5a363657c')
c = copy.copy(output)
assert repr(output) == "UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c')"
assert c == output
assert isinstance(output, UUID)
def test_uuid_wrap_json():
# https://github.com/pydantic/pydantic/issues/8147
schema = core_schema.no_info_wrap_validator_function(lambda v, handler: handler(v), core_schema.uuid_schema())
v = SchemaValidator(schema)
assert v.validate_python(UUID('a6cc5730-2261-11ee-9c43-2eb5a363657c'), strict=True) == UUID(
'a6cc5730-2261-11ee-9c43-2eb5a363657c'
)
assert v.validate_json('"a6cc5730-2261-11ee-9c43-2eb5a363657c"', strict=True) == UUID(
'a6cc5730-2261-11ee-9c43-2eb5a363657c'
)
def uuid_safety_unknown():
output = SchemaValidator(core_schema.uuid_schema()).validate_python('a6cc5730-2261-11ee-9c43-2eb5a363657c')
assert output.is_safe is SafeUUID.unknown
|
MyStr
|
python
|
pytorch__pytorch
|
test/distributed/_composable/fsdp/test_fully_shard_compile.py
|
{
"start": 3913,
"end": 47417
}
|
class ____(FSDPTest):
fake_pg = not at_least_x_gpu(2)
# This method is an override of the base class.
# Tests in this class requires bf16 support, so SM arch must be 80 or
# higher.
def skipTestForOldSm(self):
# Assumption: This test class is only run on GPU. See `HAS_GPU` check at
# the top of the class.
device = torch.device(
device_type.type,
self.rank % torch.get_device_module(device_type).device_count(),
)
if (
device_type.type == "cuda"
and not torch.version.hip
and not sm_is_or_higher_than(device, 8, 0)
):
self.skipTest("bf16 requires sm >= 8.0")
def test_dynamo_trace_use_training_state(self):
torch._dynamo.reset()
# Construct a dummy FSDPParamGroup, since we just want to test the `use_training_state` ctx manager.
param_group = FSDPParamGroup(
[], # params: List[nn.Parameter],
(torch.nn.Linear(1, 1),), # module: Tuple[nn.Module, ...],
None, # mesh_info: FSDPMeshInfo,
None, # post_forward_mesh_info: Optional[FSDPMeshInfo],
device_type, # device: torch.device,
None, # shard_placement_fn: Optional[Callable],
None, # mp_policy: MixedPrecisionPolicy,
None, # offload_policy: OffloadPolicy,
)
def f(x):
param_group._training_state = TrainingState.IDLE
with param_group.use_training_state(TrainingState.FORWARD):
if param_group._training_state == TrainingState.FORWARD:
return x + 1
else:
return x
inp = torch.zeros(1)
self.assertEqual(param_group._training_state, TrainingState.IDLE)
eager_out = f(inp)
self.assertEqual(param_group._training_state, TrainingState.IDLE)
self.assertEqual(eager_out, inp + 1)
cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
compiled_out = torch.compile(f, backend=cnt, fullgraph=True)(inp)
self.assertEqual(param_group._training_state, TrainingState.IDLE)
self.assertEqual(eager_out, compiled_out)
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(cnt.op_count, 1)
self.assertEqual(len(cnt.graphs), 1)
def test_trace_fsdp_copy_(self):
@torch.library.custom_op("mylib::add_one_out", mutates_args={"out"})
def add_one_out(x: torch.Tensor, out: torch.Tensor) -> None:
torch.add(x, 1, out=out)
def f(x):
buf = torch.zeros(2)
buf_view = buf.view(-1)
torch.ops.mylib.add_one_out(x, out=buf_view)
buf_view2 = buf.view(-1)
torch.ops.fsdp.copy_(x, buf_view2)
ref_x = torch.zeros(2)
x = copy.deepcopy(ref_x)
f(ref_x)
torch.compile(f, backend="aot_eager")(x)
self.assertEqual(x, ref_x)
def _get_resize_count_in_fx_graph(self, graph: torch.fx.Graph):
resize_count = 0
for node in graph.nodes:
if (
node.op == "call_function"
and node.target == torch.ops.inductor.resize_storage_bytes_.default
):
resize_count += 1
return resize_count
def _assert_no_aliased_unsharded_params_in_graph_inputs(
self, model, graph: torch.fx.Graph
) -> None:
# FSDP2 unsharded params are mutated in the graph without going through functionalization.
# Therefore, we want to make sure they don't have aliases in the graph inputs, to make it easier
# for us to do the replacement of unsharded params with the all-gathered temporary buffer directly
# in downstream users in the graph.
storage_id_to_graph_inputs = defaultdict(list)
unsharded_param_graph_inputs = set()
for node in graph.nodes:
if (
node.op == "call_function"
and node.target
in [
torch.ops.inductor.resize_storage_bytes_.default,
torch.ops.fsdp.copy_.default,
]
and node.args[0].op == "placeholder"
):
unsharded_param_graph_inputs.add(node.args[0])
assert len(unsharded_param_graph_inputs) > 0
assert len(unsharded_param_graph_inputs) == len(list(model.parameters())), """\
Expected all model parameters to be wrapped by FSDP2 and
have their unsharded version as graph input, but it's not true!
"""
no_aliased_unsharded_params_in_graph_inputs = True
err_msg = ""
for aliased_graph_inputs in storage_id_to_graph_inputs.values():
if len(aliased_graph_inputs) > 1 and any(
x in unsharded_param_graph_inputs for x in aliased_graph_inputs
):
no_aliased_unsharded_params_in_graph_inputs = False
err_msg += f"""\n
Found aliased unsharded param in graph inputs: {aliased_graph_inputs},
val.shape: {[node.meta["val"].shape for node in aliased_graph_inputs]},
"""
self.assertTrue(no_aliased_unsharded_params_in_graph_inputs, err_msg)
def _remove_fsdp2_unsharded_param_graph_input_usage_with_optional_checks(
self, model, *, bwd_resize_count_before_pass=None, fwd_fullgraph=False
):
def _run_with_checks(graph, orig_fn):
if (
self._is_bwd_fx_graph(graph)
and bwd_resize_count_before_pass is not None
):
self.assertEqual(
bwd_resize_count_before_pass,
self._get_resize_count_in_fx_graph(graph),
)
self._assert_no_aliased_unsharded_params_in_graph_inputs(model, graph)
orig_fn(graph)
if fwd_fullgraph:
return mock.patch.object(
comms,
"remove_fsdp2_unsharded_param_graph_input_usage",
functools.partial(
_run_with_checks,
orig_fn=comms.remove_fsdp2_unsharded_param_graph_input_usage,
),
)
else:
return contextlib.nullcontext()
def _check_fsdp_copy_and_resize_ops_count_in_graph(
self,
graph,
*,
fwd_copy_count,
fwd_resize_count,
bwd_copy_count,
bwd_resize_count,
):
def _check_count(copy_count, resize_count):
actual_copy_count = _count_op_in_graph(graph, torch.ops.fsdp.copy_.default)
self.assertEqual(
actual_copy_count,
copy_count,
f"Unexpected number of `fsdp.copy_` ops (expected {copy_count}, got {actual_copy_count}) in graph: {graph}",
)
actual_resize_count = _count_op_in_graph(
graph, torch.ops.inductor.resize_storage_bytes_.default
)
self.assertEqual(
actual_resize_count,
resize_count,
f"Unexpected number of `inductor.resize_storage_bytes_` ops (expected {resize_count}, got {actual_resize_count}) in graph: {graph}", # noqa: B950
)
if not torch._dynamo.compiled_autograd.in_compiled_autograd_region:
_check_count(fwd_copy_count, fwd_resize_count) # fwd graph
else:
_check_count(bwd_copy_count, bwd_resize_count) # bwd graph
def _reinplace_all_gather_with_optional_checks(self, fwd_fullgraph):
def _run_with_checks(graph, orig_fn):
if self.world_size > 1:
self.assertGreater(
_count_op_in_graph(
graph, torch.ops._c10d_functional.all_gather_into_tensor.default
),
0,
)
elif self.world_size == 1:
self.assertEqual(
_count_op_in_graph(
graph, torch.ops._c10d_functional.all_gather_into_tensor.default
),
0,
)
orig_fn(graph)
self.assertEqual(
_count_op_in_graph(
graph, torch.ops._c10d_functional.all_gather_into_tensor.default
),
0,
)
if self.world_size > 1:
self.assertGreater(
_count_op_in_graph(
graph,
torch.ops._c10d_functional.all_gather_into_tensor_out.default,
),
0,
)
else:
self.assertEqual(
_count_op_in_graph(
graph,
torch.ops._c10d_functional.all_gather_into_tensor_out.default,
),
0,
)
if fwd_fullgraph:
return mock.patch.object(
comms,
"reinplace_fsdp_all_gather",
functools.partial(
_run_with_checks,
orig_fn=comms.reinplace_fsdp_all_gather,
),
)
else:
return contextlib.nullcontext()
def _is_fwd_graph(self, snodes):
ag_copy_in_snode = None
for snode in snodes:
if is_fallback_op(snode.node, torch.ops.fsdp.all_gather_copy_in.default):
ag_copy_in_snode = snode
break
self.assertTrue(ag_copy_in_snode is not None)
if any(
dep.name.startswith("primals_")
for dep in ag_copy_in_snode.read_writes.reads
):
return True
else:
return False
def _is_bwd_fx_graph(self, graph):
for node in graph.nodes:
if (
node.op == "call_function"
and node.target
== torch.ops._c10d_functional.reduce_scatter_tensor.default
):
return True
return False
def _maybe_run_decide_global_ordering_of_comms_with_checks(self, fwd_fullgraph):
def _check_fsdp_ops_in_snodes(snodes, is_fwd_graph, expect=True):
assert_method = self.assertTrue if expect else self.assertFalse
common_ops = {
torch.ops.fsdp.all_gather_copy_in.default,
torch.ops._c10d_functional.all_gather_into_tensor_out.default,
torch.ops.fsdp.split_with_sizes_copy.default,
}
bwd_only_ops = {
torch.ops.fsdp.chunk_cat.default,
torch.ops._c10d_functional.reduce_scatter_tensor.default,
}
for op in common_ops:
assert_method(
_is_fallback_op_in_snodes(
snodes,
op,
),
msg=f"{op}",
)
if not is_fwd_graph:
for op in bwd_only_ops:
assert_method(
_is_fallback_op_in_snodes(
snodes,
op,
),
msg=f"{op}",
)
def _decide_global_ordering_of_comms_with_checks(
snodes, name_to_buf, name_to_fused_node, orig_fn
):
is_fwd_graph = self._is_fwd_graph(snodes)
_check_fsdp_ops_in_snodes(snodes, is_fwd_graph, expect=True)
new_snodes = orig_fn(snodes, name_to_buf, name_to_fused_node)
_check_fsdp_ops_in_snodes(new_snodes, is_fwd_graph, expect=False)
return new_snodes
if fwd_fullgraph:
return mock.patch.object(
comms,
"decide_global_ordering_of_comms",
functools.partial(
_decide_global_ordering_of_comms_with_checks,
orig_fn=comms.decide_global_ordering_of_comms,
),
)
else:
return contextlib.nullcontext()
def inductor_code_check_no_compute_op(self, file_check):
return (
file_check.check_not(" = aten.")
.check_not(" = extern_kernels.")
.check_not(" = triton_")
.check_not(" = torch.ops.")
.check_not(" = inductor_ops.")
.check_not(" aten.")
.check_not(" extern_kernels.")
.check_not(" triton_")
.check_not(" torch.ops.")
.check_not(" inductor_ops.")
)
def inductor_code_check_fsdp_all_gather(
self,
file_check,
overlapped_compute_op_str,
last_all_gather=False,
):
file_check = file_check.check("torch.ops.fsdp.all_gather_copy_in.")
file_check = self.inductor_code_check_no_compute_op(file_check)
file_check = file_check.check(
"torch.ops._c10d_functional.all_gather_into_tensor_out."
)
# Checks that AGWait is delayed, making the AG overlap with some compute op.
if overlapped_compute_op_str is not None:
file_check = file_check.check(f"{overlapped_compute_op_str}")
file_check = file_check.check("torch.ops._c10d_functional.wait_tensor.")
file_check = self.inductor_code_check_no_compute_op(file_check)
file_check = file_check.check("torch.ops.fsdp.split_with_sizes_copy.")
if not last_all_gather:
# Checks that there is no compute op between this AGWait and next AG.
file_check = self.inductor_code_check_no_compute_op(file_check)
return file_check
def inductor_code_check_fsdp_reduce_scatter(
self, file_check, overlapped_compute_op_str
):
file_check = file_check.check("torch.ops.fsdp.chunk_cat.")
file_check = self.inductor_code_check_no_compute_op(file_check)
file_check = file_check.check(
"torch.ops._c10d_functional.reduce_scatter_tensor."
)
# Checks that RSWait is delayed, making the RS overlap with some compute op.
if overlapped_compute_op_str is not None:
file_check = file_check.check(f"{overlapped_compute_op_str}")
file_check = file_check.check("torch.ops._c10d_functional.wait_tensor.")
return file_check
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_compiled_autograd_ctx(self):
        """Smoke test: compiled-autograd context around a compiled fully_shard Linear."""
        self.skipTestForOldSm()
        with (
            torch._dynamo.config.patch(skip_fsdp_hooks=False),
            torch._functorch.config.patch(recompute_views=True),
        ):
            inputs = torch.randn(8, 8)
            model = torch.nn.Linear(8, 8)
            fully_shard(model)
            model_compiled = torch.compile(model, backend="inductor")
            for i in range(10):
                torch.compiler.set_stance(
                    "force_eager" if i < 1 else "default"
                )  # eager warmup for 1 iteration
                with torch._dynamo.compiled_autograd._enable(
                    torch.compile(backend="inductor", fullgraph=True)
                ):
                    out = model_compiled(inputs)
                    out.sum().backward()
    def _test_traceable_fsdp(
        self,
        model_init_fn,
        input_creation_fn,
        backend,
        fwd_fullgraph,
        *,
        bwd_resize_count_before_inductor=None,
    ):
        """Run fwd+bwd compiled and eager for several iters; compare per-iter losses.

        Args:
            model_init_fn: () -> (model, optim); builds the FSDP2-sharded model.
            input_creation_fn: () -> input tensor for one iteration.
            backend: torch.compile backend name ("aot_eager", "inductor", ...).
            fwd_fullgraph: if True, expect exactly one graph break (the
                `loss.backward()` call); otherwise expect more than one.
            bwd_resize_count_before_inductor: forwarded to the graph-input-usage
                check as the expected resize-op count before Inductor's pass.
        """

        def fwd_bwd(model, inp):
            out = model(inp)
            loss = out.sum()
            loss.backward()
            return loss

        def run_iters(
            fwd_bwd_func,
            optim,
            n_iter=10,
            compiled_autograd_backend=None,
        ):
            torch.manual_seed(42)
            losses = []
            for i in range(n_iter):
                # eager warmup for 1 iteration, so that all FSDP2 lazy-initialization is done in eager
                torch.compiler.set_stance("force_eager" if i < 1 else "default")
                inp = input_creation_fn()
                loss = fwd_bwd_func(inp)
                losses.append(loss.item())
                optim.step()
                optim.zero_grad(set_to_none=True)
            return losses

        def test_compiled():
            model, optim = model_init_fn()
            fwd_bwd_fn = functools.partial(fwd_bwd, model)
            counters.clear()
            with self._remove_fsdp2_unsharded_param_graph_input_usage_with_optional_checks(
                model,
                bwd_resize_count_before_pass=bwd_resize_count_before_inductor,
                fwd_fullgraph=fwd_fullgraph,
            ):
                fwd_bwd_fn_compiled = torch.compile(
                    fwd_bwd_fn,
                    backend=backend,
                    # NOTE: we can't set `fullgraph=True` here because we will always graph-break
                    # on `loss.backward()` call in `fwd_bwd()`. This is okay as long as
                    # it's the only graph-break in forward pass.
                    fullgraph=False,
                )
                res = run_iters(
                    fwd_bwd_fn_compiled,
                    optim,
                    compiled_autograd_backend=backend,
                )
            if fwd_fullgraph:
                # The only break allowed is the Tensor.backward() one.
                self.assertEqual(len(counters["graph_break"]), 1)
                self.assertExpectedInline(
                    next(iter(counters["graph_break"].keys())),
                    """\
Unsupported Tensor.backward() call
Explanation: Dynamo currently does not support tracing `Tensor.backward()`.
Hint: This graph break is fundamental - it is unlikely that Dynamo will ever be able to trace through your code. Consider finding a workaround.
Developer debug context: call_method TensorVariable() backward () {}
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0123.html""",  # noqa: B950
                )
            else:
                self.assertGreater(len(counters["graph_break"]), 1)
            return res

        def test_eager():
            model, optim = model_init_fn()
            fwd_bwd_fn = functools.partial(fwd_bwd, model)
            res = run_iters(fwd_bwd_fn, optim)
            return res

        torch._dynamo.reset()
        torch._dynamo.compiled_autograd.reset()
        with (
            torch._dynamo.config.patch(
                compiled_autograd=True,
                compiled_autograd_kwargs_override={
                    "fullgraph": True,
                },
                inline_inbuilt_nn_modules=True,
                skip_fsdp_hooks=False,
            ),
            torch._functorch.config.patch(
                enable_autograd_cache=False,
                recompute_views=True,
            ),
            torch._inductor.config.patch(
                force_disable_caches=True,
                reorder_for_compute_comm_overlap=True,
                reorder_for_compute_comm_overlap_passes=[
                    "sink_waits",
                    "raise_comms",
                    "reorder_compute_for_overlap",
                ],
            ),
        ):
            losses_compiled = test_compiled()
            losses_eager = test_eager()
        if not self.fake_pg:
            # With a real process group, compiled and eager losses must agree.
            for loss_compiled, loss_eager in zip(losses_compiled, losses_eager):
                self.assertTrue(
                    torch.allclose(
                        torch.tensor(loss_compiled),
                        torch.tensor(loss_eager),
                        rtol=1e-5,
                        atol=1e-8,
                    ),
                    f"{loss_compiled} vs {loss_eager}",
                )
def _create_simple_mlp_factory_fns(self):
hidden_dim = 16
def model_init_fn():
torch.manual_seed(self.rank)
fsdp_config = {}
model = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim, device=device_type),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim, device=device_type),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim, device=device_type),
)
fully_shard(model, reshard_after_forward=True, **fsdp_config)
optim = torch.optim.SGD(model.parameters(), lr=1e-4)
return model, optim
def input_creation_fn():
torch.manual_seed(self.rank)
inp = torch.randn((2, hidden_dim), device=device_type, requires_grad=False)
return inp
return model_init_fn, input_creation_fn
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_simple_mlp_fullgraph_backend_aot_eager(self):
        """Simple MLP end-to-end with the aot_eager backend."""
        self._test_traceable_fsdp(
            *self._create_simple_mlp_factory_fns(), "aot_eager", fwd_fullgraph=True
        )
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_simple_mlp_fullgraph_backend_aot_eager_decomp_partition(self):
        """Simple MLP end-to-end with the aot_eager_decomp_partition backend."""
        self._test_traceable_fsdp(
            *self._create_simple_mlp_factory_fns(),
            "aot_eager_decomp_partition",
            fwd_fullgraph=True,
        )
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_simple_mlp_fullgraph_backend_inductor(self):
        """Simple MLP end-to-end with the inductor backend."""
        self.skipTestForOldSm()
        self._test_traceable_fsdp(
            *self._create_simple_mlp_factory_fns(), "inductor", fwd_fullgraph=True
        )
    def _create_nested_fully_shard_factory_fns(self, fwd_fullgraph):
        """Factory fns for a 3-layer module with per-layer fully_shard groups.

        The forward deliberately reuses layers several times to exercise the
        "multiple all-gathers for the same parameter" case. When `fwd_fullgraph`
        is False, a graph break is inserted inside each submodule's forward.
        """
        hidden_dim = 16

        class TestSubmodule(nn.Module):
            def __init__(self, hidden_dim):
                super().__init__()
                self.param1 = nn.Parameter(
                    torch.zeros(
                        hidden_dim, hidden_dim, dtype=torch.float, device=device_type
                    )
                )
                self.param2 = nn.Parameter(
                    torch.zeros(hidden_dim, dtype=torch.float, device=device_type)
                )

            def forward(self, x):
                ret = torch.matmul(x, self.param1)
                if not fwd_fullgraph:
                    # Deliberate break to test the non-fullgraph path.
                    torch._dynamo.graph_break()
                ret = ret * self.param2
                ret = torch.relu(ret)
                return ret

        class TestModule(nn.Module):
            def __init__(self, n_layers):
                super().__init__()
                self.layers = torch.nn.ModuleList()
                for _ in range(n_layers):
                    self.layers.append(TestSubmodule(hidden_dim))

            def forward(self, x):
                # Intentionally reusing all layers a few times,
                # to test "multiple all-gathers for the same parameter" case.
                # Case 1: rerun the same layer twice
                for layer_id in range(len(self.layers)):
                    for _ in range(2):
                        x = self.layers[layer_id](x)
                # Case 2: iterate through all layers twice
                for layer in self.layers:
                    x = layer(x)
                for layer in self.layers:
                    x = layer(x)
                return x

        def model_init_fn():
            torch.manual_seed(self.rank)
            fsdp_config = {}
            mesh = init_device_mesh(device_type.type, (self.world_size,))
            model = TestModule(n_layers=3)
            # Each layer gets its own shard group; the root wraps the rest.
            for mod in model.layers:
                fully_shard(mod, mesh=mesh, reshard_after_forward=True, **fsdp_config)
            model = fully_shard(
                model, mesh=mesh, reshard_after_forward=True, **fsdp_config
            )
            optim = torch.optim.SGD(model.parameters(), lr=1e-4)
            return model, optim

        def input_creation_fn():
            torch.manual_seed(self.rank)
            inp = torch.randn((2, hidden_dim), device=device_type, requires_grad=False)
            return inp

        return model_init_fn, input_creation_fn
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_nested_fully_shard_backend_aot_eager(self):
        """Nested fully_shard end-to-end with the aot_eager backend."""
        # TODO: fix fwd_fullgraph=False case
        for fwd_fullgraph in [True]:
            self._test_traceable_fsdp(
                *self._create_nested_fully_shard_factory_fns(
                    fwd_fullgraph=fwd_fullgraph
                ),
                "aot_eager",
                fwd_fullgraph=fwd_fullgraph,
            )
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_nested_fully_shard_backend_aot_eager_decomp_partition(self):
        """Nested fully_shard end-to-end with the aot_eager_decomp_partition backend."""
        # TODO: fix fwd_fullgraph=False case
        for fwd_fullgraph in [True]:
            self._test_traceable_fsdp(
                *self._create_nested_fully_shard_factory_fns(
                    fwd_fullgraph=fwd_fullgraph
                ),
                "aot_eager_decomp_partition",
                fwd_fullgraph=fwd_fullgraph,
            )
    def _test_nested_fully_shard_backend_inductor_fullgraph_True(self):
        """Inductor end-to-end for the nested fully_shard model (fullgraph fwd).

        Asserts (via post_grad_custom_post_pass) that Inductor removed all FSDP
        copy/resize ops, checks that exactly two Triton lowerings are produced,
        and runs (currently mostly disabled) FileCheck patterns over them.
        """
        self.skipTestForOldSm()
        for fwd_fullgraph in [True]:
            with (
                self._reinplace_all_gather_with_optional_checks(fwd_fullgraph),
                torch._inductor.config.patch(
                    post_grad_custom_post_pass=(
                        functools.partial(
                            self._check_fsdp_copy_and_resize_ops_count_in_graph,
                            fwd_copy_count=0,
                            fwd_resize_count=0,
                            bwd_copy_count=0,
                            bwd_resize_count=0,
                        )
                        if fwd_fullgraph
                        else None
                    )
                ),
            ):
                _, triton_codes = run_and_get_code(
                    lambda: self._test_traceable_fsdp(
                        *self._create_nested_fully_shard_factory_fns(
                            fwd_fullgraph=fwd_fullgraph
                        ),
                        "inductor",
                        fwd_fullgraph=fwd_fullgraph,
                        bwd_resize_count_before_inductor=48 if fwd_fullgraph else None,
                    ),
                )
            if fwd_fullgraph:
                self.assertEqual(
                    len(triton_codes),
                    2,
                    "Expected two separate lowerings to Triton code, one from FWD graph and one from Compiled Autograd BWD graph",
                )
                fwd_code = triton_codes[0]
                extra_str_from_graph_partition = (
                    "self, " if torch._inductor.config.graph_partition else ""
                )
                file_check = FileCheck().check(
                    f"def call({extra_str_from_graph_partition}args):"
                )
                # One entry per expected all-gather block in the forward code.
                for fwd_ag_block_info in [
                    dict(overlapped_compute_op_str=None),
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                    ),
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                    ),
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                    ),
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                    ),
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                    ),
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                    ),
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                    ),
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                        last_all_gather=True,
                    ),
                ]:
                    # file_check = self.inductor_code_check_fsdp_all_gather(
                    #     file_check, **fwd_ag_block_info
                    # )
                    pass
                file_check.run(fwd_code)
                bwd_code = triton_codes[1]
                file_check = FileCheck().check(
                    f"def call({extra_str_from_graph_partition}args):"
                )
                for bwd_ag_block_info in [
                    dict(overlapped_compute_op_str=None),
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                    ),
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                        last_all_gather=True,
                    ),
                ]:
                    # file_check = self.inductor_code_check_fsdp_all_gather(
                    #     file_check, **bwd_ag_block_info
                    # )
                    pass
                for bwd_rs_block_info in [
                    dict(overlapped_compute_op_str="extern_kernels.addmm("),
                    dict(
                        overlapped_compute_op_str=None
                    ),  # TODO: improve compute/comm overlap, so that `overlapped_compute_op_str` is not None
                    dict(overlapped_compute_op_str=None),
                ]:
                    # file_check = self.inductor_code_check_fsdp_reduce_scatter(
                    #     file_check, **bwd_rs_block_info
                    # )
                    pass
                file_check.run(bwd_code)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_nested_fully_shard_backend_inductor_fullgraph_True(self):
        """Thin wrapper over _test_nested_fully_shard_backend_inductor_fullgraph_True."""
        self._test_nested_fully_shard_backend_inductor_fullgraph_True()
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    @torch._inductor.config.patch("graph_partition", True)
    def test_nested_fully_shard_backend_inductor_fullgraph_True_graph_partition(self):
        """Same flow with Inductor graph partitioning enabled."""
        self._test_nested_fully_shard_backend_inductor_fullgraph_True()
@unittest.skip("TODO: fix fwd_fullgraph=False case")
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_nested_fully_shard_backend_inductor_fullgraph_False(self):
self.skipTestForOldSm()
_, triton_codes = run_and_get_code(
lambda: self._test_traceable_fsdp(
*self._create_nested_fully_shard_factory_fns(fwd_fullgraph=False),
"inductor",
fwd_fullgraph=False,
),
)
# TODO: when fwd_fullgraph=False and there is graph break in FWD graph,
# there are several recompiles, need to figure out why.
self.assertGreater(
len(triton_codes),
2,
"Expected at least 3 separate lowerings to Triton code, which means at least 1 graph break in FWD graph",
)
def _create_transformer_factory_fns(
self, all_requires_grad, *, activation_checkpoint=False
):
seq_len = 16
vocab_size = 8
n_layers = 3
def model_init_fn():
torch.manual_seed(self.rank)
fsdp_config = {}
mesh = init_device_mesh(device_type.type, (self.world_size,))
model_args = ModelArgs(
vocab_size=vocab_size,
n_layers=n_layers,
checkpoint_activations=activation_checkpoint,
)
model = Transformer(model_args)
if not all_requires_grad:
requires_grad_params = ["attention.wq", "attention.wv"]
requires_grad_param_count = 0
for k, v in model.named_parameters():
for substring in requires_grad_params:
if substring in k:
v.requires_grad_(True)
requires_grad_param_count += 1
else:
v.requires_grad_(False)
assert requires_grad_param_count == n_layers * len(requires_grad_params)
for _, mod in enumerate(model.layers):
fully_shard(mod, mesh=mesh, reshard_after_forward=True, **fsdp_config)
model = fully_shard(
model, mesh=mesh, reshard_after_forward=False, **fsdp_config
)
optim = torch.optim.SGD(model.parameters(), lr=1e-4)
return model, optim
def input_creation_fn():
torch.manual_seed(self.rank)
inp = torch.randint(
0, vocab_size, (2, seq_len), device=device_type, requires_grad=False
)
return inp
return model_init_fn, input_creation_fn
def _maybe_add_graph_break_to_sdpa(self, fwd_fullgraph):
def _sdpa_with_graph_break(*args, **kwargs):
torch._dynamo.graph_break()
return orig_F_scaled_dot_product_attention(*args, **kwargs)
if not fwd_fullgraph:
return mock.patch.object(
F,
"scaled_dot_product_attention",
_sdpa_with_graph_break,
)
else:
return contextlib.nullcontext()
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_transformer_backend_aot_eager(self):
        """Transformer end-to-end with aot_eager, with and without frozen params."""
        # TODO: fix fwd_fullgraph=False case
        for fwd_fullgraph, all_requires_grad in itertools.product(
            [True], [True, False]
        ):
            with (
                self._maybe_add_graph_break_to_sdpa(fwd_fullgraph),
                self._reinplace_all_gather_with_optional_checks(fwd_fullgraph),
            ):
                self._test_traceable_fsdp(
                    *self._create_transformer_factory_fns(
                        all_requires_grad=all_requires_grad
                    ),
                    "aot_eager",
                    fwd_fullgraph=fwd_fullgraph,
                )
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    # TODO: native_dropout has worse accuracy after decomp, need to figure out why
    @torch._inductor.config.patch(fallback_random=True)
    def test_transformer_backend_aot_eager_decomp_partition(self):
        """Transformer end-to-end with the aot_eager_decomp_partition backend."""
        # TODO: fix fwd_fullgraph=False case
        for fwd_fullgraph, all_requires_grad in itertools.product(
            [True], [True, False]
        ):
            with self._maybe_add_graph_break_to_sdpa(fwd_fullgraph):
                self._test_traceable_fsdp(
                    *self._create_transformer_factory_fns(
                        all_requires_grad=all_requires_grad
                    ),
                    "aot_eager_decomp_partition",
                    fwd_fullgraph=fwd_fullgraph,
                )
    def _test_transformer_backend_inductor_fullgraph_True(self):
        """Inductor end-to-end for the Transformer (fullgraph fwd).

        Sweeps frozen-param and activation-checkpoint variants, asserts the
        FSDP copy/resize op counts after Inductor's pass, and runs (currently
        mostly disabled) FileCheck patterns over the two Triton lowerings.
        """
        self.skipTestForOldSm()
        for (
            fwd_fullgraph,
            all_requires_grad,
            activation_checkpoint,
        ) in itertools.product([True], [True, False], [True, False]):
            log.warning(
                f"fwd_fullgraph={fwd_fullgraph}, all_requires_grad={all_requires_grad}, activation_checkpoint={activation_checkpoint}"  # noqa: G004, G001, B950
            )
            with (
                self._reinplace_all_gather_with_optional_checks(fwd_fullgraph),
                torch._inductor.config.patch(
                    post_grad_custom_post_pass=(
                        functools.partial(
                            self._check_fsdp_copy_and_resize_ops_count_in_graph,
                            # NOTE: For the root unsharded params, we don't reshard after forward since for training,
                            # the parameters would be freed and all-gathered immediately. Hence we still have
                            # their resize and copy ops in the graph.
                            fwd_copy_count=4,
                            fwd_resize_count=4,
                            bwd_copy_count=0,
                            bwd_resize_count=4,
                        )
                        if fwd_fullgraph
                        else None
                    )
                ),
            ):
                _, triton_codes = run_and_get_code(
                    lambda: self._test_traceable_fsdp(
                        *self._create_transformer_factory_fns(
                            all_requires_grad=all_requires_grad,
                            activation_checkpoint=activation_checkpoint,
                        ),
                        "inductor",
                        fwd_fullgraph=fwd_fullgraph,
                        bwd_resize_count_before_inductor=76 if fwd_fullgraph else None,
                    ),
                )
            if fwd_fullgraph:
                self.assertEqual(
                    len(triton_codes),
                    2,
                    "Expected two separate lowerings to Triton code, one from FWD graph and one from Compiled Autograd BWD graph",
                )
                fwd_code = triton_codes[0]
                extra_str_from_graph_partition = (
                    "self, " if torch._inductor.config.graph_partition else ""
                )
                file_check = FileCheck().check(
                    f"def call({extra_str_from_graph_partition}args):"
                )
                # One entry per expected all-gather block in the forward code.
                for fwd_ag_block_info in [
                    dict(
                        overlapped_compute_op_str=(
                            "triton_" if all_requires_grad else None
                        ),
                    ),
                    dict(
                        overlapped_compute_op_str="aten.native_dropout.",
                    ),
                    dict(
                        overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention.",
                    ),
                    dict(
                        overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention.",
                        last_all_gather=True,
                    ),
                ]:
                    # file_check = self.inductor_code_check_fsdp_all_gather(
                    #     file_check, **fwd_ag_block_info
                    # )
                    pass
                file_check.run(fwd_code)
                bwd_code = triton_codes[1]
                file_check = FileCheck().check(
                    f"def call({extra_str_from_graph_partition}args):"
                )
                for bwd_ag_block_info in [
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                    ),
                    dict(
                        overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention_backward.",
                    ),
                    dict(
                        overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention_backward.",
                        last_all_gather=True,
                    ),
                ]:
                    # if bwd_ag_block_info is not None:
                    #     file_check = self.inductor_code_check_fsdp_all_gather(
                    #         file_check, **bwd_ag_block_info
                    #     )
                    pass
                for bwd_rs_block_info in [
                    (
                        dict(overlapped_compute_op_str="extern_kernels.mm(")
                        if all_requires_grad
                        else None
                    ),
                    dict(
                        overlapped_compute_op_str=None
                    ),  # TODO: improve compute/comm overlap, so that `overlapped_compute_op_str` is not None
                    dict(overlapped_compute_op_str=None),
                    dict(overlapped_compute_op_str=None) if all_requires_grad else None,
                ]:
                    # if bwd_rs_block_info is not None:
                    #     file_check = self.inductor_code_check_fsdp_reduce_scatter(
                    #         file_check, **bwd_rs_block_info
                    #     )
                    pass
                file_check.run(bwd_code)
    @unittest.skip('"Traceable FSDP2" is not being maintained anymore.')
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    # TODO: native_dropout causes CUDA IMA error, need to figure out why
    @torch._inductor.config.patch(fallback_random=True)
    def test_transformer_backend_inductor_fullgraph_True(self):
        """Thin wrapper over _test_transformer_backend_inductor_fullgraph_True."""
        self._test_transformer_backend_inductor_fullgraph_True()
    @unittest.skip('"Traceable FSDP2" is not being maintained anymore.')
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    # TODO: native_dropout causes CUDA IMA error, need to figure out why
    @torch._inductor.config.patch(fallback_random=True)
    @torch._inductor.config.patch("graph_partition", True)
    def test_transformer_backend_inductor_fullgraph_True_graph_partition(self):
        """Same flow with Inductor graph partitioning enabled."""
        self._test_transformer_backend_inductor_fullgraph_True()
@unittest.skip("TODO: fix fwd_fullgraph=False case")
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
# TODO: native_dropout causes CUDA IMA error, need to figure out why
@torch._inductor.config.patch(fallback_random=True)
def test_transformer_backend_inductor_fullgraph_False(self):
self.skipTestForOldSm()
fwd_fullgraph = False
# TODO: fix numerical issue in activation_checkpoint=True case
for all_requires_grad, activation_checkpoint in itertools.product(
[True, False], [False]
):
log.warning(
f"fwd_fullgraph={fwd_fullgraph}, all_requires_grad={all_requires_grad}, activation_checkpoint={activation_checkpoint}" # noqa: G004, G001, B950
)
with self._maybe_add_graph_break_to_sdpa(fwd_fullgraph):
_, triton_codes = run_and_get_code(
lambda: self._test_traceable_fsdp(
*self._create_transformer_factory_fns(
all_requires_grad=all_requires_grad,
activation_checkpoint=activation_checkpoint,
),
"inductor",
fwd_fullgraph=fwd_fullgraph,
),
)
# TODO: when fwd_fullgraph=False and there is graph break in FWD graph,
# there are several recompiles, need to figure out why.
self.assertGreater(
len(triton_codes),
2,
"Expected at least 3 separate lowerings to Triton code, which means at least 1 graph break in FWD graph",
)
def test_dynamo_recompiles_on_fsdp_layers(self):
m = Mod()
for name, child in m.encoder.named_children():
if isinstance(child, torch.nn.Linear):
new_child = torch.compile(child)
setattr(m.encoder, name, new_child)
m = FSDP(m, sharding_strategy=ShardingStrategy.FULL_SHARD, use_orig_params=True)
inp = torch.randn(32, 784, device=device_type)
m(inp)
if __name__ == "__main__":
run_tests()
|
TestFullyShardCompile
|
python
|
tornadoweb__tornado
|
tornado/test/httpclient_test.py
|
{
"start": 30791,
"end": 31110
}
|
class ____(unittest.TestCase):
    def test_str(self):
        """str() of a minimal response includes the class name and status code."""
        request = HTTPRequest("http://example.com")
        response = HTTPResponse(request, 200, buffer=BytesIO())  # type: ignore
        rendered = str(response)
        self.assertTrue(rendered.startswith("HTTPResponse("))
        self.assertIn("code=200", rendered)
HTTPResponseTestCase
|
python
|
tiangolo__fastapi
|
tests/test_generate_unique_id_function.py
|
{
"start": 493,
"end": 68299
}
|
class ____(BaseModel):
    # Payload model used for the 404 "model" responses in the tests below.
    # NOTE: deliberately no docstring -- pydantic would surface it as the
    # schema "description" in the generated OpenAPI and break the exact
    # dict comparisons in these tests.
    title: str
    description: str
def test_top_level_generate_unique_id():
    """App-level generate_unique_id_function applies to app and included-router
    routes alike: all operationIds get the "foo_" prefix."""
    app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
    router = APIRouter()

    @app.post("/", response_model=List[Item], responses={404: {"model": List[Message]}})
    def post_root(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    @router.post(
        "/router", response_model=List[Item], responses={404: {"model": List[Message]}}
    )
    def post_router(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    app.include_router(router)
    client = TestClient(app)
    response = client.get("/openapi.json")
    data = response.json()
    # Both operations (and their generated Body_* schemas) use the "foo" id.
    assert data == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/": {
                "post": {
                    "summary": "Post Root",
                    "operationId": "foo_post_root",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_foo_post_root"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Foo Post Root",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Foo Post Root",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
            "/router": {
                "post": {
                    "summary": "Post Router",
                    "operationId": "foo_post_router",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_foo_post_router"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Foo Post Router",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Foo Post Router",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
        },
        "components": {
            "schemas": {
                "Body_foo_post_root": {
                    "title": "Body_foo_post_root",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "Body_foo_post_router": {
                    "title": "Body_foo_post_router",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "HTTPValidationError": {
                    "title": "HTTPValidationError",
                    "type": "object",
                    "properties": {
                        "detail": {
                            "title": "Detail",
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/ValidationError"},
                        }
                    },
                },
                "Item": {
                    "title": "Item",
                    "required": ["name", "price"],
                    "type": "object",
                    "properties": {
                        "name": {"title": "Name", "type": "string"},
                        "price": {"title": "Price", "type": "number"},
                    },
                },
                "Message": {
                    "title": "Message",
                    "required": ["title", "description"],
                    "type": "object",
                    "properties": {
                        "title": {"title": "Title", "type": "string"},
                        "description": {"title": "Description", "type": "string"},
                    },
                },
                "ValidationError": {
                    "title": "ValidationError",
                    "required": ["loc", "msg", "type"],
                    "type": "object",
                    "properties": {
                        "loc": {
                            "title": "Location",
                            "type": "array",
                            "items": {
                                "anyOf": [{"type": "string"}, {"type": "integer"}]
                            },
                        },
                        "msg": {"title": "Message", "type": "string"},
                        "type": {"title": "Error Type", "type": "string"},
                    },
                },
            }
        },
    }
def test_router_overrides_generate_unique_id():
    """A router-level generate_unique_id_function overrides the app-level one:
    the app route keeps "foo_", the router route gets "bar_"."""
    app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
    router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)

    @app.post("/", response_model=List[Item], responses={404: {"model": List[Message]}})
    def post_root(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    @router.post(
        "/router", response_model=List[Item], responses={404: {"model": List[Message]}}
    )
    def post_router(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    app.include_router(router)
    client = TestClient(app)
    response = client.get("/openapi.json")
    data = response.json()
    assert data == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/": {
                "post": {
                    "summary": "Post Root",
                    "operationId": "foo_post_root",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_foo_post_root"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Foo Post Root",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Foo Post Root",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
            "/router": {
                "post": {
                    "summary": "Post Router",
                    "operationId": "bar_post_router",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_bar_post_router"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Bar Post Router",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Bar Post Router",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
        },
        "components": {
            "schemas": {
                "Body_bar_post_router": {
                    "title": "Body_bar_post_router",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "Body_foo_post_root": {
                    "title": "Body_foo_post_root",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "HTTPValidationError": {
                    "title": "HTTPValidationError",
                    "type": "object",
                    "properties": {
                        "detail": {
                            "title": "Detail",
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/ValidationError"},
                        }
                    },
                },
                "Item": {
                    "title": "Item",
                    "required": ["name", "price"],
                    "type": "object",
                    "properties": {
                        "name": {"title": "Name", "type": "string"},
                        "price": {"title": "Price", "type": "number"},
                    },
                },
                "Message": {
                    "title": "Message",
                    "required": ["title", "description"],
                    "type": "object",
                    "properties": {
                        "title": {"title": "Title", "type": "string"},
                        "description": {"title": "Description", "type": "string"},
                    },
                },
                "ValidationError": {
                    "title": "ValidationError",
                    "required": ["loc", "msg", "type"],
                    "type": "object",
                    "properties": {
                        "loc": {
                            "title": "Location",
                            "type": "array",
                            "items": {
                                "anyOf": [{"type": "string"}, {"type": "integer"}]
                            },
                        },
                        "msg": {"title": "Message", "type": "string"},
                        "type": {"title": "Error Type", "type": "string"},
                    },
                },
            }
        },
    }
def test_router_include_overrides_generate_unique_id():
    """A router's own generate_unique_id_function also wins over the one passed
    to include_router(): the router route stays "bar_", not "baz_"."""
    app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
    router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)

    @app.post("/", response_model=List[Item], responses={404: {"model": List[Message]}})
    def post_root(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    @router.post(
        "/router", response_model=List[Item], responses={404: {"model": List[Message]}}
    )
    def post_router(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    app.include_router(router, generate_unique_id_function=custom_generate_unique_id3)
    client = TestClient(app)
    response = client.get("/openapi.json")
    data = response.json()
    assert data == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/": {
                "post": {
                    "summary": "Post Root",
                    "operationId": "foo_post_root",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_foo_post_root"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Foo Post Root",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Foo Post Root",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
            "/router": {
                "post": {
                    "summary": "Post Router",
                    "operationId": "bar_post_router",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_bar_post_router"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Bar Post Router",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Bar Post Router",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
        },
        "components": {
            "schemas": {
                "Body_bar_post_router": {
                    "title": "Body_bar_post_router",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "Body_foo_post_root": {
                    "title": "Body_foo_post_root",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "HTTPValidationError": {
                    "title": "HTTPValidationError",
                    "type": "object",
                    "properties": {
                        "detail": {
                            "title": "Detail",
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/ValidationError"},
                        }
                    },
                },
                "Item": {
                    "title": "Item",
                    "required": ["name", "price"],
                    "type": "object",
                    "properties": {
                        "name": {"title": "Name", "type": "string"},
                        "price": {"title": "Price", "type": "number"},
                    },
                },
                "Message": {
                    "title": "Message",
                    "required": ["title", "description"],
                    "type": "object",
                    "properties": {
                        "title": {"title": "Title", "type": "string"},
                        "description": {"title": "Description", "type": "string"},
                    },
                },
                "ValidationError": {
                    "title": "ValidationError",
                    "required": ["loc", "msg", "type"],
                    "type": "object",
                    "properties": {
                        "loc": {
                            "title": "Location",
                            "type": "array",
                            "items": {
                                "anyOf": [{"type": "string"}, {"type": "integer"}]
                            },
                        },
                        "msg": {"title": "Message", "type": "string"},
                        "type": {"title": "Error Type", "type": "string"},
                    },
                },
            }
        },
    }
def test_subrouter_top_level_include_overrides_generate_unique_id():
    # Precedence check: a `generate_unique_id_function` passed to
    # `app.include_router(...)` overrides the app-level one for routes of
    # the included router, but a sub-router's own function still wins for
    # its routes. Expected IDs: "/" -> foo_* (app), "/router" -> baz_*
    # (include-time), "/subrouter" -> bar_* (sub-router's own).
    app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
    router = APIRouter()
    sub_router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)

    @app.post("/", response_model=List[Item], responses={404: {"model": List[Message]}})
    def post_root(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    @router.post(
        "/router", response_model=List[Item], responses={404: {"model": List[Message]}}
    )
    def post_router(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    @sub_router.post(
        "/subrouter",
        response_model=List[Item],
        responses={404: {"model": List[Message]}},
    )
    def post_subrouter(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    router.include_router(sub_router)
    app.include_router(router, generate_unique_id_function=custom_generate_unique_id3)
    client = TestClient(app)
    response = client.get("/openapi.json")
    data = response.json()
    # Full expected OpenAPI document; the operationId and Body_* schema
    # names encode which unique-id function was applied to each route.
    assert data == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/": {
                "post": {
                    "summary": "Post Root",
                    "operationId": "foo_post_root",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_foo_post_root"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Foo Post Root",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Foo Post Root",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
            "/router": {
                "post": {
                    "summary": "Post Router",
                    "operationId": "baz_post_router",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_baz_post_router"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Baz Post Router",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Baz Post Router",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
            "/subrouter": {
                "post": {
                    "summary": "Post Subrouter",
                    "operationId": "bar_post_subrouter",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_bar_post_subrouter"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Bar Post Subrouter",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Bar Post Subrouter",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
        },
        "components": {
            "schemas": {
                "Body_bar_post_subrouter": {
                    "title": "Body_bar_post_subrouter",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "Body_baz_post_router": {
                    "title": "Body_baz_post_router",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "Body_foo_post_root": {
                    "title": "Body_foo_post_root",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "HTTPValidationError": {
                    "title": "HTTPValidationError",
                    "type": "object",
                    "properties": {
                        "detail": {
                            "title": "Detail",
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/ValidationError"},
                        }
                    },
                },
                "Item": {
                    "title": "Item",
                    "required": ["name", "price"],
                    "type": "object",
                    "properties": {
                        "name": {"title": "Name", "type": "string"},
                        "price": {"title": "Price", "type": "number"},
                    },
                },
                "Message": {
                    "title": "Message",
                    "required": ["title", "description"],
                    "type": "object",
                    "properties": {
                        "title": {"title": "Title", "type": "string"},
                        "description": {"title": "Description", "type": "string"},
                    },
                },
                "ValidationError": {
                    "title": "ValidationError",
                    "required": ["loc", "msg", "type"],
                    "type": "object",
                    "properties": {
                        "loc": {
                            "title": "Location",
                            "type": "array",
                            "items": {
                                "anyOf": [{"type": "string"}, {"type": "integer"}]
                            },
                        },
                        "msg": {"title": "Message", "type": "string"},
                        "type": {"title": "Error Type", "type": "string"},
                    },
                },
            }
        },
    }
def test_router_path_operation_overrides_generate_unique_id():
    # Precedence check: a `generate_unique_id_function` given directly on a
    # router path operation overrides the router-level one. Expected IDs:
    # "/" -> foo_* (app default), "/router" -> baz_* (path-operation-level).
    app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
    router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)

    @app.post("/", response_model=List[Item], responses={404: {"model": List[Message]}})
    def post_root(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    @router.post(
        "/router",
        response_model=List[Item],
        responses={404: {"model": List[Message]}},
        generate_unique_id_function=custom_generate_unique_id3,
    )
    def post_router(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    app.include_router(router)
    client = TestClient(app)
    response = client.get("/openapi.json")
    data = response.json()
    # Full expected OpenAPI document; operationId and Body_* names encode
    # which unique-id function applied to each route.
    assert data == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/": {
                "post": {
                    "summary": "Post Root",
                    "operationId": "foo_post_root",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_foo_post_root"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Foo Post Root",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Foo Post Root",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
            "/router": {
                "post": {
                    "summary": "Post Router",
                    "operationId": "baz_post_router",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_baz_post_router"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Baz Post Router",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Baz Post Router",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
        },
        "components": {
            "schemas": {
                "Body_baz_post_router": {
                    "title": "Body_baz_post_router",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "Body_foo_post_root": {
                    "title": "Body_foo_post_root",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "HTTPValidationError": {
                    "title": "HTTPValidationError",
                    "type": "object",
                    "properties": {
                        "detail": {
                            "title": "Detail",
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/ValidationError"},
                        }
                    },
                },
                "Item": {
                    "title": "Item",
                    "required": ["name", "price"],
                    "type": "object",
                    "properties": {
                        "name": {"title": "Name", "type": "string"},
                        "price": {"title": "Price", "type": "number"},
                    },
                },
                "Message": {
                    "title": "Message",
                    "required": ["title", "description"],
                    "type": "object",
                    "properties": {
                        "title": {"title": "Title", "type": "string"},
                        "description": {"title": "Description", "type": "string"},
                    },
                },
                "ValidationError": {
                    "title": "ValidationError",
                    "required": ["loc", "msg", "type"],
                    "type": "object",
                    "properties": {
                        "loc": {
                            "title": "Location",
                            "type": "array",
                            "items": {
                                "anyOf": [{"type": "string"}, {"type": "integer"}]
                            },
                        },
                        "msg": {"title": "Message", "type": "string"},
                        "type": {"title": "Error Type", "type": "string"},
                    },
                },
            }
        },
    }
def test_app_path_operation_overrides_generate_unique_id():
    # Precedence check: a `generate_unique_id_function` given directly on an
    # app path operation overrides the app-level one. Expected IDs:
    # "/" -> baz_* (path-operation-level), "/router" -> bar_* (router-level).
    app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
    router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)

    @app.post(
        "/",
        response_model=List[Item],
        responses={404: {"model": List[Message]}},
        generate_unique_id_function=custom_generate_unique_id3,
    )
    def post_root(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    @router.post(
        "/router",
        response_model=List[Item],
        responses={404: {"model": List[Message]}},
    )
    def post_router(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    app.include_router(router)
    client = TestClient(app)
    response = client.get("/openapi.json")
    data = response.json()
    # Full expected OpenAPI document; operationId and Body_* names encode
    # which unique-id function applied to each route.
    assert data == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/": {
                "post": {
                    "summary": "Post Root",
                    "operationId": "baz_post_root",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_baz_post_root"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Baz Post Root",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Baz Post Root",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
            "/router": {
                "post": {
                    "summary": "Post Router",
                    "operationId": "bar_post_router",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_bar_post_router"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Bar Post Router",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Bar Post Router",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
        },
        "components": {
            "schemas": {
                "Body_bar_post_router": {
                    "title": "Body_bar_post_router",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "Body_baz_post_root": {
                    "title": "Body_baz_post_root",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "HTTPValidationError": {
                    "title": "HTTPValidationError",
                    "type": "object",
                    "properties": {
                        "detail": {
                            "title": "Detail",
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/ValidationError"},
                        }
                    },
                },
                "Item": {
                    "title": "Item",
                    "required": ["name", "price"],
                    "type": "object",
                    "properties": {
                        "name": {"title": "Name", "type": "string"},
                        "price": {"title": "Price", "type": "number"},
                    },
                },
                "Message": {
                    "title": "Message",
                    "required": ["title", "description"],
                    "type": "object",
                    "properties": {
                        "title": {"title": "Title", "type": "string"},
                        "description": {"title": "Description", "type": "string"},
                    },
                },
                "ValidationError": {
                    "title": "ValidationError",
                    "required": ["loc", "msg", "type"],
                    "type": "object",
                    "properties": {
                        "loc": {
                            "title": "Location",
                            "type": "array",
                            "items": {
                                "anyOf": [{"type": "string"}, {"type": "integer"}]
                            },
                        },
                        "msg": {"title": "Message", "type": "string"},
                        "type": {"title": "Error Type", "type": "string"},
                    },
                },
            }
        },
    }
def test_callback_override_generate_unique_id():
    # Precedence check for callback routes: the callback's own
    # path-operation-level function (baz_*) applies to the callback
    # operation embedded under the main operation's "callbacks" key, while
    # a route without overrides ("/tocallback") falls back to the
    # app-level function (foo_*).
    app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
    callback_router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)

    @callback_router.post(
        "/post-callback",
        response_model=List[Item],
        responses={404: {"model": List[Message]}},
        generate_unique_id_function=custom_generate_unique_id3,
    )
    def post_callback(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    @app.post(
        "/",
        response_model=List[Item],
        responses={404: {"model": List[Message]}},
        generate_unique_id_function=custom_generate_unique_id3,
        callbacks=callback_router.routes,
    )
    def post_root(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    @app.post(
        "/tocallback",
        response_model=List[Item],
        responses={404: {"model": List[Message]}},
    )
    def post_with_callback(item1: Item, item2: Item):
        return item1, item2  # pragma: nocover

    client = TestClient(app)
    response = client.get("/openapi.json")
    data = response.json()
    # Full expected OpenAPI document, including the nested "callbacks"
    # sub-document under the "/" operation.
    assert data == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/": {
                "post": {
                    "summary": "Post Root",
                    "operationId": "baz_post_root",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_baz_post_root"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Baz Post Root",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Baz Post Root",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                    "callbacks": {
                        "post_callback": {
                            "/post-callback": {
                                "post": {
                                    "summary": "Post Callback",
                                    "operationId": "baz_post_callback",
                                    "requestBody": {
                                        "content": {
                                            "application/json": {
                                                "schema": {
                                                    "$ref": "#/components/schemas/Body_baz_post_callback"
                                                }
                                            }
                                        },
                                        "required": True,
                                    },
                                    "responses": {
                                        "200": {
                                            "description": "Successful Response",
                                            "content": {
                                                "application/json": {
                                                    "schema": {
                                                        "title": "Response Baz Post Callback",
                                                        "type": "array",
                                                        "items": {
                                                            "$ref": "#/components/schemas/Item"
                                                        },
                                                    }
                                                }
                                            },
                                        },
                                        "404": {
                                            "description": "Not Found",
                                            "content": {
                                                "application/json": {
                                                    "schema": {
                                                        "title": "Response 404 Baz Post Callback",
                                                        "type": "array",
                                                        "items": {
                                                            "$ref": "#/components/schemas/Message"
                                                        },
                                                    }
                                                }
                                            },
                                        },
                                        "422": {
                                            "description": "Validation Error",
                                            "content": {
                                                "application/json": {
                                                    "schema": {
                                                        "$ref": "#/components/schemas/HTTPValidationError"
                                                    }
                                                }
                                            },
                                        },
                                    },
                                }
                            }
                        }
                    },
                }
            },
            "/tocallback": {
                "post": {
                    "summary": "Post With Callback",
                    "operationId": "foo_post_with_callback",
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/Body_foo_post_with_callback"
                                }
                            }
                        },
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Foo Post With Callback",
                                        "type": "array",
                                        "items": {"$ref": "#/components/schemas/Item"},
                                    }
                                }
                            },
                        },
                        "404": {
                            "description": "Not Found",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response 404 Foo Post With Callback",
                                        "type": "array",
                                        "items": {
                                            "$ref": "#/components/schemas/Message"
                                        },
                                    }
                                }
                            },
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                }
            },
        },
        "components": {
            "schemas": {
                "Body_baz_post_callback": {
                    "title": "Body_baz_post_callback",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "Body_baz_post_root": {
                    "title": "Body_baz_post_root",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "Body_foo_post_with_callback": {
                    "title": "Body_foo_post_with_callback",
                    "required": ["item1", "item2"],
                    "type": "object",
                    "properties": {
                        "item1": {"$ref": "#/components/schemas/Item"},
                        "item2": {"$ref": "#/components/schemas/Item"},
                    },
                },
                "HTTPValidationError": {
                    "title": "HTTPValidationError",
                    "type": "object",
                    "properties": {
                        "detail": {
                            "title": "Detail",
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/ValidationError"},
                        }
                    },
                },
                "Item": {
                    "title": "Item",
                    "required": ["name", "price"],
                    "type": "object",
                    "properties": {
                        "name": {"title": "Name", "type": "string"},
                        "price": {"title": "Price", "type": "number"},
                    },
                },
                "Message": {
                    "title": "Message",
                    "required": ["title", "description"],
                    "type": "object",
                    "properties": {
                        "title": {"title": "Title", "type": "string"},
                        "description": {"title": "Description", "type": "string"},
                    },
                },
                "ValidationError": {
                    "title": "ValidationError",
                    "required": ["loc", "msg", "type"],
                    "type": "object",
                    "properties": {
                        "loc": {
                            "title": "Location",
                            "type": "array",
                            "items": {
                                "anyOf": [{"type": "string"}, {"type": "integer"}]
                            },
                        },
                        "msg": {"title": "Message", "type": "string"},
                        "type": {"title": "Error Type", "type": "string"},
                    },
                },
            }
        },
    }
def test_warn_duplicate_operation_id():
    # Deliberately give every route the same operation ID so that OpenAPI
    # schema generation must emit "Duplicate Operation ID" warnings.
    def always_foo(route: APIRoute):
        return "foo"

    app = FastAPI(generate_unique_id_function=always_foo)

    @app.post("/")
    def post_root(item1: Item):
        return item1  # pragma: nocover

    @app.post("/second")
    def post_second(item1: Item):
        return item1  # pragma: nocover

    @app.post("/third")
    def post_third(item1: Item):
        return item1  # pragma: nocover

    client = TestClient(app)
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        client.get("/openapi.json")
        # Three colliding routes must yield at least two duplicate warnings.
        assert len(caught) >= 2
        duplicate_warnings = [
            entry for entry in caught if issubclass(entry.category, UserWarning)
        ]
        assert duplicate_warnings
        assert "Duplicate Operation ID" in str(duplicate_warnings[0].message)
|
Message
|
python
|
ansible__ansible
|
test/units/plugins/action/test_gather_facts.py
|
{
"start": 1121,
"end": 3775
}
|
class ____(unittest.TestCase):
    """Tests that the 'smart' facts gathering resolves network facts modules
    from the ``ansible_network_os`` variable."""

    # Class-level test doubles: the action plugin only touches the few
    # attributes set below, so bare MagicMocks are sufficient.
    task = MagicMock(Task)
    play_context = MagicMock()
    play_context.check_mode = False
    connection = MagicMock()
    fake_loader = DictDataLoader({
    })
    templar = TemplateEngine(loader=fake_loader)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @patch.object(module_common, '_get_collection_metadata', return_value={})
    def test_network_gather_facts_smart_facts_module(self, mock_collection_metadata):
        # A short (non-FQCN) network OS name should resolve to the
        # ansible.legacy fully qualified facts module.
        self.fqcn_task_vars = {'ansible_network_os': 'ios'}
        self.task.action = 'gather_facts'
        self.task.async_val = False
        self.task.args = {}

        plugin = GatherFactsAction(self.task, self.connection, self.play_context, loader=None, templar=self.templar, shared_loader_obj=None)
        # Stub out module resolution/execution so run() exercises only the
        # facts-module selection logic.
        get_module_args = MagicMock(return_value={})
        plugin._get_module_args = get_module_args
        plugin._execute_module = MagicMock(return_value={})
        res = plugin.run(task_vars=self.fqcn_task_vars)

        # assert the gather_facts config is 'smart'
        facts_modules = C.config.get_config_value('FACTS_MODULES', variables=self.fqcn_task_vars)
        self.assertEqual(facts_modules, ['smart'])

        # assert the correct module was found
        self.assertEqual(get_module_args.call_count, 1)
        self.assertEqual(
            get_module_args.call_args.args,
            ('ansible.legacy.ios_facts', {'ansible_network_os': 'ios'},)
        )

    @patch.object(module_common, '_get_collection_metadata', return_value={})
    def test_network_gather_facts_smart_facts_module_fqcn(self, mock_collection_metadata):
        # An FQCN network OS name should resolve to the matching collection
        # facts module (cisco.ios.ios -> cisco.ios.ios_facts).
        self.fqcn_task_vars = {'ansible_network_os': 'cisco.ios.ios'}
        self.task.action = 'gather_facts'
        self.task.async_val = False
        self.task.args = {}

        plugin = GatherFactsAction(self.task, self.connection, self.play_context, loader=None, templar=self.templar, shared_loader_obj=None)
        get_module_args = MagicMock(return_value={})
        plugin._get_module_args = get_module_args
        plugin._execute_module = MagicMock(return_value={})
        res = plugin.run(task_vars=self.fqcn_task_vars)

        # assert the gather_facts config is 'smart'
        facts_modules = C.config.get_config_value('FACTS_MODULES', variables=self.fqcn_task_vars)
        self.assertEqual(facts_modules, ['smart'])

        # assert the correct module was found
        self.assertEqual(get_module_args.call_count, 1)
        self.assertEqual(
            get_module_args.call_args.args,
            ('cisco.ios.ios_facts', {'ansible_network_os': 'cisco.ios.ios'},)
        )
|
TestNetworkFacts
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/layout/containers.py
|
{
"start": 14705,
"end": 23903
}
|
class ____(_Split):
    """
    Several layouts, one stacked left/right of the other. ::

        +---------+----------+
        |         |          |
        |         |          |
        +---------+----------+

    By default, this doesn't display a vertical line between the children, but
    if this is something you need, then create a HSplit as follows::

        VSplit(children=[ ... ], padding_char='|',
               padding=1, padding_style='#ffff00')

    :param children: List of child :class:`.Container` objects.
    :param window_too_small: A :class:`.Container` object that is displayed if
        there is not enough space for all the children. By default, this is a
        "Window too small" message.
    :param align: `HorizontalAlign` value.
    :param width: When given, use this width instead of looking at the children.
    :param height: When given, use this height instead of looking at the children.
    :param z_index: (int or None) When specified, this can be used to bring
        element in front of floating elements.  `None` means: inherit from parent.
    :param style: A style string.
    :param modal: ``True`` or ``False``.
    :param key_bindings: ``None`` or a :class:`.KeyBindings` object.
    :param padding: (`Dimension` or int), size to be used for the padding.
    :param padding_char: Character to be used for filling in the padding.
    :param padding_style: Style to applied to the padding.
    """

    def __init__(
        self,
        children: Sequence[AnyContainer],
        window_too_small: Container | None = None,
        align: HorizontalAlign = HorizontalAlign.JUSTIFY,
        padding: AnyDimension = 0,
        padding_char: str | None = None,
        padding_style: str = "",
        width: AnyDimension = None,
        height: AnyDimension = None,
        z_index: int | None = None,
        modal: bool = False,
        key_bindings: KeyBindingsBase | None = None,
        style: str | Callable[[], str] = "",
    ) -> None:
        super().__init__(
            children=children,
            window_too_small=window_too_small,
            padding=padding,
            padding_char=padding_char,
            padding_style=padding_style,
            width=width,
            height=height,
            z_index=z_index,
            modal=modal,
            key_bindings=key_bindings,
            style=style,
        )
        self.align = align

        # Cache for `_all_children`, invalidated when `self.children` changes.
        self._children_cache: SimpleCache[tuple[Container, ...], list[Container]] = (
            SimpleCache(maxsize=1)
        )
        self._remaining_space_window = Window()  # Dummy window.

    def preferred_width(self, max_available_width: int) -> Dimension:
        # Explicit width wins; otherwise sum the children's preferred widths.
        if self.width is not None:
            return to_dimension(self.width)

        dimensions = [
            c.preferred_width(max_available_width) for c in self._all_children
        ]

        return sum_layout_dimensions(dimensions)

    def preferred_height(self, width: int, max_available_height: int) -> Dimension:
        if self.height is not None:
            return to_dimension(self.height)

        # At the point where we want to calculate the heights, the widths have
        # already been decided. So we can trust `width` to be the actual
        # `width` that's going to be used for the rendering. So,
        # `divide_widths` is supposed to use all of the available width.
        # Using only the `preferred` width caused a bug where the reported
        # height was more than required. (we had a `BufferControl` which did
        # wrap lines because of the smaller width returned by `_divide_widths`.

        sizes = self._divide_widths(width)
        children = self._all_children

        if sizes is None:
            return Dimension()
        else:
            dimensions = [
                c.preferred_height(s, max_available_height)
                for s, c in zip(sizes, children)
            ]
            return max_layout_dimensions(dimensions)

    def reset(self) -> None:
        for c in self.children:
            c.reset()

    @property
    def _all_children(self) -> list[Container]:
        """
        List of child objects, including padding.

        Alignment is implemented by inserting zero-preferred-width spacer
        windows on the left and/or right.
        """

        def get() -> list[Container]:
            result: list[Container] = []

            # Padding left.
            if self.align in (HorizontalAlign.CENTER, HorizontalAlign.RIGHT):
                result.append(Window(width=Dimension(preferred=0)))

            # The children with padding.
            for child in self.children:
                result.append(child)
                result.append(
                    Window(
                        width=self.padding,
                        char=self.padding_char,
                        style=self.padding_style,
                    )
                )
            if result:
                # Drop the trailing padding window after the last child.
                result.pop()

            # Padding right.
            if self.align in (HorizontalAlign.CENTER, HorizontalAlign.LEFT):
                result.append(Window(width=Dimension(preferred=0)))

            return result

        return self._children_cache.get(tuple(self.children), get)

    def _divide_widths(self, width: int) -> list[int] | None:
        """
        Return the widths for all columns.
        Or None when there is not enough space.
        """
        children = self._all_children

        if not children:
            return []

        # Calculate widths.
        dimensions = [c.preferred_width(width) for c in children]
        preferred_dimensions = [d.preferred for d in dimensions]

        # Sum dimensions
        sum_dimensions = sum_layout_dimensions(dimensions)

        # If there is not enough space for both.
        # Don't do anything.
        if sum_dimensions.min > width:
            return None

        # Find optimal sizes. (Start with minimal size, increase until we cover
        # the whole width.)
        sizes = [d.min for d in dimensions]

        child_generator = take_using_weights(
            items=list(range(len(dimensions))), weights=[d.weight for d in dimensions]
        )

        i = next(child_generator)

        # Increase until we meet at least the 'preferred' size.
        preferred_stop = min(width, sum_dimensions.preferred)

        while sum(sizes) < preferred_stop:
            if sizes[i] < preferred_dimensions[i]:
                sizes[i] += 1
            i = next(child_generator)

        # Increase until we use all the available space.
        max_dimensions = [d.max for d in dimensions]
        max_stop = min(width, sum_dimensions.max)

        while sum(sizes) < max_stop:
            if sizes[i] < max_dimensions[i]:
                sizes[i] += 1
            i = next(child_generator)

        return sizes

    def write_to_screen(
        self,
        screen: Screen,
        mouse_handlers: MouseHandlers,
        write_position: WritePosition,
        parent_style: str,
        erase_bg: bool,
        z_index: int | None,
    ) -> None:
        """
        Render the prompt to a `Screen` instance.

        :param screen: The :class:`~prompt_toolkit.layout.screen.Screen` class
            to which the output has to be written.
        """
        if not self.children:
            return

        children = self._all_children
        sizes = self._divide_widths(write_position.width)
        style = parent_style + " " + to_str(self.style)
        z_index = z_index if self.z_index is None else self.z_index

        # If there is not enough space.
        if sizes is None:
            self.window_too_small.write_to_screen(
                screen, mouse_handlers, write_position, style, erase_bg, z_index
            )
            return

        # Calculate heights, take the largest possible, but not larger than
        # write_position.height.
        heights = [
            child.preferred_height(width, write_position.height).preferred
            for width, child in zip(sizes, children)
        ]
        # NOTE(review): max(wp.height, min(wp.height, ...)) always evaluates to
        # write_position.height — presumably intentional so children fill the
        # full assigned height; confirm against upstream.
        height = max(write_position.height, min(write_position.height, max(heights)))

        #
        ypos = write_position.ypos
        xpos = write_position.xpos

        # Draw all child panes.
        for s, c in zip(sizes, children):
            c.write_to_screen(
                screen,
                mouse_handlers,
                WritePosition(xpos, ypos, s, height),
                style,
                erase_bg,
                z_index,
            )
            xpos += s

        # Fill in the remaining space. This happens when a child control
        # refuses to take more space and we don't have any padding. Adding a
        # dummy child control for this (in `self._all_children`) is not
        # desired, because in some situations, it would take more space, even
        # when it's not required. This is required to apply the styling.
        remaining_width = write_position.xpos + write_position.width - xpos
        if remaining_width > 0:
            self._remaining_space_window.write_to_screen(
                screen,
                mouse_handlers,
                WritePosition(xpos, ypos, remaining_width, height),
                style,
                erase_bg,
                z_index,
            )
|
VSplit
|
python
|
getsentry__sentry
|
tests/apidocs/endpoints/teams/test_index.py
|
{
"start": 136,
"end": 856
}
|
class ____(APIDocsTestCase):
    """Schema-validation tests for the organization teams index endpoint."""

    def setUp(self) -> None:
        self.create_team(organization=self.organization)
        endpoint_kwargs = {"organization_id_or_slug": self.organization.slug}
        self.url = reverse("sentry-api-0-organization-teams", kwargs=endpoint_kwargs)
        self.login_as(user=self.user)

    def test_get(self) -> None:
        # Validate the GET (list teams) response against the published schema.
        actual = self.client.get(self.url)
        mirrored = RequestFactory().get(self.url)
        self.validate_schema(mirrored, actual)

    def test_post(self) -> None:
        # Validate the POST (create team) response against the published schema.
        payload = {"name": "foo"}
        actual = self.client.post(self.url, payload)
        mirrored = RequestFactory().post(self.url, payload)
        self.validate_schema(mirrored, actual)
|
TeamsIndexDocs
|
python
|
kamyu104__LeetCode-Solutions
|
Python/sum-of-imbalance-numbers-of-all-subarrays.py
|
{
"start": 850,
"end": 1375
}
|
class ____(object):
    # Time:  O(n^2)
    # Space: O(n)
    def sumImbalanceNumbers(self, nums):
        """
        Return the sum of imbalance numbers over all subarrays of nums.

        The imbalance number of a subarray is the number of adjacent pairs
        in its sorted order whose difference is greater than 1. `curr`
        maintains the imbalance of nums[left..right] incrementally:
        imbalance == (#value-chains in the set) - 1.

        :type nums: List[int]
        :rtype: int
        """
        result = 0
        # Fix: use `range` instead of the Python-2-only `xrange`, so the
        # code runs on Python 3 with identical results.
        for right in range(len(nums)):
            lookup = {nums[right]}
            curr = 0
            for left in reversed(range(right)):
                if nums[left] not in lookup:
                    lookup.add(nums[left])
                    # A new value starts its own chain (+1), but each already
                    # present value-neighbor (v-1 / v+1) merges one boundary.
                    curr += 1 - (nums[left] - 1 in lookup) - (nums[left] + 1 in lookup)
                result += curr
        return result
|
Solution2
|
python
|
django__django
|
django/views/generic/base.py
|
{
"start": 5925,
"end": 7224
}
|
class ____:
    """A mixin that can be used to render a template."""

    template_name = None
    template_engine = None
    response_class = TemplateResponse
    content_type = None

    def render_to_response(self, context, **response_kwargs):
        """
        Return a response, using the `response_class` for this view, with a
        template rendered with the given context.

        Pass response_kwargs to the constructor of the response class.
        """
        # Only supply the view's content_type when the caller didn't set one.
        if "content_type" not in response_kwargs:
            response_kwargs["content_type"] = self.content_type
        return self.response_class(
            request=self.request,
            template=self.get_template_names(),
            context=context,
            using=self.template_engine,
            **response_kwargs,
        )

    def get_template_names(self):
        """
        Return a list of template names to be used for the request. Must return
        a list. May not be called if render_to_response() is overridden.
        """
        if self.template_name is not None:
            return [self.template_name]
        raise ImproperlyConfigured(
            "TemplateResponseMixin requires either a definition of "
            "'template_name' or an implementation of 'get_template_names()'"
        )
|
TemplateResponseMixin
|
python
|
Farama-Foundation__Gymnasium
|
gymnasium/wrappers/vector/array_conversion.py
|
{
"start": 489,
"end": 5072
}
|
class ____(VectorWrapper, gym.utils.RecordConstructorArgs):
"""Wraps a vector environment returning Array API compatible arrays so that it can be interacted with through a specific framework.
Popular Array API frameworks include ``numpy``, ``torch``, ``jax.numpy``, ``cupy`` etc. With this wrapper, you can convert outputs from your environment to
any of these frameworks. Conversely, actions are automatically mapped back to the environment framework, if possible without moving the
data or device transfers.
Notes:
A vectorized version of :class:`gymnasium.wrappers.ArrayConversion`
Example:
>>> import gymnasium as gym # doctest: +SKIP
>>> envs = gym.make_vec("JaxEnv-vx", 3) # doctest: +SKIP
>>> envs = ArrayConversion(envs, xp=np) # doctest: +SKIP
"""
def __init__(
self,
env: VectorEnv,
env_xp: ModuleType | str,
target_xp: ModuleType | str,
env_device: Device | None = None,
target_device: Device | None = None,
):
"""Wrapper class to change inputs and outputs of environment to any Array API framework.
Args:
env: The Array API compatible environment to wrap
env_xp: The Array API framework the environment is on
target_xp: The Array API framework to convert to
env_device: The device the environment is on
target_device: The device on which Arrays should be returned
"""
gym.utils.RecordConstructorArgs.__init__(self)
VectorWrapper.__init__(self, env)
self._env_xp = env_xp
self._target_xp = target_xp
self._env_device = env_device
self._target_device = target_device
def step(
self, actions: ActType
) -> tuple[ObsType, ArrayType, ArrayType, ArrayType, dict]:
"""Transforms the action to the specified xp module array type.
Args:
actions: The action to perform
Returns:
A tuple containing xp versions of the next observation, reward, termination, truncation, and extra info.
"""
actions = array_conversion(actions, xp=self._env_xp, device=self._env_device)
obs, reward, terminated, truncated, info = self.env.step(actions)
return (
array_conversion(obs, xp=self._target_xp, device=self._target_device),
array_conversion(reward, xp=self._target_xp, device=self._target_device),
array_conversion(
terminated, xp=self._target_xp, device=self._target_device
),
array_conversion(truncated, xp=self._target_xp, device=self._target_device),
array_conversion(info, xp=self._target_xp, device=self._target_device),
)
def reset(
self,
*,
seed: int | list[int] | None = None,
options: dict[str, Any] | None = None,
) -> tuple[ObsType, dict[str, Any]]:
"""Resets the environment returning xp-based observation and info.
Args:
seed: The seed for resetting the environment
options: The options for resetting the environment, these are converted to xp arrays.
Returns:
xp-based observations and info
"""
if options:
options = array_conversion(
options, xp=self._env_xp, device=self._env_device
)
return array_conversion(
self.env.reset(seed=seed, options=options),
xp=self._target_xp,
device=self._target_device,
)
def __getstate__(self):
"""Returns the object pickle state with args and kwargs."""
env_xp_name = self._env_xp.__name__.replace("array_api_compat.", "")
target_xp_name = self._target_xp.__name__.replace("array_api_compat.", "")
env_device = self._env_device
target_device = self._target_device
return {
"env_xp_name": env_xp_name,
"target_xp_name": target_xp_name,
"env_device": env_device,
"target_device": target_device,
"env": self.env,
}
def __setstate__(self, d):
"""Sets the object pickle state using d."""
self.env = d["env"]
self._env_xp = module_name_to_namespace(d["env_xp_name"])
self._target_xp = module_name_to_namespace(d["target_xp_name"])
self._env_device = d["env_device"]
self._target_device = d["target_device"]
|
ArrayConversion
|
python
|
pytorch__pytorch
|
torch/nn/modules/activation.py
|
{
"start": 791,
"end": 2409
}
|
class ____(Module):
r"""Thresholds each element of the input Tensor.
Threshold is defined as:
.. math::
y =
\begin{cases}
x, &\text{ if } x > \text{threshold} \\
\text{value}, &\text{ otherwise }
\end{cases}
Args:
threshold: The value to threshold at
value: The value to replace with
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Threshold.png
Examples::
>>> m = nn.Threshold(0, 0.5)
>>> input = torch.arange(-3, 3)
>>> output = m(input)
"""
__constants__ = ["threshold", "value", "inplace"]
threshold: float
value: float
inplace: bool
def __init__(self, threshold: float, value: float, inplace: bool = False) -> None:
super().__init__()
self.threshold = threshold
self.value = value
self.inplace = inplace
# TODO: check in THNN (if inplace == True, then assert value <= threshold)
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.threshold(input, self.threshold, self.value, self.inplace)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
inplace_str = ", inplace=True" if self.inplace else ""
return f"threshold={self.threshold}, value={self.value}{inplace_str}"
|
Threshold
|
python
|
sphinx-doc__sphinx
|
sphinx/util/logging.py
|
{
"start": 18106,
"end": 19746
}
|
class ____:
"""Stream writer storing last 10 messages in memory to save trackback"""
def __init__(self, app: Sphinx, stream: IO[str]) -> None:
self._app = app
def write(self, data: str) -> None:
self._app.messagelog.append(data)
def setup(
app: Sphinx, status: IO[str], warning: IO[str], *, verbosity: int = 0
) -> None:
"""Setup root logger for Sphinx"""
log_level = VERBOSITY_MAP[max(verbosity, 0)]
logger = logging.getLogger(NAMESPACE)
logger.setLevel(logging.DEBUG)
logger.propagate = False
# clear all handlers
for handler in logger.handlers[:]:
logger.removeHandler(handler)
info_handler = NewLineStreamHandler(SafeEncodingWriter(status))
info_handler.addFilter(InfoFilter())
info_handler.addFilter(InfoLogRecordTranslator(app))
info_handler.setLevel(log_level)
info_handler.setFormatter(ColorizeFormatter())
warning_handler = WarningStreamHandler(SafeEncodingWriter(warning))
if app._exception_on_warning:
warning_handler.addFilter(_RaiseOnWarningFilter())
warning_handler.addFilter(WarningSuppressor(app))
warning_handler.addFilter(WarningLogRecordTranslator(app))
warning_handler.addFilter(OnceFilter())
warning_handler.setLevel(logging.WARNING)
warning_handler.setFormatter(ColorizeFormatter())
messagelog_handler = logging.StreamHandler(LastMessagesWriter(app, status))
messagelog_handler.addFilter(InfoFilter())
messagelog_handler.setLevel(log_level)
logger.addHandler(info_handler)
logger.addHandler(warning_handler)
logger.addHandler(messagelog_handler)
|
LastMessagesWriter
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 850780,
"end": 851239
}
|
class ____(sgqlc.types.Type):
"""The value of a milestone field in a Project item."""
__schema__ = github_schema
__field_names__ = ("field", "milestone")
field = sgqlc.types.Field(sgqlc.types.non_null("ProjectV2FieldConfiguration"), graphql_name="field")
"""The field that contains this value."""
milestone = sgqlc.types.Field("Milestone", graphql_name="milestone")
"""Milestone value of a field"""
|
ProjectV2ItemFieldMilestoneValue
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/integration/request_builder.py
|
{
"start": 1014,
"end": 5471
}
|
class ____:
@classmethod
def get_ad_endpoint(cls, access_token: str, account_id: str) -> RequestBuilder:
return cls(access_token=access_token, resource="ads").with_account_id(account_id)
@classmethod
def get_campaign_endpoint(cls, access_token: str, account_id: str) -> RequestBuilder:
return cls(access_token=access_token, resource="campaigns").with_account_id(account_id)
@classmethod
def get_ad_sets_endpoint(cls, access_token: str, account_id: str) -> RequestBuilder:
return cls(access_token=access_token, resource="adsets").with_account_id(account_id)
@classmethod
def get_account_endpoint(cls, access_token: str, account_id: str) -> RequestBuilder:
return cls(access_token=access_token).with_account_id(account_id)
@classmethod
def get_videos_endpoint(cls, access_token: str, account_id: str) -> RequestBuilder:
return cls(access_token=access_token, resource="advideos").with_account_id(account_id)
@classmethod
def get_insights_endpoint(cls, access_token: str, account_id: str) -> RequestBuilder:
return cls(access_token=access_token, resource="insights").with_account_id(account_id)
@classmethod
def get_execute_batch_endpoint(cls, access_token: str) -> RequestBuilder:
return cls(access_token=access_token)
@classmethod
def get_insights_download_endpoint(cls, access_token: str, job_id: str) -> RequestBuilder:
return cls(access_token=access_token, resource=f"{job_id}/insights")
def __init__(self, access_token: str, resource: Optional[str] = "") -> None:
self._account_id = None
self._resource = resource
self._query_params = {"access_token": access_token}
self._body = None
def with_account_id(self, account_id: str) -> RequestBuilder:
self._account_id = account_id
return self
def with_limit(self, limit: int) -> RequestBuilder:
self._query_params["limit"] = limit
return self
def with_summary(self) -> RequestBuilder:
self._query_params["summary"] = "true"
return self
def with_fields(self, fields: List[str]) -> RequestBuilder:
self._query_params["fields"] = self._get_formatted_fields(fields)
return self
def with_next_page_token(self, next_page_token: str) -> RequestBuilder:
self._query_params["after"] = next_page_token
return self
def with_body(self, body: Union[str, bytes, Mapping[str, Any]]) -> RequestBuilder:
self._body = body
return self
def with_filtering(self, filters: List[Dict[str, Any]]):
self._query_params["filtering"] = self._get_formatted_filters(filters)
return self
def build(self) -> HttpRequest:
return HttpRequest(
url=f"https://graph.facebook.com/{API_VERSION}/{self._account_sub_path()}{self._resource}",
query_params=self._query_params,
body=self._body,
)
def _account_sub_path(self) -> str:
return f"act_{self._account_id}/" if self._account_id else ""
@staticmethod
def _get_formatted_fields(fields: List[str]) -> str:
return ",".join(fields)
@staticmethod
def _get_formatted_filters(filters: List[Dict[str, Any]]) -> str:
"""
Used to create an acceptable by fb query param from list of dict filters in string format
From:
[{"field": "ad.effective_status", "operator": "IN", "value": ["ACTIVE", "ARCHIVED"]}, {"field": "ad.updated_time", "operator": "GREATER_THAN", "value": 1672531200}]
To:
'[{"field":"ad.effective_status","operator":"IN","value":["ACTIVE","ARCHIVED"]},' '{"field":"ad.updated_time","operator":"GREATER_THAN","value":1672531200}]'
"""
field_filter = []
field_filters = []
for f in filters:
for key, value in f.items():
if isinstance(value, list):
value = ",".join([f'"{s}"' for s in value])
field_filter.append(f'"{key}":[{value}]')
elif isinstance(value, int):
field_filter.append(f'"{key}":{value}')
else:
field_filter.append(f'"{key}":"{value}"')
field_filters.append("{" + f'{",".join(field_filter)}' + "}")
field_filter = []
field_filters_str = f'[{",".join(field_filters)}]'
return field_filters_str
|
RequestBuilder
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/device.py
|
{
"start": 2962,
"end": 5961
}
|
class ____(object):
"""Wraps a device specification (DeviceSpec or str) with merge functionality.
When called, this class will merge a node_def with its own spec. It also
exposes a `shortcut_string_merge` method which can significantly improve
performance of device placement.
"""
__slots__ = ["_spec"]
def __init__(self, spec):
if isinstance(spec, device_spec.DeviceSpecV2):
self._spec = spec
elif isinstance(spec, device_spec.DeviceSpecV1):
# Capture a snapshot of spec.
self._spec = spec.__class__.from_string(spec.to_string())
else:
self._spec = DeviceSpec.from_string(spec)
def __call__(self, node_def):
# In general a user may create a device function which takes into account
# arbitrary properties of an op. (For instance dynamically placing ops based
# on type.) So even though the standard DeviceSpec route only uses the
# device attribute, we take an entire node_def to maintain a consistent
# signature with general device functions.
current_device = DeviceSpec.from_string(node_def.device or "")
return self._spec.make_merged_spec(current_device)
def shortcut_string_merge(self, node_def):
"""Merge a node def without materializing a full DeviceSpec object.
Often a device merge is invoked in order to generate a string which can be
passed into the c api. In such a case, we can cache the
node_def.device -> merge_result_string
map, and in most cases avoid:
- Materializing a copy of self._spec (In the case of DeviceSpecV1)
- Materializing a DeviceSpec for node_def.device
- A DeviceSpec.merge_from invocation
In practice the cache hit rate for this function is very high, because the
number of invocations when iterating through the device stack is much
larger than the number of devices.
Args:
node_def: An Operation (or Operation-like) to merge device constraints
with self._spec
Returns:
A string containing the merged device specification.
"""
device = node_def.device or ""
merge_key = (self._spec, device)
result = _string_merge_cache.get(merge_key)
if result is None:
# This update is not atomic, however because the merge is stateless
# we don't need to lock when updating the cache.
result = self.__call__(node_def).to_string()
_string_merge_cache[merge_key] = result
return result
def __repr__(self):
return "{} (spec: {})".format(
super(MergeDevice, self).__repr__(), self._spec.to_string())
@property
def is_null_merge(self):
"""Indicate whether the wrapped spec is empty.
In the degenerate case where self._spec is an empty specification, a caller
may wish to skip a merge step entirely. (However this class does not have
enough information to make that determination.)
Returns:
A boolean indicating whether a device merge will be trivial.
"""
return not bool(self._spec.to_string())
|
MergeDevice
|
python
|
kamyu104__LeetCode-Solutions
|
Python/largest-1-bordered-square.py
|
{
"start": 33,
"end": 930
}
|
class ____(object):
def largest1BorderedSquare(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
top, left = [a[:] for a in grid], [a[:] for a in grid]
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
if not grid[i][j]:
continue
if i:
top[i][j] = top[i-1][j] + 1
if j:
left[i][j] = left[i][j-1] + 1
for l in reversed(xrange(1, min(len(grid), len(grid[0]))+1)):
for i in xrange(len(grid)-l+1):
for j in xrange(len(grid[0])-l+1):
if min(top[i+l-1][j],
top[i+l-1][j+l-1],
left[i][j+l-1],
left[i+l-1][j+l-1]) >= l:
return l*l
return 0
|
Solution
|
python
|
PrefectHQ__prefect
|
tests/server/models/test_deployments.py
|
{
"start": 482,
"end": 15185
}
|
class ____:
async def test_create_deployment_succeeds(self, session, flow):
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment",
flow_id=flow.id,
parameters={"foo": "bar"},
tags=["foo", "bar"],
),
)
assert deployment.name == "My Deployment"
assert deployment.flow_id == flow.id
assert deployment.parameters == {"foo": "bar"}
assert deployment.tags == ["foo", "bar"]
assert deployment.global_concurrency_limit is None
async def test_creating_a_deployment_with_existing_work_queue_is_ok(
self, session, flow, work_queue
):
# work_queue fixture creates a work queue with name "wq-1"
wq = await models.work_queues.read_work_queue_by_name(
session=session, name=work_queue.name
)
assert wq == work_queue
await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="d1",
work_queue_name=work_queue.name,
flow_id=flow.id,
),
)
await session.commit()
async def test_creating_a_deployment_does_not_create_work_queue(
self, session, flow
):
# There was an issue where create_deployment always created a work queue when its name was provided.
# This test ensures that this no longer happens. See: https://github.com/PrefectHQ/prefect/pull/9046
wq = await models.work_queues.read_work_queue_by_name(
session=session, name="wq-1"
)
assert wq is None
await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="d1",
work_queue_name="wq-1",
flow_id=flow.id,
),
)
await session.commit()
wq = await models.work_queues.read_work_queue_by_name(
session=session, name="wq-1"
)
assert wq is None
async def test_create_deployment_with_work_pool(self, session, flow, work_queue):
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment",
flow_id=flow.id,
work_queue_id=work_queue.id,
),
)
assert deployment.work_queue_id == work_queue.id
async def test_create_deployment_updates_existing_deployment(
self,
session,
flow,
):
openapi_schema = {
"title": "Parameters",
"type": "object",
"properties": {
"foo": {"title": "foo", "default": "Will", "type": "string"}
},
}
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment",
flow_id=flow.id,
parameter_openapi_schema=openapi_schema,
),
)
original_update_time = deployment.updated
assert deployment.name == "My Deployment"
assert deployment.flow_id == flow.id
assert deployment.parameters == {}
assert deployment.parameter_openapi_schema == openapi_schema
assert deployment.tags == []
await anyio.sleep(1) # Sleep so update time is easy to differentiate
schedule = schemas.schedules.IntervalSchedule(
interval=datetime.timedelta(days=1)
)
openapi_schema["properties"]["new"] = {
"title": "new",
"default": True,
"type": "bool",
}
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment",
flow_id=flow.id,
schedules=[schemas.core.DeploymentSchedule(schedule=schedule)],
parameters={"foo": "bar"},
parameter_openapi_schema=openapi_schema,
tags=["foo", "bar"],
),
)
assert deployment.name == "My Deployment"
assert deployment.flow_id == flow.id
assert not deployment.paused
assert len(deployment.schedules) == 1
assert deployment.schedules[0].schedule == schedule
assert deployment.parameters == {"foo": "bar"}
assert deployment.parameter_openapi_schema == openapi_schema
assert deployment.tags == ["foo", "bar"]
assert deployment.updated > original_update_time
async def test_create_deployment_with_schedule(self, session, flow, flow_function):
schedule = schemas.schedules.IntervalSchedule(
interval=datetime.timedelta(days=1)
)
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment",
flow_id=flow.id,
schedules=[schemas.core.DeploymentSchedule(schedule=schedule)],
),
)
assert deployment.name == "My Deployment"
assert deployment.flow_id == flow.id
assert len(deployment.schedules) == 1
assert deployment.schedules[0].schedule == schedule
async def test_create_deployment_with_created_by(self, session, flow):
created_by = schemas.core.CreatedBy(
id=uuid4(), type="A-TYPE", display_value="creator-of-things"
)
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My New Deployment",
flow_id=flow.id,
created_by=created_by,
tags=["tag1"],
),
)
assert deployment.created_by
assert deployment.created_by.id == created_by.id
assert deployment.created_by.display_value == created_by.display_value
assert deployment.created_by.type == created_by.type
# created_by unaffected by upsert
new_created_by = schemas.core.CreatedBy(
id=uuid4(), type="B-TYPE", display_value="other-creator-of-things"
)
updated_deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My New Deployment",
flow_id=flow.id,
created_by=new_created_by,
tags=["tag2"],
),
)
# confirm upsert
assert deployment.id == updated_deployment.id
assert updated_deployment.tags == ["tag2"]
# confirm created_by unaffected
assert updated_deployment.created_by.id == created_by.id
assert updated_deployment.created_by.display_value == created_by.display_value
assert updated_deployment.created_by.type == created_by.type
async def test_create_deployment_with_updated_by(self, session, flow):
updated_by = schemas.core.UpdatedBy(
id=uuid4(), type="A-TYPE", display_value="updator-of-things"
)
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My New Deployment",
flow_id=flow.id,
updated_by=updated_by,
),
)
assert deployment.updated_by
assert deployment.updated_by.id == updated_by.id
assert deployment.updated_by.display_value == updated_by.display_value
assert deployment.updated_by.type == updated_by.type
# updated_by updated via upsert
new_updated_by = schemas.core.UpdatedBy(
id=uuid4(), type="B-TYPE", display_value="other-updator-of-things"
)
updated_deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My New Deployment",
flow_id=flow.id,
updated_by=new_updated_by,
),
)
# confirm updated_by upsert
assert deployment.id == updated_deployment.id
assert updated_deployment.updated_by.id == new_updated_by.id
assert (
updated_deployment.updated_by.display_value == new_updated_by.display_value
)
assert updated_deployment.updated_by.type == new_updated_by.type
async def test_create_deployment_with_concurrency_limit(
self, session: AsyncSession, flow: orm_models.Flow
):
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment",
flow_id=flow.id,
concurrency_limit=2,
),
)
assert deployment is not None
assert deployment._concurrency_limit == 2
assert deployment.global_concurrency_limit is not None
assert deployment.global_concurrency_limit.limit == 2
async def test_create_deployment_with_global_concurrency_limit_id(
self, session: AsyncSession, flow: orm_models.Flow
):
# Create a global concurrency limit first
global_limit = await models.concurrency_limits_v2.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimitV2(
name="test-limit",
limit=3,
),
)
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment",
flow_id=flow.id,
concurrency_limit_id=global_limit.id,
),
)
assert deployment is not None
assert deployment.concurrency_limit_id == global_limit.id
assert deployment.global_concurrency_limit.id == global_limit.id
assert deployment.global_concurrency_limit.limit == 3
async def test_create_deployment_with_nonexistent_concurrency_limit_id(
self, session: AsyncSession, flow: orm_models.Flow
):
nonexistent_id = uuid4()
with pytest.raises(sa.exc.IntegrityError):
await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment",
flow_id=flow.id,
concurrency_limit_id=nonexistent_id,
),
)
async def test_create_deployment_retains_concurrency_limit_if_not_provided_on_upsert(
self,
session: AsyncSession,
deployment: orm_models.Deployment,
):
"""Ensure that old prefect clients that don't know about concurrency limits can still use them server-side.
This means that if a deployment has a concurrency limit (possibly created through the Cloud UI), but the client
is an old version that doesn't know about concurrency limits, then when using `prefect deploy`, the old client
should not remove the concurrency limit from the existing deployment.
"""
await models.deployments.update_deployment(
session,
deployment.id,
schemas.actions.DeploymentUpdate(concurrency_limit=5),
)
await session.commit()
await session.refresh(deployment)
gcl_id = deployment.concurrency_limit_id
updated_deployment = await models.deployments.create_deployment(
session,
schemas.core.Deployment(
id=deployment.id,
name=deployment.name,
flow_id=deployment.flow_id,
# no explicit concurrency_limit set
),
)
assert updated_deployment is not None
assert updated_deployment.global_concurrency_limit is not None
assert updated_deployment.global_concurrency_limit.limit == 5
assert updated_deployment.concurrency_limit_id == gcl_id
assert updated_deployment._concurrency_limit == 5
assert (
await models.concurrency_limits_v2.read_concurrency_limit(session, gcl_id)
is not None
), "Expected the concurrency limit to still exist, but it does not"
async def test_create_deployment_can_remove_concurrency_limit_on_upsert(
self,
session: AsyncSession,
deployment: orm_models.Deployment,
):
await models.deployments.update_deployment(
session,
deployment.id,
schemas.actions.DeploymentUpdate(concurrency_limit=5),
)
await session.commit()
assert deployment.global_concurrency_limit is not None
assert deployment.global_concurrency_limit.limit == 5
gcl_id = deployment.concurrency_limit_id
updated_deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
id=deployment.id,
name=deployment.name,
flow_id=deployment.flow_id,
concurrency_limit=None,
),
)
assert updated_deployment.global_concurrency_limit is None
assert updated_deployment.concurrency_limit_id is None
assert updated_deployment._concurrency_limit is None
assert (
await models.concurrency_limits_v2.read_concurrency_limit(session, gcl_id)
is None
), "Expected the concurrency limit to be deleted, but it was not"
async def test_create_deployment_with_concurrency_options(self, session, flow):
concurrency_options = schemas.core.ConcurrencyOptions(
collision_strategy="ENQUEUE",
)
deployment = await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment",
flow_id=flow.id,
concurrency_limit=42,
concurrency_options=concurrency_options,
),
)
assert deployment._concurrency_limit == 42
assert deployment.global_concurrency_limit.limit == 42
assert (
deployment.concurrency_options.collision_strategy
== concurrency_options.collision_strategy
)
|
TestCreateDeployment
|
python
|
pydantic__pydantic
|
pydantic/v1/errors.py
|
{
"start": 3810,
"end": 3940
}
|
class ____(PydanticTypeError):
code = 'none.not_allowed'
msg_template = 'none is not an allowed value'
|
NoneIsNotAllowedError
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/trainer/connectors/signal_connector.py
|
{
"start": 637,
"end": 1211
}
|
class ____:
def __init__(self, signal_handlers: Union[list[_HANDLER], _HANDLER]) -> None:
if not isinstance(signal_handlers, list):
signal_handlers = [signal_handlers]
self.signal_handlers = signal_handlers
def __call__(self, signum: _SIGNUM, frame: FrameType) -> None:
for signal_handler in self.signal_handlers:
if isinstance(signal_handler, int):
signal_handler = signal.getsignal(signal_handler)
if callable(signal_handler):
signal_handler(signum, frame)
|
_HandlersCompose
|
python
|
huggingface__transformers
|
src/transformers/models/video_llama_3/modular_video_llama_3.py
|
{
"start": 10470,
"end": 14304
}
|
class ____(SiglipAttention):
def __init__(self, config):
super().__init__(config)
self.num_key_value_groups = 1
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
del self.scale
del self.dropout
def forward(
self,
hidden_states: torch.Tensor,
cu_seqlens: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Args:
hidden_states (`torch.Tensor`):
Input to the layer of shape `(seq_len, embed_dim)`.
cu_seqlens (`torch.Tensor` of shape `(num_images_or_videos + 1,)`):
The cumulative sequence lengths of each image or video feature.
position_embeddings (`tuple(torch.Tensor, torch.Tensor)` of shape `(num_patches, head_dim // 2)`):
The cosine and sine position embeddings for vision attention.
"""
seq_length = hidden_states.shape[0]
query_states = self.q_proj(hidden_states).view(seq_length, self.num_heads, self.head_dim)
key_states = self.k_proj(hidden_states).view(seq_length, self.num_heads, self.head_dim)
value_states = self.v_proj(hidden_states).view(seq_length, self.num_heads, self.head_dim)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
query_states = query_states.transpose(0, 1).unsqueeze(0)
key_states = key_states.transpose(0, 1).unsqueeze(0)
value_states = value_states.transpose(0, 1).unsqueeze(0)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
if self.config._attn_implementation == "flash_attention_2":
# Flash Attention 2: Use cu_seqlens for variable length attention
max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask=None,
scaling=self.scaling,
dropout=0.0 if not self.training else self.attention_dropout,
cu_seq_lens_q=cu_seqlens,
cu_seq_lens_k=cu_seqlens,
max_length_q=max_seqlen,
max_length_k=max_seqlen,
is_causal=False,
**kwargs,
)
else:
# Other implementations: Process each chunk separately
lengths = cu_seqlens[1:] - cu_seqlens[:-1]
splits = [
torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
]
attn_outputs, attn_weights = [], []
for q, k, v in zip(*splits):
attn_output, attn_weight = attention_interface(
self,
q,
k,
v,
attention_mask=None,
scaling=self.scaling,
dropout=0.0 if not self.training else self.attention_dropout,
is_causal=False,
**kwargs,
)
attn_outputs.append(attn_output)
attn_weights.append(attn_weight)
attn_output = torch.cat(attn_outputs, dim=1)
attn_output = attn_output.reshape(seq_length, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
|
VideoLlama3VisionAttention
|
python
|
huggingface__transformers
|
src/transformers/models/olmo3/modeling_olmo3.py
|
{
"start": 19460,
"end": 22558
}
|
class ____(Olmo3PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Olmo3Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, Olmo3ForCausalLM
>>> model = Olmo3ForCausalLM.from_pretrained("meta-olmo3/Olmo3-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-olmo3/Olmo3-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["Olmo3ForCausalLM", "Olmo3Model", "Olmo3PreTrainedModel"]
|
Olmo3ForCausalLM
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/linalg/test_linalg.py
|
{
"start": 23348,
"end": 23438
}
|
class ____(SVDHermitianCases, SVDBaseTests, TestCase):
hermitian = True
|
TestSVDHermitian
|
python
|
pypa__pip
|
src/pip/_internal/models/index.py
|
{
"start": 22,
"end": 1030
}
|
class ____:
"""Represents a Package Index and provides easier access to endpoints"""
__slots__ = ["url", "netloc", "simple_url", "pypi_url", "file_storage_domain"]
def __init__(self, url: str, file_storage_domain: str) -> None:
super().__init__()
self.url = url
self.netloc = urllib.parse.urlsplit(url).netloc
self.simple_url = self._url_for_path("simple")
self.pypi_url = self._url_for_path("pypi")
# This is part of a temporary hack used to block installs of PyPI
# packages which depend on external urls only necessary until PyPI can
# block such packages themselves
self.file_storage_domain = file_storage_domain
def _url_for_path(self, path: str) -> str:
return urllib.parse.urljoin(self.url, path)
# Canonical index instances shared across the codebase.
PyPI = PackageIndex("https://pypi.org/", file_storage_domain="files.pythonhosted.org")
TestPyPI = PackageIndex(
    "https://test.pypi.org/", file_storage_domain="test-files.pythonhosted.org"
)
|
PackageIndex
|
python
|
django__django
|
tests/db_functions/text/test_replace.py
|
{
"start": 157,
"end": 2604
}
|
class ____(TestCase):
    """Tests for the ``Replace`` database function."""

    @classmethod
    def setUpTestData(cls):
        for name in ("George R. R. Martin", "J. R. R. Tolkien"):
            Author.objects.create(name=name)

    def test_replace_with_empty_string(self):
        """Replacing with '' strips every occurrence of the target text."""
        annotated = Author.objects.annotate(
            without_middlename=Replace(F("name"), Value("R. R. "), Value("")),
        )
        self.assertQuerySetEqual(
            annotated,
            [
                ("George R. R. Martin", "George Martin"),
                ("J. R. R. Tolkien", "J. Tolkien"),
            ],
            transform=lambda author: (author.name, author.without_middlename),
            ordered=False,
        )

    def test_case_sensitive(self):
        """Matching is case sensitive, so a lowercase target changes nothing."""
        annotated = Author.objects.annotate(
            same_name=Replace(F("name"), Value("r. r."), Value(""))
        )
        self.assertQuerySetEqual(
            annotated,
            [
                ("George R. R. Martin", "George R. R. Martin"),
                ("J. R. R. Tolkien", "J. R. R. Tolkien"),
            ],
            transform=lambda author: (author.name, author.same_name),
            ordered=False,
        )

    def test_replace_expression(self):
        """The searched text may be any expression, not just a column."""
        annotated = Author.objects.annotate(
            same_name=Replace(
                Concat(Value("Author: "), F("name")), Value("Author: "), Value("")
            ),
        )
        self.assertQuerySetEqual(
            annotated,
            [
                ("George R. R. Martin", "George R. R. Martin"),
                ("J. R. R. Tolkien", "J. R. R. Tolkien"),
            ],
            transform=lambda author: (author.name, author.same_name),
            ordered=False,
        )

    def test_update(self):
        """Replace() can drive an UPDATE that rewrites the stored value."""
        Author.objects.update(
            name=Replace(F("name"), Value("R. R. "), Value("")),
        )
        self.assertQuerySetEqual(
            Author.objects.all(),
            [
                "George Martin",
                "J. Tolkien",
            ],
            transform=lambda author: author.name,
            ordered=False,
        )

    def test_replace_with_default_arg(self):
        """Omitting the replacement argument defaults to the empty string."""
        annotated = Author.objects.annotate(same_name=Replace(F("name"), Value("R. R. ")))
        self.assertQuerySetEqual(
            annotated,
            [
                ("George R. R. Martin", "George Martin"),
                ("J. R. R. Tolkien", "J. Tolkien"),
            ],
            transform=lambda author: (author.name, author.same_name),
            ordered=False,
        )
|
ReplaceTests
|
python
|
walkccc__LeetCode
|
solutions/1208. Get Equal Substrings Within Budget/1208.py
|
{
"start": 0,
"end": 269
}
|
class ____:
def equalSubstring(self, s: str, t: str, maxCost: int) -> int:
j = 0
for i in range(len(s)):
maxCost -= abs(ord(s[i]) - ord(t[i]))
if maxCost < 0:
maxCost += abs(ord(s[j]) - ord(t[j]))
j += 1
return len(s) - j
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 860660,
"end": 861052
}
|
class ____(sgqlc.types.Type):
    """An edge in a connection."""

    # sgqlc binding for the GitHub GraphQL schema (likely auto-generated;
    # edit with care). Follows the Relay connection "edge" shape:
    # a pagination cursor plus the node at the end of the edge.
    __schema__ = github_schema
    __field_names__ = ("cursor", "node")
    cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
    """A cursor for use in pagination."""
    node = sgqlc.types.Field(ProjectV2SortBy, graphql_name="node")
    """The item at the end of the edge."""
|
ProjectV2SortByEdge
|
python
|
scikit-image__scikit-image
|
src/skimage/transform/_geometric.py
|
{
"start": 53606,
"end": 61705
}
|
class ____(_GeometricTransform):
    """Piecewise affine transformation.
    Control points are used to define the mapping. The transform is based on
    a Delaunay triangulation of the points to form a mesh. Each triangle is
    used to find a local affine transform.
    Attributes
    ----------
    affines : list of AffineTransform objects
        Affine transformations for each triangle in the mesh.
    inverse_affines : list of AffineTransform objects
        Inverse affine transformations for each triangle in the mesh.
    Examples
    --------
    >>> import numpy as np
    >>> import skimage as ski
    Define a transformation by estimation:
    >>> src = [[-12.3705, -10.5075],
    ... [-10.7865, 15.4305],
    ... [8.6985, 10.8675],
    ... [11.4975, -9.5715],
    ... [7.8435, 7.4835],
    ... [-5.3325, 6.5025],
    ... [6.7905, -6.3765],
    ... [-6.1695, -0.8235]]
    >>> dst = [[0, 0],
    ... [0, 5800],
    ... [4900, 5800],
    ... [4900, 0],
    ... [4479, 4580],
    ... [1176, 3660],
    ... [3754, 790],
    ... [1024, 1931]]
    >>> tform = ski.transform.PiecewiseAffineTransform.from_estimate(src, dst)
    Calling the transform applies the transformation to the points:
    >>> np.allclose(tform(src), dst)
    True
    You can apply the inverse transform:
    >>> np.allclose(tform.inverse(dst), src)
    True
    The estimation can fail - for example, if all the input or output points
    are the same. If this happens, you will get a transform that is not
    "truthy" - meaning that ``bool(tform)`` is ``False``:
    >>> # A successfully estimated model is truthy (applying ``bool()``
    >>> # gives ``True``):
    >>> if tform:
    ...     print("Estimation succeeded.")
    Estimation succeeded.
    >>> # Not so for a degenerate transform with identical points.
    >>> bad_src = [[1, 1]] * 6 + src[6:]
    >>> bad_tform = ski.transform.PiecewiseAffineTransform.from_estimate(
    ...     bad_src, dst)
    >>> if not bad_tform:
    ...     print("Estimation failed.")
    Estimation failed.
    Trying to use this failed estimation transform result will give a suitable
    error:
    >>> bad_tform.params  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    FailedEstimationAccessError: No attribute "params" for failed estimation ...
    """
    def __init__(self):
        # All state is populated by _estimate(); None marks "not estimated".
        self._tesselation = None
        self._inverse_tesselation = None
        self.affines = None
        self.inverse_affines = None
    @classmethod
    def from_estimate(cls, src, dst):
        """Estimate the transformation from a set of corresponding points.
        Number of source and destination coordinates must match.
        Parameters
        ----------
        src : (N, D) array_like
            Source coordinates.
        dst : (N, D) array_like
            Destination coordinates.
        Returns
        -------
        tf : Self or ``FailedEstimation``
            An instance of the transformation if the estimation succeeded.
            Otherwise, we return a special ``FailedEstimation`` object to
            signal a failed estimation. Testing the truth value of the failed
            estimation object will return ``False``. E.g.
            .. code-block:: python
                tf = PiecewiseAffineTransform.from_estimate(...)
                if not tf:
                    raise RuntimeError(f"Failed estimation: {tf}")
        """
        return super().from_estimate(src, dst)
    def _estimate(self, src, dst):
        # Returns None on success, or a failure-message string (consumed by
        # the base-class estimation machinery).
        src = np.asarray(src)
        dst = np.asarray(dst)
        N, D = src.shape
        # forward piecewise affine
        # triangulate input positions into mesh
        self._tesselation = spatial.Delaunay(src)
        # NaN placeholder matrix substituted for any triangle whose affine
        # estimate fails, keeping self.affines aligned with the simplices.
        fail_matrix = np.full((D + 1, D + 1), np.nan)
        # find affine mapping from source positions to destination
        self.affines = []
        messages = []
        for i, tri in enumerate(self._tesselation.simplices):
            affine = AffineTransform.from_estimate(src[tri, :], dst[tri, :])
            if not affine:
                messages.append(f'Failure at forward simplex {i}: {affine}')
                affine = AffineTransform(fail_matrix.copy())
            self.affines.append(affine)
        # inverse piecewise affine
        # triangulate input positions into mesh
        self._inverse_tesselation = spatial.Delaunay(dst)
        # find affine mapping from source positions to destination
        self.inverse_affines = []
        for i, tri in enumerate(self._inverse_tesselation.simplices):
            affine = AffineTransform.from_estimate(dst[tri, :], src[tri, :])
            if not affine:
                messages.append(f'Failure at inverse simplex {i}: {affine}')
                affine = AffineTransform(fail_matrix.copy())
            self.inverse_affines.append(affine)
        return '; '.join(messages) if messages else None
    def __call__(self, coords):
        """Apply forward transformation.
        Coordinates outside of the mesh will be set to `- 1`.
        Parameters
        ----------
        coords : (N, D) array_like
            Source coordinates.
        Returns
        -------
        coords : (N, 2) array
            Transformed coordinates.
        """
        coords = np.asarray(coords)
        out = np.empty_like(coords, np.float64)
        # determine triangle index for each coordinate
        simplex = self._tesselation.find_simplex(coords)
        # coordinates outside of mesh
        out[simplex == -1, :] = -1
        for index in range(len(self._tesselation.simplices)):
            # affine transform for triangle
            affine = self.affines[index]
            # all coordinates within triangle
            index_mask = simplex == index
            out[index_mask, :] = affine(coords[index_mask, :])
        return out
    @property
    def inverse(self):
        """Return a transform object representing the inverse."""
        # Swap forward and inverse meshes/affines into a new instance.
        tform = type(self)()
        # Copy parameters (None or list) for safety.
        tform._tesselation = copy(self._inverse_tesselation)
        tform._inverse_tesselation = copy(self._tesselation)
        tform.affines = copy(self.inverse_affines)
        tform.inverse_affines = copy(self.affines)
        return tform
    @classmethod
    def identity(cls, dimensionality=None):
        """Identity transform
        Parameters
        ----------
        dimensionality : optional
            This transform does not use the `dimensionality` parameter, so the
            value is ignored. The parameter exists for compatibility with
            other transforms.
        Returns
        -------
        tform : transform
            Transform such that ``np.all(tform(pts) == pts)``.
        """
        # A freshly constructed (un-estimated) instance serves as identity.
        return cls()
    @_deprecate_estimate
    def estimate(self, src, dst):
        """Estimate the transformation from a set of corresponding points.
        Number of source and destination coordinates must match.
        Parameters
        ----------
        src : (N, D) array_like
            Source coordinates.
        dst : (N, D) array_like
            Destination coordinates.
        Returns
        -------
        success : bool
            True, if all pieces of the model are successfully estimated.
        """
        # Deprecated API: delegates to _estimate(), which returns None on
        # success and a message string on failure.
        return self._estimate(src, dst) is None
def _euler_rotation_matrix(angles, degrees=False):
"""Produce an Euler rotation matrix from the given intrinsic rotation angles
for the axes x, y and z.
Parameters
----------
angles : array of float, shape (3,)
The transformation angles in radians.
degrees : bool, optional
If True, then the given angles are assumed to be in degrees. Default is False.
Returns
-------
R : array of float, shape (3, 3)
The Euler rotation matrix.
"""
return spatial.transform.Rotation.from_euler(
'XYZ', angles=angles, degrees=degrees
).as_matrix()
|
PiecewiseAffineTransform
|
python
|
ansible__ansible
|
test/units/parsing/vault/test_vault.py
|
{
"start": 2794,
"end": 4529
}
|
class ____(unittest.TestCase):
    # Checks vault.parse_vaulttext() against a fixed
    # "$ANSIBLE_VAULT;1.1;AES256" envelope fixture.

    def test(self):
        # Well-formed envelope: header line followed by a hex-encoded payload.
        vaulttext_envelope = u"""$ANSIBLE_VAULT;1.1;AES256
33363965326261303234626463623963633531343539616138316433353830356566396130353436
3562643163366231316662386565383735653432386435610a306664636137376132643732393835
63383038383730306639353234326630666539346233376330303938323639306661313032396437
6233623062366136310a633866373936313238333730653739323461656662303864663666653563
3138"""
        b_vaulttext_envelope = to_bytes(vaulttext_envelope, errors='strict', encoding='utf-8')
        b_vaulttext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext_envelope)
        res = vault.parse_vaulttext(b_vaulttext)
        # All three parsed components come back as raw bytes.
        self.assertIsInstance(res[0], bytes)
        self.assertIsInstance(res[1], bytes)
        self.assertIsInstance(res[2], bytes)

    def test_non_hex(self):
        # Same fixture with a 'J' smuggled into the first payload line;
        # 'J' is not a valid hexadecimal digit.
        vaulttext_envelope = u"""$ANSIBLE_VAULT;1.1;AES256
3336396J326261303234626463623963633531343539616138316433353830356566396130353436
3562643163366231316662386565383735653432386435610a306664636137376132643732393835
63383038383730306639353234326630666539346233376330303938323639306661313032396437
6233623062366136310a633866373936313238333730653739323461656662303864663666653563
3138"""
        b_vaulttext_envelope = to_bytes(vaulttext_envelope, errors='strict', encoding='utf-8')
        b_vaulttext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext_envelope)
        # NOTE(review): the full envelope (not b_vaulttext) is passed to
        # parse_vaulttext here; the non-hex digit still triggers the
        # unhexlify error — confirm whether b_vaulttext was intended.
        self.assertRaisesRegex(vault.AnsibleVaultFormatError,
                               '.*Vault format unhexlify error.*Non-hexadecimal digit found',
                               vault.parse_vaulttext,
                               b_vaulttext_envelope)
|
TestParseVaulttext
|
python
|
django__django
|
tests/model_fields/models.py
|
{
"start": 1267,
"end": 1587
}
|
class ____(models.Model):
    # Choices given in Django's mixed mapping form: two named groups
    # (one as a dict, one as a tuple of pairs) plus ungrouped entries,
    # including a lazily translated label.
    CHOICES = {
        "Group 1": {
            1: "First",
            2: "Second",
        },
        "Group 2": (
            (3, "Third"),
            (4, "Fourth"),
        ),
        0: "Other",
        5: _("translated"),
    }
    # Nullable integer validated against CHOICES.
    c = models.IntegerField(choices=CHOICES, null=True)
|
Whiz
|
python
|
jina-ai__jina
|
tests/docker_compose/multiprotocol-gateway/multiprotocol_gateway.py
|
{
"start": 308,
"end": 365
}
|
class ____(BaseModel):
    """Pydantic-style response model carrying a single ``protocol`` string
    field; presumably reports which protocol served the request — confirm
    against the gateway endpoints that return it."""

    protocol: str
|
DummyResponseModel
|
python
|
Textualize__rich
|
benchmarks/benchmarks.py
|
{
"start": 3921,
"end": 4486
}
|
class ____:
    """Benchmarks for rendering ``Pretty`` output to a console."""

    def setup(self):
        # Render into an in-memory buffer with fixed settings so timings
        # are deterministic and exclude real terminal I/O.
        self.console = Console(
            file=StringIO(), color_system="truecolor", legacy_windows=False, width=100
        )

    def time_pretty(self):
        renderable = Pretty(snippets.PYTHON_DICT)
        self.console.print(renderable)

    def time_pretty_indent_guides(self):
        renderable = Pretty(snippets.PYTHON_DICT, indent_guides=True)
        self.console.print(renderable)

    def time_pretty_justify_center(self):
        renderable = Pretty(snippets.PYTHON_DICT, justify="center")
        self.console.print(renderable)
|
PrettySuite
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.