column          dtype             range (values for int64, lengths for string)
n_words         int64             3 – 1.95k
n_ast_errors    int64             0 – 2
complexity      int64             1 – 151
nloc            int64             2 – 546
path            string (lengths)  8 – 125
id              int64             280 – 339k
commit_message  string (lengths)  3 – 18.1k
repo            string (lengths)  3 – 28
ast_levels      int64             4 – 28
language        string (classes)  1 value
vocab_size      int64             3 – 677
file_name       string (lengths)  5 – 67
code            string (lengths)  101 – 24k
commit_id       string (lengths)  40 – 40
ast_errors      string (lengths)  0 – 2.76k
token_counts    int64             7 – 3.77k
url             string (lengths)  31 – 61
n_whitespaces   int64             4 – 13.9k
random_cut      string (lengths)  21 – 13.9k
n_identifiers   int64             1 – 157
n_ast_nodes     int64             10 – 3.6k
fun_name        string (lengths)  3 – 72
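Several of these columns (n_ast_nodes, ast_levels, n_identifiers, nloc) are AST- and line-level metrics over the code field. As a rough, stdlib-only sketch of how such values could be computed for one of the snippets below (the check_status function from the mindsdb row), consider the following; the metric definitions here are assumptions for illustration, not necessarily the ones used to build this dataset.

import ast

# Example snippet taken verbatim from the mindsdb row further down in this preview.
code = (
    "def check_status(self):\n"
    "    try:\n"
    "        return self.connection.is_connected()\n"
    "    except Exception:\n"
    "        return False\n"
)

tree = ast.parse(code)

# n_ast_nodes: total number of AST nodes (assumed definition).
n_ast_nodes = sum(1 for _ in ast.walk(tree))

# ast_levels: maximum nesting depth of the AST (assumed definition).
def ast_depth(node):
    children = list(ast.iter_child_nodes(node))
    return 1 + max((ast_depth(child) for child in children), default=0)

ast_levels = ast_depth(tree)

# n_identifiers: distinct names and attribute names used in the snippet (assumed definition).
names = {n.id for n in ast.walk(tree) if isinstance(n, ast.Name)}
attrs = {n.attr for n in ast.walk(tree) if isinstance(n, ast.Attribute)}
n_identifiers = len(names | attrs)

# nloc: non-blank lines of code (assumed definition).
nloc = sum(1 for line in code.splitlines() if line.strip())

print(n_ast_nodes, ast_levels, n_identifiers, nloc)

Values computed this way will not necessarily match the numbers stored in the rows below, since the dataset's own tokenizer and AST conventions are not specified here.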
9
0
1
8
scripts-dev/check_pydantic_models.py
249,383
Reject non-strict types in Pydantic models (#13502)
synapse
10
Python
9
check_pydantic_models.py
def test_annotation_without_strict_raises(self) -> None:
    with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
        run_test_snippet(
        )
ba8938b090c7e1908cfa4feac75f08f3bc1183e8
23
https://github.com/matrix-org/synapse.git
53
def test_annotation_without_strict_raises(self) -> None: with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): run_test_snippet(
6
43
test_annotation_without_strict_raises
62
0
1
15
pandas/tests/series/indexing/test_setitem.py
163,050
TST: tests for setitem-like casting issues (#45154)
pandas
11
Python
29
test_setitem.py
def test_37477():
    # fixed by GH#45121
    orig = DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]})
    expected = DataFrame({"A": [1, 2, 3], "B": [3, 1.2, 5]})

    df = orig.copy()
    df.at[1, "B"] = 1.2
    tm.assert_frame_equal(df, expected)

    df = orig.copy()
    df.loc[1, "B"] = 1.2
    tm.assert_frame_equal(df, expected)

    df = orig.copy()
    df.iat[1, 1] = 1.2
    tm.assert_frame_equal(df, expected)

    df = orig.copy()
    df.iloc[1, 1] = 1.2
    tm.assert_frame_equal(df, expected)
d70b95bc0e17d18bbefee8ac8a07e4fa5f33513c
166
https://github.com/pandas-dev/pandas.git
106
def test_37477(): # fixed by GH#45121 orig = DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]}) expe
12
243
test_37477
36
0
1
11
tests/util/test_treecache.py
250,034
Add missing types to tests.util. (#14597) Removes files under tests.util from the ignored by list, then fully types all tests/util/*.py files.
synapse
11
Python
25
test_treecache.py
def test_pop_twolevel(self) -> None:
    cache = TreeCache()
    cache[("a", "a")] = "AA"
    cache[("a", "b")] = "AB"
    cache[("b", "a")] = "BA"
    self.assertEqual(cache.pop(("a", "a")), "AA")
    self.assertEqual(cache.get(("a", "a")), None)
    self.assertEqual(cache.get(("a", "b")), "AB")
    self.assertEqual(cache.pop(("b", "a")), "BA")
    self.assertEqual(cache.pop(("b", "a")), None)
    self.assertEqual(len(cache), 1)
acea4d7a2ff61b5beda420b54a8451088060a8cd
138
https://github.com/matrix-org/synapse.git
105
def test_pop_twolevel(self) -> None: cache = TreeCache() cache[("a", "a")] = "AA" cache[("a", "b")] = "AB" cache[("b", "a")] = "BA" self.assertEqual(cache.pop(("a", "a")), "AA") self.assertEqual(cache.get(("a", "a")), None)
8
249
test_pop_twolevel
49
0
1
20
tests/test_event_auth.py
248,552
EventAuthTestCase: build events for the right room version In practice, when we run the auth rules, all of the events have the right room version. Let's stop building Room V1 events for these tests and use the right version.
synapse
10
Python
30
test_event_auth.py
def test_random_users_cannot_send_state_before_first_pl(self):
    creator = "@creator:example.com"
    joiner = "@joiner:example.com"
    auth_events = [
        _create_event(RoomVersions.V1, creator),
        _join_event(RoomVersions.V1, creator),
        _join_event(RoomVersions.V1, joiner),
    ]

    # creator should be able to send state
    event_auth.check_auth_rules_for_event(
        RoomVersions.V1,
        _random_state_event(RoomVersions.V1, creator),
        auth_events,
    )

    # joiner should not be able to send state
    self.assertRaises(
        AuthError,
        event_auth.check_auth_rules_for_event,
        RoomVersions.V1,
        _random_state_event(RoomVersions.V1, joiner),
        auth_events,
    )
2959184a42398277ff916206235b844a8f7be5d7
89
https://github.com/matrix-org/synapse.git
247
def test_random_users_cannot_send_state_before_first_pl(self): creator = "@creator:example.com" joiner = "@joiner:example.com" auth_events = [ _create_event(RoomVersions.V1, creator), _join_event(RoomVersions.V1, creator),
14
135
test_random_users_cannot_send_state_before_first_pl
33
0
2
8
scapy/contrib/automotive/scanner/enumerator.py
209,880
Improve reduce function for Automotive Scanner Enumerators (#3740)
scapy
11
Python
28
enumerator.py
def _get_retry_iterator(self, state):
    # type: (EcuState) -> Iterable[Packet]
    retry_entry = self._retry_pkt[state]
    if isinstance(retry_entry, Packet):
        log_automotive.debug("Provide retry packet")
        return [retry_entry]
    else:
        log_automotive.debug("Provide retry iterator")
        # assume self.retry_pkt is a generator or list
        return retry_entry
799f272bc04c361841d01e9c0087950e0eb86610
43
https://github.com/secdev/scapy.git
115
def _get_retry_iterator(self, state): # type: (EcuState) -> Iterable[Packet] retry_entry = self._retry_pkt[state] if isinstance(retry_entry, Packet): log_automotive.debug("Provide retry packet") return [retry_entry] else: log_automotive.debug("Provide retry iterator") # assume self.retry_pkt is a gene
9
74
_get_retry_iterator
152
0
10
37
src/transformers/trainer.py
35,175
fix bug for the log of RNG states are not properly loaded exception. (#15638) Co-authored-by: muz <muzhi1991@limuzhideMBP-2.lan>
transformers
18
Python
92
trainer.py
def _load_rng_state(self, checkpoint):
    # Load RNG states from `checkpoint`
    if checkpoint is None:
        return

    local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
    if local_rank != -1:
        rng_file = os.path.join(checkpoint, f"rng_state_{local_rank}.pth")
        if not os.path.isfile(os.path.join(checkpoint, rng_file)):
            logger.info(
                f"Didn't find an RNG file for process {local_rank}, if you are resuming a training that "
                "wasn't launched in a distributed fashion, reproducibility is not guaranteed."
            )
            return
    else:
        rng_file = os.path.join(checkpoint, "rng_state.pth")
        if not os.path.isfile(rng_file):
            logger.info(
                "Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
                "fashion, reproducibility is not guaranteed."
            )
            return

    checkpoint_rng_state = torch.load(rng_file)
    random.setstate(checkpoint_rng_state["python"])
    np.random.set_state(checkpoint_rng_state["numpy"])
    torch.random.set_rng_state(checkpoint_rng_state["cpu"])
    if torch.cuda.is_available():
        if self.args.local_rank != -1:
            torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
        else:
            try:
                torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
            except Exception as e:
                logger.info(
                    f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}"
                    "\nThis won't yield the same results as if the training had not been interrupted."
                )
    if is_torch_tpu_available():
        xm.set_rng_state(checkpoint_rng_state["xla"])
e314c19a3ff52b39f33453ab6c7f7b3c6c12413e
226
https://github.com/huggingface/transformers.git
630
def _load_rng_state(self, checkpoint): # Load RNG states from `checkpoint` if checkpoint is None: return local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank if local_rank != -1: rng_file = os.path.join(checkpoint, f"rng_state_{local_rank}.pth") if not os.path.isfile(os.path.join(checkpoint, rng_file)): logger.info( f"Didn't find an RNG file for process {local_rank}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(checkpoint, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.local_rank != -1: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) else:
28
402
_load_rng_state
42
0
3
14
wagtail/images/__init__.py
75,022
Reformat with black
wagtail
12
Python
36
__init__.py
def get_image_model():
    from django.apps import apps

    model_string = get_image_model_string()
    try:
        return apps.get_model(model_string, require_ready=False)
    except ValueError:
        raise ImproperlyConfigured(
            "WAGTAILIMAGES_IMAGE_MODEL must be of the form 'app_label.model_name'"
        )
    except LookupError:
        raise ImproperlyConfigured(
            "WAGTAILIMAGES_IMAGE_MODEL refers to model '%s' that has not been installed"
            % model_string
        )
d10f15e55806c6944827d801cd9c2d53f5da4186
47
https://github.com/wagtail/wagtail.git
128
def get_image_model(): from django.apps import apps model_string = get_image_model_string() try: return apps.get_model(model_string, require_ready=False) except ValueError: raise ImproperlyConfigured( "WAGTAILIMAGES_IMAGE_MODEL must be of the form 'app_label.model_name'" ) except LookupError: raise I
10
83
get_image_model
25
0
2
5
test/lib/ansible_test/_internal/config.py
266,768
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
ansible
10
Python
25
config.py
def only_targets(self, target_type):  # type: (t.Type[THostConfig]) -> t.List[THostConfig]
    if not self.targets:
        raise Exception('There must be one or more targets.')

    assert type_guard(self.targets, target_type)

    return t.cast(t.List[THostConfig], self.targets)
a06fa496d3f837cca3c437ab6e9858525633d147
44
https://github.com/ansible/ansible.git
65
def only_targets(self, target_type): # type: (t.Type[THostConfig]) -> t.List[THostConfig] if not self.targets: raise Exception('There must be
10
72
only_targets
13
0
2
8
test/prototype_transforms_kernel_infos.py
193,916
[prototype] Switch to `spatial_size` (#6736) * Change `image_size` to `spatial_size` * Fix linter * Fixing more tests. * Adding get_num_channels_video and get_spatial_size_* kernels for video, masks and bboxes. * Refactor get_spatial_size * Reduce the usage of `query_chw` where possible * Rename `query_chw` to `query_spatial_size` * Adding `get_num_frames` dispatcher and kernel. * Adding jit-scriptability tests
vision
12
Python
13
prototype_transforms_kernel_infos.py
def sample_inputs_rotate_bounding_box():
    for bounding_box_loader in make_bounding_box_loaders():
        yield ArgsKwargs(
            bounding_box_loader,
            format=bounding_box_loader.format,
            spatial_size=bounding_box_loader.spatial_size,
            angle=_ROTATE_ANGLES[0],
        )
4d4711d970f5cbd0a9e1adb465dca2703c8efbfd
36
https://github.com/pytorch/vision.git
73
def sample_inputs_rotate_bounding_box(): for bounding_box_loader in make_bounding_box_loaders(): yield ArgsKwargs( bounding_box_loader,
8
54
sample_inputs_rotate_bounding_box
138
0
1
54
test/test_outputs.py
179,364
Format The Codebase - black formatting - isort formatting
gradio
15
Python
62
test_outputs.py
def test_as_component(self):
    y = "happy"
    label_output = gr.outputs.Label()
    label = label_output.postprocess(y)
    self.assertDictEqual(label, {"label": "happy"})
    self.assertEqual(label_output.deserialize(y), y)
    self.assertEqual(label_output.deserialize(label), y)
    with tempfile.TemporaryDirectory() as tmpdir:
        to_save = label_output.save_flagged(tmpdir, "label_output", label, None)
        self.assertEqual(to_save, y)

    y = {3: 0.7, 1: 0.2, 0: 0.1}
    label_output = gr.outputs.Label()
    label = label_output.postprocess(y)
    self.assertDictEqual(
        label,
        {
            "label": 3,
            "confidences": [
                {"label": 3, "confidence": 0.7},
                {"label": 1, "confidence": 0.2},
                {"label": 0, "confidence": 0.1},
            ],
        },
    )
    label_output = gr.outputs.Label(num_top_classes=2)
    label = label_output.postprocess(y)
    self.assertDictEqual(
        label,
        {
            "label": 3,
            "confidences": [
                {"label": 3, "confidence": 0.7},
                {"label": 1, "confidence": 0.2},
            ],
        },
    )
    with self.assertRaises(ValueError):
        label_output.postprocess([1, 2, 3])

    with tempfile.TemporaryDirectory() as tmpdir:
        to_save = label_output.save_flagged(tmpdir, "label_output", label, None)
        self.assertEqual(to_save, '{"3": 0.7, "1": 0.2}')
        self.assertEqual(
            label_output.restore_flagged(tmpdir, to_save, None),
            {
                "label": "3",
                "confidences": [
                    {"label": "3", "confidence": 0.7},
                    {"label": "1", "confidence": 0.2},
                ],
            },
        )
    with self.assertRaises(ValueError):
        label_output = gr.outputs.Label(type="unknown")
        label_output.deserialize([1, 2, 3])
cc0cff893f9d7d472788adc2510c123967b384fe
385
https://github.com/gradio-app/gradio.git
772
def test_as_component(self): y = "happy" label_output = gr.outputs.Label() label = label_output.postprocess(y) self.assertDictEqual(label, {"label": "happy"}) self.assertEqual(label_output.deserialize(y), y)
22
606
test_as_component
34
0
3
19
tests/orion/api/test_work_queues.py
58,319
Add work queue backend
prefect
27
Python
29
test_work_queues.py
async def scheduled_flow_runs(self, session, deployment, work_queue, work_queue_2):
    for i in range(3):
        for wq in [work_queue, work_queue_2]:
            await models.flow_runs.create_flow_run(
                session=session,
                flow_run=schemas.core.FlowRun(
                    flow_id=deployment.flow_id,
                    deployment_id=deployment.id,
                    work_queue_name=wq.name,
                    state=schemas.states.State(
                        type="SCHEDULED",
                        timestamp=pendulum.now("UTC").add(minutes=i),
                        state_details=dict(
                            scheduled_time=pendulum.now("UTC").add(minutes=i)
                        ),
                    ),
                ),
            )
    await session.commit()
2649fa325433aa219d6569ed77ef018f79480479
127
https://github.com/PrefectHQ/prefect.git
399
async def scheduled_flow_runs(self, session, deployment, work_queue, work_queue_2): for i in range(3): for wq in [work_queue, work_queue_2]: await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=deployment.flow_id, deployment_id=deployment.id, work_queue_name=wq.name, state=schemas.states.State( type="SCHEDULED", timestamp=pendulum.now("UTC").add(minutes=i), state_details=dict( scheduled_time=pendulum.now("UTC").add(minutes=i) ), ), ),
34
193
scheduled_flow_runs
23
0
1
19
pandas/tests/io/xml/test_to_xml.py
164,112
TST: Remove unused fixtures (#45692) * TST: Remove unused fixtures * Undo a removed fixture * Add back other fixtures * Undo a file * Try undoing this? * Revert "Try undoing this?" This reverts commit 0e56cb04f5e8cb1f7b2ac4c5e6191485bb2fe1ab.
pandas
12
Python
18
test_to_xml.py
def test_attrs_cols_prefix(parser):
    expected = output = geom_df.to_xml(
        attr_cols=["index", "shape", "degrees", "sides"],
        namespaces={"doc": "http://example.xom"},
        prefix="doc",
        parser=parser,
    )
    output = equalize_decl(output)

    assert output == expected
f46df091df3afea25a273f491d1f6b2c7d20b32c
53
https://github.com/pandas-dev/pandas.git
66
def test_attrs_cols_prefix(parser): expected = output = geom_df.to_xml( attr_cols=["index", "shape", "degrees", "sides"], namespaces={"doc": "http://example.xom"}, prefix="doc", parser=p
10
97
test_attrs_cols_prefix
26
0
3
8
bootloader/waflib/Tools/c_preproc.py
263,295
Bootloader: Building: Unpack waf's lib archive. Doing so makes it easier to modify. This is a temporary measure until the next waf version is released (although I'm tempted to keep it since it's much more IDE completion friendly).
pyinstaller
14
Python
19
c_preproc.py
def filter_comments(self, node):
    code = node.read()
    if use_trigraphs:
        for (a, b) in trig_def:
            code = code.split(a).join(b)
    code = re_nl.sub('', code)
    code = re_cpp.sub(repl, code)
    return re_lines.findall(code)
64ccb7aea824fbec57f7ed1bbe483ec486183c13
66
https://github.com/pyinstaller/pyinstaller.git
86
def filter_comments(self, node): c
17
104
filter_comments
80
0
1
23
tests/integration_tests/test_mlflow.py
8,454
Config Object (#2426) * Fixed loss instances across features * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed binary OneOfImplementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix custom loss components * Fix gbm category * Remove config object code, out of scope * Fixed more tests * Fixed incorrect text preproc default, added clip to category feature level * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes additional tests * Cache jsonschema validator to reduce memory pressure * Fix imports * Skip neuropod test * Added upgrade audio to default preproc back compat and cleaned up * Small nits * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change backfill constant for audio * Add docstring to compute feature hash * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Unused import * Another backfill constant change * Unused import * remove default population functions * Added config object test * rewired build_inputs * rewired combiner in ecd, added logic to config object * Refactored ecd.py * Fixing up merge_with_defaults, need metadata changes in master * Refactored defaults section and mega upgraded config obj * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed some formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed feature col, proc col, and render config from defaults.py * Fix duplicate import * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added config initializer to merge defaults flow * Refactored update_config_with_metadata * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added dict conversion method to config object and refactored merge config function in config_utils * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Refactored until preproc entrypoint * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed update_config_with_metadata * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed load config base feature method - no longer necessary * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Formatting * Fixed input size assignment * Temp fix * Fixed pretrained encoder path referencing temp until preproc refactor * Solved the WORST BUG EVER * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Switch reduce_input to None for sequence tagger * Fixed another one * Fixed typo * Various test fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed excess defaults params issue * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Minor fixes * [pre-commit.ci] 
auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed some defaults tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * Formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * More test fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed defaults tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix more tests * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix more tests * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixing ghost tests attempt * Deep copy to smash the ghost failures * Copied top level modules now too * Started fixing hyperopt * Fixed Hyperopt Issues * Flake 8 * Remove commented out code * Address Piero feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * Removed merge with defaults * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed various issues with preprocessing and splitting positioning * Fixed hyperopt issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Refactored api pipeline to use all config obj references * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix more tests * Fixed auto tune learning rate and batch size * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed sequence feature tests * Fixed image feature test * Fixed last test * flake 8 * Marshmallowify Config object, remove manual to dict method, add Factory method constructors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Validate config within config object * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * All Travis feedback addressed * Using all new constructors now * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed from class attributes * Added deep copies back and piped repr inheritance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Format * Small error fix, moved back compat into Config Object * Flake8 * Docstring for hyperopt defaults method * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address Joppe feedback * Revert "Address Joppe feedback" This reverts commit 42f1665ef917d062a010550bb960594c355285ff. 
* Fix tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake8 * fix test * Small improvement * Changed repr for input features, added feature enabling/disabling * Added feature enabling/disabling, and better reprs for SDK dev * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * Added rich to requirements.txt * Add some more CO tests and comment more on CO code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix explain issue * Julian feedback * Added TODOs for future refactor PRs * Fix explain test failure, test shared state improvement and bug fix, remove unncessary code from convert_submodules * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * implement Daniel's feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix residual errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Error fix * Using mixins now so no loose attributes on defaults, fixed height width schema restrictions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed unnecessary filtering from defaults schema logic * Piero's simplification and cleanup * Flake 8 * Fix test and update docstrings from Pieros change * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address most of Justin's feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix tests and more feedback implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Renamed files to correspond to ModelConfig class name * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Missing constant import * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed incorrect merge conflict resolution * Flake8 * Fix remaining tests (except old models training from trainer type removal) * Fixed old models not validating trainer type * Add output_feature=False to test_hyperopt_ray.py * Implement Kabir's feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Travis Addair <tgaddair@gmail.com> Co-authored-by: w4nderlust <w4nderlust@gmail.com>
ludwig
11
Python
63
test_mlflow.py
def test_export_mlflow_local(tmpdir):
    epochs = 2
    batch_size = 8
    num_examples = 32

    input_features = [sequence_feature(reduce_output="sum")]
    output_features = [category_feature(vocab_size=2, reduce_input="sum", output_feature=True)]

    config = {
        "input_features": input_features,
        "output_features": output_features,
        "combiner": {"type": "concat", "output_size": 14},
        TRAINER: {"epochs": epochs, "batch_size": batch_size},
    }

    data_csv = generate_data(
        input_features, output_features, os.path.join(tmpdir, "train.csv"), num_examples=num_examples
    )

    exp_name = "mlflow_test"
    output_dir = os.path.join(tmpdir, "output")
    model = LudwigModel(config, backend=FakeRemoteBackend())
    _, _, output_directory = model.train(training_set=data_csv, experiment_name=exp_name, output_directory=output_dir)

    model_path = os.path.join(output_directory, "model")
    output_path = os.path.join(tmpdir, "data/results/mlflow")
    export_mlflow(model_path, output_path)
    assert set(os.listdir(output_path)) == {"MLmodel", "model", "conda.yaml"}
4d2d81f9fdefc52eea6a9bf0826a6f2ffc8d681b
198
https://github.com/ludwig-ai/ludwig.git
165
def test_export_mlflow_local(tmpdir): epochs = 2 batch_size = 8 num_examples = 32 input_features = [sequence_feature(reduce_output="sum")] output_features = [category_feature(vocab_size=2, reduce_input="sum", output_feature=True)] config = { "input_features": input_features, "output_features": output_features, "combiner": {"type": "concat", "output_size": 14}, TRAINER: {"epochs": epochs, "batch_size": batch_size}, } data_csv = generate_data(
36
327
test_export_mlflow_local
169
0
20
39
rest_framework/utils/encoders.py
48,674
Refactor: Replace try/except with contextlib.suppress() (#8676)
django-rest-framework
15
Python
110
encoders.py
def default(self, obj):
    # For Date Time string spec, see ECMA 262
    # https://ecma-international.org/ecma-262/5.1/#sec-15.9.1.15
    if isinstance(obj, Promise):
        return force_str(obj)
    elif isinstance(obj, datetime.datetime):
        representation = obj.isoformat()
        if representation.endswith('+00:00'):
            representation = representation[:-6] + 'Z'
        return representation
    elif isinstance(obj, datetime.date):
        return obj.isoformat()
    elif isinstance(obj, datetime.time):
        if timezone and timezone.is_aware(obj):
            raise ValueError("JSON can't represent timezone-aware times.")
        representation = obj.isoformat()
        return representation
    elif isinstance(obj, datetime.timedelta):
        return str(obj.total_seconds())
    elif isinstance(obj, decimal.Decimal):
        # Serializers will coerce decimals to strings by default.
        return float(obj)
    elif isinstance(obj, uuid.UUID):
        return str(obj)
    elif isinstance(obj, QuerySet):
        return tuple(obj)
    elif isinstance(obj, bytes):
        # Best-effort for binary blobs. See #4187.
        return obj.decode()
    elif hasattr(obj, 'tolist'):
        # Numpy arrays and array scalars.
        return obj.tolist()
    elif (coreapi is not None) and isinstance(obj, (coreapi.Document, coreapi.Error)):
        raise RuntimeError(
            'Cannot return a coreapi object from a JSON view. '
            'You should be using a schema renderer instead for this view.'
        )
    elif hasattr(obj, '__getitem__'):
        cls = (list if isinstance(obj, (list, tuple)) else dict)
        with contextlib.suppress(Exception):
            return cls(obj)
    elif hasattr(obj, '__iter__'):
        return tuple(item for item in obj)
    return super().default(obj)
c10f2266222c434485889b08cc1463acdb8fa169
291
https://github.com/encode/django-rest-framework.git
597
def default(self, obj): # For Date Time string spec, see ECMA 262 # https://ecma-international.org/ecma-262/5.1/#sec-15.9.1.15 if isinstance(obj, Promise): return force_str(obj) elif isinstance(obj, datetime.datetime): representation = obj.isoformat() if representation.endswith('+00:00'): representation = representation[:-6] + 'Z' return representation elif isinstance(obj, datetime.date): return obj.isoformat() elif isinstance(obj, datetime.time): if timezone and timezone.is_aware(obj): raise ValueError("JSON can't represent timezone-aware times.") representation = obj.isoformat() return represen
41
473
default
18
0
2
7
python/ray/serve/tests/test_healthcheck.py
144,837
[serve] Improve health check failure semantics (#22297)
ray
11
Python
17
test_healthcheck.py
def test_user_defined_method_fails(serve_instance):
    Patient.deploy()
    h = Patient.get_handle()
    actor = ray.get(h.remote())
    ray.get(h.set_should_fail.remote())

    wait_for_condition(check_new_actor_started, handle=h, original_actors=actor)
    ray.get([h.remote() for _ in range(100)])
610930ae6aeafb37be75851a8c1b9ff39d5f7d22
72
https://github.com/ray-project/ray.git
35
def test_user_defined_method_fails(serve_instance): Patient.deploy() h = Patient.get_handle() actor = ray.get(h.remote()) ray.get(h.set_should_fail.remote()) wait_fo
17
117
test_user_defined_method_fails
37
0
2
12
.venv/lib/python3.8/site-packages/pip/_internal/wheel_builder.py
61,436
upd; format
transferlearning
11
Python
33
wheel_builder.py
def _clean_one_legacy(req, global_options):
    # type: (InstallRequirement, List[str]) -> bool
    clean_args = make_setuptools_clean_args(
        req.setup_py_path,
        global_options=global_options,
    )

    logger.info('Running setup.py clean for %s', req.name)
    try:
        call_subprocess(clean_args, cwd=req.source_dir)
        return True
    except Exception:
        logger.error('Failed cleaning build dir for %s', req.name)
        return False
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
59
https://github.com/jindongwang/transferlearning.git
96
def _clean_one_legacy(req, global_options): # type: (InstallRequirement, List[str]) -> bool clean_args = make_setuptools_clean_args( req.setup_py_path, global_options=global_options, ) lo
14
95
_clean_one_legacy
44
1
1
7
python/ray/train/tests/test_huggingface_gpu.py
137,364
[Train] `HuggingFacePredictor` & docs improvements (#30945) This PR introduces 2 changes: Removes a confusing suggestion in the docstring of HuggingFaceCheckpoint - checkpoints created using from_checkpoint will not work for prediction as intended. Adds use_gpu argument and logic to automatically use GPU if one is available to HuggingFacePredictor. Signed-off-by: Antoni Baum <antoni.baum@protonmail.com>
ray
11
Python
39
test_huggingface_gpu.py
def create_checkpoint():
    with tempfile.TemporaryDirectory() as tmpdir:
        model_config = AutoConfig.from_pretrained(model_checkpoint)
        model = AutoModelForCausalLM.from_config(model_config)
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
        checkpoint = HuggingFaceCheckpoint.from_model(model, tokenizer, path=tmpdir)
        # Serialize to dict so we can remove the temporary directory
        return HuggingFaceCheckpoint.from_dict(checkpoint.to_dict())


# TODO(ml-team): Add np.ndarray to batch_type
@pytest.mark.parametrize("batch_type", [pd.DataFrame])
@pytest.mark.parametrize("device", [None, 0])
81237e05838757dde196688a20631daad48010dd
@pytest.mark.parametrize("batch_type", [pd.DataFrame]) @pytest.mark.parametrize("device", [None, 0])
62
https://github.com/ray-project/ray.git
85
def create_checkpoint(): with tempfile.TemporaryDirectory() as tmpdir: model_config = AutoConfig.from_pretrained(model_checkpoint) model = AutoModelForCausalLM.from_c
25
153
create_checkpoint
33
0
1
6
tests/sentry/search/events/test_builder.py
91,455
ref: replace self.assertRaises with pytest.raises (#35685) * add flake8 plugin to detect assertRaises * ref: replace self.assertRaises with pytest.raises * non-sed fixes
sentry
10
Python
28
test_builder.py
def test_limit_validation(self):
    # 51 is ok
    MetricsQueryBuilder(self.params, limit=51)
    # None is ok, defaults to 50
    query = MetricsQueryBuilder(self.params)
    assert query.limit.limit == 50
    # anything higher should throw an error
    with pytest.raises(IncompatibleMetricsQuery):
        MetricsQueryBuilder(self.params, limit=10_000)
284e980df0018f8baee659999268bdd4c7d08255
49
https://github.com/getsentry/sentry.git
92
def test_limit_validation(self): # 51 is ok MetricsQueryBuilder(self.params, limit=51) # None is ok, defaults to 50 query = MetricsQueryBuilder(self.params) assert query.limit.limit == 50 # anything higher should throw an error with pytest.raises(IncompatibleMe
9
82
test_limit_validation
20
0
1
10
tests/infrastructure/test_docker_container.py
58,882
Update default infrastructure command to be set at runtime Add commands to Docker container tests with no command
prefect
12
Python
19
test_docker_container.py
def test_adds_docker_host_gateway_on_linux(mock_docker_client, monkeypatch):
    monkeypatch.setattr("sys.platform", "linux")

    DockerContainer(
        command=["echo", "hello"],
    ).run()

    mock_docker_client.containers.create.assert_called_once()
    call_extra_hosts = mock_docker_client.containers.create.call_args[1].get(
        "extra_hosts"
    )
    assert call_extra_hosts == {"host.docker.internal": "host-gateway"}
c02383e4a879c95586cfbc19787904da2d4be22b
64
https://github.com/PrefectHQ/prefect.git
54
def test_adds_docker_host_gateway_on_linux(mock_docker_client, monkeypatch): monkeypatch.setattr("sys.platform", "linux") DockerContainer( command=["echo", "hello"], ).run() mock_docker_client.containers.create.assert_called_once() call_extra_hosts = mock_docker_client.containers.create.call_args[1].get( "extra_host
13
113
test_adds_docker_host_gateway_on_linux
21
0
1
10
modin/pandas/indexing.py
155,050
FIX-#3764: Ensure df.loc with a scalar out of bounds appends to df (#3765) Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Co-authored-by: Bill Wang <billiam@ponder.io> Co-authored-by: Vasily Litvinov <fam1ly.n4me@yandex.ru>
modin
12
Python
18
indexing.py
def _set_item_existing_loc(self, row_loc, col_loc, item):
    row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
    self._setitem_positional(
        row_lookup,
        col_lookup,
        item,
        axis=self._determine_setitem_axis(
            row_lookup, col_lookup, is_scalar(row_loc), is_scalar(col_loc)
        ),
    )
11ba4811e6db11740e11fd33d3cdfba8ce5bec54
56
https://github.com/modin-project/modin.git
119
def _set_item_existing_loc(self, row_loc, col_loc, item): row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc) self._setitem_positional( row_lookup,
12
81
_set_item_existing_loc
9
0
2
5
mindsdb/integrations/mysql_handler/mysql_handler/mysql_handler.py
114,412
test: move testing logic into unittest modules; CI still pending
mindsdb
10
Python
8
mysql_handler.py
def check_status(self):
    try:
        return self.connection.is_connected()
    except Exception:
        return False
76a30708e24bca37169df44d8b31573c7b5beb43
20
https://github.com/mindsdb/mindsdb.git
44
def check_status(self): try: return self.connection.is_connected() except Exception: return Fals
5
34
check_status
66
1
5
29
wagtail/admin/views/pages/moderation.py
72,515
Reformat with black
wagtail
19
Python
52
moderation.py
def reject_moderation(request, revision_id):
    revision = get_object_or_404(PageRevision, id=revision_id)
    if not revision.page.permissions_for_user(request.user).can_publish():
        raise PermissionDenied

    if not revision.submitted_for_moderation:
        messages.error(
            request,
            _("The page '{0}' is not currently awaiting moderation.").format(
                revision.page.specific_deferred.get_admin_display_title()
            ),
        )
        return redirect("wagtailadmin_home")

    if request.method == "POST":
        revision.reject_moderation(user=request.user)

        messages.success(
            request,
            _("Page '{0}' rejected for publication.").format(
                revision.page.specific_deferred.get_admin_display_title()
            ),
            buttons=[
                messages.button(
                    reverse("wagtailadmin_pages:edit", args=(revision.page.id,)),
                    _("Edit"),
                )
            ],
        )

        if not send_moderation_notification(revision, "rejected", request.user):
            messages.error(request, _("Failed to send rejection notifications"))

    return redirect("wagtailadmin_home")


@require_GET
d10f15e55806c6944827d801cd9c2d53f5da4186
@require_GET
174
https://github.com/wagtail/wagtail.git
332
def reject_moderation(request, revision_id): revision = get_object_or_404(PageRevision, id=revision_id) if not revision.page.permissions_for_user(request.user).can_publish(): raise PermissionDenied if not revision.submitted_for_moderation: messages.error( request, _("The page '{0}' is not currently awaiting moderation.").format( revision.page.specific_deferred.get_admin_display_title() ), ) return redirect("wagtailadmin_home") if request.method == "POST": revision.reject_moderation(user=request.user) messages.success( request, _("Page '{0}' rejected for publication.").format( revision.page.specific_deferred.get_admin_display_title() ), buttons=[ messages.button( reverse("wagtailadmin_pages:edit", args=(revision.page.id,)), _("Edit"), ) ], ) if not send_moderation_notification(revision, "rejected", request.user): messages.error(request, _("Failed to send rejection notifications")) return redirect("wagtailadmin_home")
28
290
reject_moderation
40
0
1
19
tests/components/calendar/test_trigger.py
296,840
Add initial implementation of a calendar trigger (#68674) * Add initial implementation of calendar trigger This is an initial implementation of a calendar trigger, that supports triggering on calendar start time. See architecture proposal in: https://github.com/home-assistant/architecture/discussions/700 * Address reviewer feedback * Use f-strings for all tests * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Remove logging f-strings, and move to main code * Remove mypy ignore * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Update calendar triggers to use new calendar data model * Update tests/components/calendar/test_trigger.py Co-authored-by: Franck Nijhof <frenck@frenck.nl> * Rewrite tests using freezegun Rewrite tests using freezegun and improve edge case handling, and use utc consistently for all alarms. * Update homeassistant/components/calendar/trigger.py Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Update homeassistant/components/calendar/trigger.py Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Increase test coverage based on pr feedback Co-authored-by: Martin Hjelmare <marhje52@gmail.com> Co-authored-by: Franck Nijhof <frenck@frenck.nl>
core
12
Python
36
test_trigger.py
async def test_event_payload(hass, calls, fake_schedule):
    event_data = fake_schedule.create_event(
        start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"),
        end=datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00"),
        description="Description",
        location="Location",
    )
    await create_automation(hass, EVENT_START)
    assert len(calls()) == 0

    await fake_schedule.fire_until(
        datetime.datetime.fromisoformat("2022-04-19 11:15:00+00:00")
    )
    assert calls() == [
        {
            "platform": "calendar",
            "event": EVENT_START,
            "calendar_event": event_data,
        }
    ]
a2c74b978664b627bafc4a43b26aa2be7b15b229
98
https://github.com/home-assistant/core.git
149
async def test_event_payload(hass, calls, fake_schedule): event_data = fake_schedule.create_event( start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"), end=datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00"), description="Description", location="Location", ) await create_automation(hass, EVENT_START) assert len(calls()) == 0 await fake_schedule.fire_until( datetime.datetime.fromisoformat("2022-04-19 11:15:00+00:00") ) assert calls() == [ { "platform":
16
169
test_event_payload
76
0
7
28
packages/syft/src/syft/core/tensor/nn/model.py
1,861
update domain update script to add branch name during hagrid launch add loss to parameter list in model publish print loss during model training
PySyft
15
Python
56
model.py
def publish(self, deduct_epsilon_for_user, get_budget_for_user, ledger, sigma):
    print("Publish Model Weights")

    # relative
    from ..autodp.gamma_tensor import GammaTensor

    parameters = {}
    for i, layer in enumerate(self.layers):
        print("Layer", str(layer))
        print("Before Publish")
        for param in layer.params:
            print(param.shape, end=" ")
        print()
        if hasattr(layer, "params"):
            parameters[str(layer) + str(i)] = [
                param.publish(
                    deduct_epsilon_for_user=deduct_epsilon_for_user,
                    get_budget_for_user=get_budget_for_user,
                    ledger=ledger,
                    sigma=sigma,
                )
                if isinstance(param, (GammaTensor))
                else param
                for param in layer.params
            ]
            print("After Publish")
            for param in parameters[str(layer) + str(i)]:
                print(param.shape, end=" ")
            print()
    parameters["loss"] = self.aggregated_loss
    return parameters
b480217f5bc07d97a691bfed74eb7489667788dd
178
https://github.com/OpenMined/PySyft.git
477
def publish(self, deduct_epsilon_for_user, get_budget_for_user, ledger, sigma): print("Publish Model Weights") # relative from ..autodp.gamma_tensor import GammaTensor parameters = {} for i, layer in enumerate(self.layers): print("Layer", str(layer)) print("Before Publish") for param in layer.params: print(param.shape, end=" ") print() if hasattr(layer, "params"): parameters[str(layer
23
285
publish
98
0
1
39
kubernetes_tests/test_kubernetes_pod_operator_backcompat.py
47,749
KubernetesPodOperator should patch "already checked" always (#22734) When not configured to delete pods, at end of task execution the current behavior is to patch the pod as "already checked", but only if pod not successful. We should also patch when successful so it isn't "reattached" to after a task clear.
airflow
14
Python
73
test_kubernetes_pod_operator_backcompat.py
def test_volume_mount(self):
    with patch.object(PodManager, 'log') as mock_logger:
        volume_mount = VolumeMount(
            'test-volume', mount_path='/tmp/test_volume', sub_path=None, read_only=False
        )

        volume_config = {'persistentVolumeClaim': {'claimName': 'test-volume'}}
        volume = Volume(name='test-volume', configs=volume_config)
        args = [
            "echo \"retrieved from mount\" > /tmp/test_volume/test.txt "
            "&& cat /tmp/test_volume/test.txt"
        ]
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=args,
            labels={"foo": "bar"},
            volume_mounts=[volume_mount],
            volumes=[volume],
            is_delete_operator_pod=False,
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
        )
        context = create_context(k)
        k.execute(context=context)
        mock_logger.info.assert_any_call('retrieved from mount')
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        expected_pod = copy(self.expected_pod)
        expected_pod['spec']['containers'][0]['args'] = args
        expected_pod['spec']['containers'][0]['volumeMounts'] = [
            {'name': 'test-volume', 'mountPath': '/tmp/test_volume', 'readOnly': False}
        ]
        expected_pod['spec']['volumes'] = [
            {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}
        ]
        expected_pod['metadata']['labels']['already_checked'] = 'True'
        assert expected_pod == actual_pod
c3d883a971a8e4e65ccc774891928daaaa0f4442
254
https://github.com/apache/airflow.git
579
def test_volume_mount(self): with patch.object(PodManager, 'log') as mock_logger: volume_mount = VolumeMount( 'test-volume', mount_path='/tmp/test_volume', sub_path=None, read_only=False ) volume_config = {'persistentVolumeClaim': {'claimName': 'test-volume'}} volume = Volume(name='test-volume', configs=volume_config) args = [ "echo \"retrieved from mount\" > /tmp/test_volume/test.txt " "&& cat /tmp/test_volume/test.txt" ] k = KubernetesPodOperator( namespace='default', image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=args, labels={"foo": "bar"}, volume_mounts=[volume_mount], volumes=[volume], is_delete_operator_pod=False, name="test", task_id="task", in_cluster=False, do_xcom_push=False, ) context = create_context(k) k.execute(context=context) mock_logger.info.assert_any_call('retrieved from mount') actual_pod = self.api_client.sanitize_for_serialization(k.pod) expected_pod = copy(self.expected_pod) expected_pod['spec']['containers'][0]['args'] = args
41
453
test_volume_mount
61
0
2
8
d2l/mxnet.py
253,762
Refactor Multihead Attn, Self Attn, and Transformer (#2096) * multihead attn * self attn and pos encoding * simplify * before EncoderBlock * before tmencoder * before decoder block * before training * transformer code * rm seq2seq encoder old * fix bahdanau attn map * transformer done, perf tuned * clean super
d2l-en
12
Python
47
mxnet.py
def forward(self, X, valid_lens):
    # Since positional encoding values are between -1 and 1, the embedding
    # values are multiplied by the square root of the embedding dimension
    # to rescale before they are summed up
    X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
    self.attention_weights = [None] * len(self.blks)
    for i, blk in enumerate(self.blks):
        X = blk(X, valid_lens)
        self.attention_weights[
            i] = blk.attention.attention.attention_weights
    return X
f0be7e672bc0a7c77005d5c79452d796cfe1a06b
81
https://github.com/d2l-ai/d2l-en.git
146
def forward(self, X, valid_lens): # Since positional encoding values are between -1 and 1, the embedding # values are multiplied by the square root of the embedding dimension # to rescale before they are summed up X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens)) self.attention_weights = [None] * len(self.blks) for i, blk in enumerate(self.blks): X = blk(X, valid_lens) self.attention_weights[ i] = blk.attention.attention.attention_wei
16
127
forward
27
0
1
9
tests/unit/executor_test_base.py
116,152
executor base test
mindsdb
10
Python
21
executor_test_base.py
def clear_db(db):
    # drop
    db.Base.metadata.drop_all(db.engine)

    # create
    db.Base.metadata.create_all(db.engine)

    # fill with data
    r = db.Integration(name='files', data={}, engine='files')
    db.session.add(r)
    r = db.Integration(name='views', data={}, engine='views')
    db.session.add(r)
    db.session.commit()
    return db
d304fa61c43e5248c0cb111d5553db653be92cff
92
https://github.com/mindsdb/mindsdb.git
103
def clear_db(db): # drop db.Base.metadata.drop_all(db.engine) # create db.Base.metadata.create_all(db.engine) # fill with data
14
155
clear_db
18
0
2
5
mitmproxy/net/udp.py
250,972
[dns] rewrite of udp, merge dnsserver>proxyserver
mitmproxy
10
Python
15
udp.py
def resume_writing(self) -> None:
    assert self._paused > 0
    self._paused = self._paused - 1
    if self._paused == 0:
        self._can_write.set()
ef3f9e492e8f1d197ddab24bf5f80a76d2fe566d
36
https://github.com/mitmproxy/mitmproxy.git
49
def resume_writing(self) -> None: assert self._paused > 0 self._paused = self._paused - 1 if self._pa
5
58
resume_writing
124
0
5
27
configs/rotate/tools/slicebase.py
211,333
Refactor rbox (#6704) * refactor rbox * modify the code of save results * fix some problem * add .gitignore in dataset/dota * fix test anno path
PaddleDetection
19
Python
63
slicebase.py
def get_poly4_from_poly5(self, poly):
    distances = [
        cal_line_length((poly[i * 2], poly[i * 2 + 1]),
                        (poly[(i + 1) * 2], poly[(i + 1) * 2 + 1]))
        for i in range(int(len(poly) / 2 - 1))
    ]
    distances.append(
        cal_line_length((poly[0], poly[1]), (poly[8], poly[9])))
    pos = np.array(distances).argsort()[0]
    count = 0
    out_poly = []
    while count < 5:
        if (count == pos):
            out_poly.append(
                (poly[count * 2] + poly[(count * 2 + 2) % 10]) / 2)
            out_poly.append(
                (poly[(count * 2 + 1) % 10] + poly[(count * 2 + 3) % 10]) / 2)
            count = count + 1
        elif (count == (pos + 1) % 5):
            count = count + 1
            continue
        else:
            out_poly.append(poly[count * 2])
            out_poly.append(poly[count * 2 + 1])
            count = count + 1
    return out_poly
e55e41945d42db787a0f7c557d53d06a6b24536b
258
https://github.com/PaddlePaddle/PaddleDetection.git
449
def get_poly4_from_poly5(self, poly): distances = [ cal_line_length((poly[i * 2], poly[i * 2 + 1]), (poly[(i + 1) * 2], poly[(i + 1) * 2 + 1])) for i in range(int(len(poly) / 2 - 1)) ] distances.append( cal_line_length((poly[0], poly[1]), (poly[8], poly[9]))) pos = np.array(distances).argsort()[0] count = 0 out_poly = [] while count < 5: if (count == pos): out_poly.append( (poly[count * 2] + poly[(count * 2 + 2) % 10]) / 2) out_poly.append( (poly[(count * 2 + 1) % 10] + poly[(count * 2 + 3) % 10]) / 2) count = count + 1 elif (count == (pos + 1) % 5): count = count + 1 continue else
16
389
get_poly4_from_poly5
12
0
1
4
sklearn/utils/tests/test_estimator_html_repr.py
260,606
FIX Show a HTML repr for meta-estimatosr with invalid parameters (#24015) Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
scikit-learn
10
Python
10
test_estimator_html_repr.py
def test_invalid_parameters_in_stacking():
    stacker = StackingClassifier(estimators=[])

    html_output = estimator_html_repr(stacker)
    assert html.escape(str(stacker)) in html_output
84c6421a9067de7d1b54b7a6d8e21ce38e1f0eca
32
https://github.com/scikit-learn/scikit-learn.git
24
def test_invalid_parameters_in_stacking(): stacker = StackingClassifier(estimators=[]) html_output = estimator_html_rep
9
56
test_invalid_parameters_in_stacking
205
0
3
68
python/ccxt/coinex.py
17,144
1.71.68 [ci skip]
ccxt
18
Python
119
coinex.py
def fetch_markets(self, params={}):
    response = self.publicGetMarketInfo(params)
    #
    #     {
    #         "code": 0,
    #         "data": {
    #             "WAVESBTC": {
    #                 "name": "WAVESBTC",
    #                 "min_amount": "1",
    #                 "maker_fee_rate": "0.001",
    #                 "taker_fee_rate": "0.001",
    #                 "pricing_name": "BTC",
    #                 "pricing_decimal": 8,
    #                 "trading_name": "WAVES",
    #                 "trading_decimal": 8
    #             }
    #         }
    #     }
    #
    markets = self.safe_value(response, 'data', {})
    result = []
    keys = list(markets.keys())
    for i in range(0, len(keys)):
        key = keys[i]
        market = markets[key]
        id = self.safe_string(market, 'name')
        tradingName = self.safe_string(market, 'trading_name')
        baseId = tradingName
        quoteId = self.safe_string(market, 'pricing_name')
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        symbol = base + '/' + quote
        if tradingName == id:
            symbol = id
        result.append({
            'id': id,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'settle': None,
            'baseId': baseId,
            'quoteId': quoteId,
            'settleId': None,
            'type': 'spot',
            'spot': True,
            'margin': None,
            'swap': False,
            'future': False,
            'option': False,
            'active': None,
            'contract': False,
            'linear': None,
            'inverse': None,
            'taker': self.safe_number(market, 'taker_fee_rate'),
            'maker': self.safe_number(market, 'maker_fee_rate'),
            'contractSize': None,
            'expiry': None,
            'expiryDatetime': None,
            'strike': None,
            'optionType': None,
            'precision': {
                'price': self.safe_integer(market, 'pricing_decimal'),
                'amount': self.safe_integer(market, 'trading_decimal'),
            },
            'limits': {
                'leverage': {
                    'min': None,
                    'max': None,
                },
                'amount': {
                    'min': self.safe_number(market, 'min_amount'),
                    'max': None,
                },
                'price': {
                    'min': None,
                    'max': None,
                },
                'cost': {
                    'min': None,
                    'max': None,
                },
            },
            'info': market,
        })
    return result
c9b141d8b46d6bc771d9305e403440654bbe03b2
352
https://github.com/ccxt/ccxt.git
1,520
def fetch_markets(self, params={}): response = self.publicGetMarketInfo(params) # # { # "code": 0, # "data": { # "WAVESBTC": { # "name": "WAVESBTC", # "min_amount": "1", # "maker_fee_rate": "0.001", # "taker_fee_rate": "0.001", # "pricing_name": "BTC", # "pricing_decimal": 8, # "trading_name": "WAVES", # "trading_decimal": 8 # } # } # } # markets = self.safe_value(response, 'data', {}) result = [] keys = list(markets.keys()) for i in range(0, len(keys)): key = keys[i] market = markets[key] id = self.safe_string(market, 'name') tradingName = self.safe_string(market, 'trading_name') baseId = tradingName quoteId = self.safe_string(market, 'pricing_name') base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) symbol = base + '/' + quote if tradingName == id: symbol = id result.append({ 'id': id, 'symbol': symbol,
27
619
fetch_markets
86
0
10
22
tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py
33,184
[LayoutLMv3] Add TensorFlow implementation (#18678) Co-authored-by: Esben Toke Christensen <esben.christensen@visma.com> Co-authored-by: Lasse Reedtz <lasse.reedtz@visma.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Co-authored-by: Joao Gante <joaofranciscocardosogante@gmail.com>
transformers
18
Python
55
test_modeling_tf_layoutlmv3.py
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
    inputs_dict = copy.deepcopy(inputs_dict)

    if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
        inputs_dict = {
            k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
            if isinstance(v, tf.Tensor) and v.ndim > 0
            else v
            for k, v in inputs_dict.items()
        }

    if return_labels:
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
        elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
            inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
            inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
            inputs_dict["labels"] = tf.zeros(
                (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
            )

    return inputs_dict
de8548ebf3242305d0f9792dacb6f86b196a3a33
250
https://github.com/huggingface/transformers.git
348
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): inputs_dict["labels"] = tf.zeros(self
30
383
_prepare_for_class
50
0
3
17
tests/test_modeling_utils.py
336,014
[SDE] Merge to unconditional model (#89) * up * more * uP * make dummy test pass * save intermediate * p * p * finish * finish * finish
diffusers
18
Python
36
test_modeling_utils.py
def test_score_sde_ve_pipeline(self):
    model = UNetUnconditionalModel.from_pretrained("fusing/ffhq_ncsnpp", sde=True)

    torch.manual_seed(0)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(0)

    scheduler = ScoreSdeVeScheduler.from_config("fusing/ffhq_ncsnpp")

    sde_ve = ScoreSdeVePipeline(model=model, scheduler=scheduler)

    torch.manual_seed(0)
    image = sde_ve(num_inference_steps=2)

    if model.device.type == "cpu":
        expected_image_sum = 3384805632.0
        expected_image_mean = 1076.000732421875
    else:
        expected_image_sum = 3382849024.0
        expected_image_mean = 1075.3787841796875

    assert (image.abs().sum() - expected_image_sum).abs().cpu().item() < 1e-2
    assert (image.abs().mean() - expected_image_mean).abs().cpu().item() < 1e-4
ba3c9a9a3a9cf76e4ff8292b66d7cc3206732627
165
https://github.com/huggingface/diffusers.git
181
def test_score_sde_ve_pipeline(self): model = UNetUncondi
27
257
test_score_sde_ve_pipeline
33
0
2
15
test/lib/ansible_test/_internal/host_profiles.py
268,481
Add `use_rsa_sha2_algorithms` option for paramiko (#78789) Fixes #76737 Fixes #77673 Co-authored-by: Matt Clay <matt@mystile.com>
ansible
14
Python
29
host_profiles.py
def get_inventory_variables(self) -> dict[str, t.Optional[t.Union[str, int]]]: core_ci = self.wait_for_instance() connection = core_ci.connection variables: dict[str, t.Optional[t.Union[str, int]]] = dict( ansible_connection=self.config.connection, ansible_pipelining='yes', ansible_host=connection.hostname, ansible_port=connection.port, ansible_user=connection.username, ansible_ssh_private_key_file=core_ci.ssh_key.key, ansible_paramiko_use_rsa_sha2_algorithms='no', ansible_network_os=f'{self.config.collection}.{self.config.platform}' if self.config.collection else self.config.platform, ) return variables
76b746655a36807fa9198064ca9fe7c6cc00083a
122
https://github.com/ansible/ansible.git
163
def get_inventory_variables(self) -> dict[str, t.Optional[t.Union[str, int]]]: core_ci = self.wait_for_instance() connection = core_ci.connection variables: dict[str, t.Optional[t.Union[str, int]]] = dict( ansible_connection=self.config.connection, ansible_pipelining='yes', ansible_host=connection.hostname, ansible_port=connection.port, ansible_user=connection.username, ansible_ssh_private_key_file=core_ci.ssh_key.key, ansible_paramiko_use_rsa_sha2_algorithms='no', ansible_network_os
28
199
get_inventory_variables
31
0
1
5
rest_api/test/test_rest_api.py
257,559
API tests (#2738) * clean up tests and run earlier * use change detection * better naming, skip ES * more cleanup * fix job name * dummy commit to trigger the CI * mock away the PDF converter * make the test compatible with 3.7 * removed leftover * always run the api tests, use a matrix for the OS * refactor all the tests * remove outdated dependency * pylint * new abstract method * adjust for older python versions * rename pipeline file * address PR comments
haystack
17
Python
28
test_rest_api.py
def test_file_upload_with_wrong_meta(client): file_to_upload = {"files": (Path(__file__).parent / "samples" / "pdf" / "sample_pdf_1.pdf").open("rb")} response = client.post(url="/file-upload", files=file_to_upload, data={"meta": "1"}) assert 500 == response.status_code # Ensure the `convert` method was never called MockPDFToTextConverter.mocker.convert.assert_not_called()
82df677ebf853340d331ff0868304cc958307ee0
67
https://github.com/deepset-ai/haystack.git
45
def test_file_upload_with_wrong_meta(client): file_to_upload = {"files": (Path(__file__).parent / "samples" / "pdf" / "sample_pdf_1.pdf").open("rb")} response = client.post(url="/file-upload", files=file_to_upload, data={"meta": "1"}) assert 500 == response.status_code # E
17
121
test_file_upload_with_wrong_meta
8
0
1
3
homeassistant/components/skybell/sensor.py
288,657
Add strict typing to Skybell (#79800)
core
8
Python
8
sensor.py
def native_value(self) -> StateType | datetime: return self.entity_description.value_fn(self._device)
9850709b37fdfa704ac3db4c45a2660880a7ca65
21
https://github.com/home-assistant/core.git
22
def native_value(self) -> StateType | datetime:
7
36
native_value
51
0
5
13
label_studio/projects/models.py
178,057
fix: DEV-3164: Remove potential data exposure from logs (#2828) * Remove potential data exposure from logs * Bump converter & tools pip versions Co-authored-by: nik <nik@heartex.net>
label-studio
13
Python
36
models.py
def _get_annotation_key(self, result): result_type = result.get('type', None) if result_type in ('relation', 'pairwise', None): return None if 'from_name' not in result or 'to_name' not in result: logger.error( 'Unexpected annotation.result format: "from_name" or "to_name" not found', extra={'sentry_skip': True}, ) return None result_from_name = result['from_name'] key = get_annotation_tuple(result_from_name, result['to_name'], result_type or '') return key
5a0415ea99e3ef95bdbb2d6b62577c0c868b9540
81
https://github.com/heartexlabs/label-studio.git
166
def _get_annotation_key(self, result): result_type = result.get('type', None) if result_type in ('relation', 'pairwise', None): return None if 'from_name' not in result or 'to_name' not in result: logger.error( 'Unexpected annotation.result format: "from_name" or "to_name" not found', extra={'sentry_skip': True}, ) return None result_from_name = result['from_name'] key = get_annotation_
11
138
_get_annotation_key
33
0
1
9
tests/snuba/api/endpoints/test_organization_events.py
94,829
fix(tests): Fix dnd backend test flakes (#37916) This PR fixes 3 major flakes: Fixes SENTRY-TESTS-3J5: Just sort the project id order Fixes SENTRY-TESTS-3HQ: Flakes because we calculate the retention in the test once and the value returned in the response is calculated a little while after. We don't need to test for seconds granularity so replacing seconds to 0. Fixes SENTRY-TESTS-3J0: Successively calling before_now results in some flakes particularly in tests that are calculating aggregates on transaction.duration. Introduced a load_data method that takes a datetime object timestamp and a timedelta duration calculates the offset based on timestamp to get start_timestamp.
sentry
10
Python
26
test_organization_events.py
def test_stack_wildcard_condition(self): data = self.load_data(platform="javascript") data["timestamp"] = self.ten_mins_ago self.store_event(data=data, project_id=self.project.id) query = {"field": ["stack.filename", "message"], "query": "stack.filename:*.js"} response = self.do_request(query) assert response.status_code == 200, response.content assert len(response.data["data"]) == 1 assert response.data["meta"]["fields"]["message"] == "string"
ab993b32614bb83d17d10e1041817e43dd6f5980
99
https://github.com/getsentry/sentry.git
88
def test_stack_wildcard_condition(self): data = self.load_data(platform="javascript") data["timestamp"] = self.ten_mins_ago self.store_event(data=data, project_id=self.project.id) query = {"field": ["stack.filename", "message"], "query": "stack.filename:*.js"} response = self.do_request(query) assert response.status_code == 200, response.content assert len(response.data["data"]) == 1 assert respons
16
172
test_stack_wildcard_condition
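The flake fix described in this record's commit message derives an event's start time from a single reference timestamp plus a duration, instead of calling a now-relative helper twice. A minimal sketch of that idea follows; the helper name and signature are illustrative, not the actual sentry test utility:

    from datetime import datetime, timedelta, timezone
    from typing import Tuple

    def derive_event_times(timestamp: datetime, duration: timedelta) -> Tuple[datetime, datetime]:
        # One reference time plus a fixed duration yields a deterministic
        # (start_timestamp, timestamp) pair, avoiding two separate "now" reads.
        return timestamp - duration, timestamp

    # Example: a 3-second transaction anchored ten minutes in the past.
    ten_mins_ago = datetime.now(timezone.utc) - timedelta(minutes=10)
    start, end = derive_event_times(ten_mins_ago, timedelta(seconds=3))
    assert end - start == timedelta(seconds=3)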
112
1
1
32
tests/integration_tests/test_hyperopt_ray_horovod.py
6,877
Fix ray hyperopt (#1999) * WIP fix ray hyperopt * Fixed kwargs * Updated the nones * Placement groups * Updated test cpus * Test with dynamic resource allocation * Using 0 CPUs for evaluation and using dask annotate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updates to ray backend and hyperopt execution * Added dask global config * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Uncommented tests * Disabled async hyperband tests * Responded to comments * Fixed all hyperopt horovod tests to use 10 CPUs * Moved dask config setting to ray backend * Calculate stats for distributed datasets (#2016) * Fixed tests, responded to comments * Responded to comments * Updated horovod hyperopt tests to be consistent with the hyperopt refactor, added a df_engine attribute to RayPredictor * Added parentheses on pandas * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Travis Addair <tgaddair@gmail.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
ludwig
13
Python
81
test_hyperopt_ray_horovod.py
def test_hyperopt_run_hyperopt(csv_filename, ray_mock_dir): input_features = [number_feature(), number_feature()] output_features = [binary_feature()] csv_filename = os.path.join(ray_mock_dir, "dataset.csv") dataset_csv = generate_data(input_features, output_features, csv_filename, num_examples=100) dataset_parquet = create_data_set_to_use("parquet", dataset_csv) config = { "input_features": input_features, "output_features": output_features, "combiner": {"type": "concat", "num_fc_layers": 2}, TRAINER: {"epochs": 4, "learning_rate": 0.001}, "backend": {"type": "ray", **RAY_BACKEND_KWARGS}, } output_feature_name = output_features[0]["name"] hyperopt_configs = { "parameters": { "trainer.learning_rate": { "space": "loguniform", "lower": 0.001, "upper": 0.1, }, output_feature_name + ".output_size": {"space": "randint", "lower": 2, "upper": 32}, output_feature_name + ".num_fc_layers": {"space": "randint", "lower": 2, "upper": 6}, }, "goal": "minimize", "output_feature": output_feature_name, "validation_metrics": "loss", "executor": {"type": "ray", "num_samples": 2}, "search_alg": {"type": "variant_generator"}, } # add hyperopt parameter space to the config config["hyperopt"] = hyperopt_configs run_hyperopt(config, dataset_parquet, ray_mock_dir) @spawn
b59ce782e675d1c4511fad9f13b12fc3f2f02e90
@spawn
229
https://github.com/ludwig-ai/ludwig.git
322
def test_hyperopt_run_hyperopt(csv_filename, ray_mock_dir): input_features = [number_feature(), number_feature()] output_features = [binary_feature()] csv_filename = os.path.join(ray_mock_dir, "dataset.csv") dataset_csv = generate_data(input_features, output_features, csv_filename, num_examples=100) dataset_parquet = create_data_set_to_use("parquet", dataset_csv) config = { "input_features": input_features, "output_features": output_features, "combiner": {"type": "concat", "num_fc_layers": 2}, TRAINER: {"epochs": 4, "learning_rate": 0.001}, "backend": {"type": "ray", **RAY_BACKEND_KWARGS}, } output_feature_name = output_features[0]["name"] hyperopt_config
22
404
test_hyperopt_run_hyperopt
89
0
9
27
code/default/gae_proxy/local/web_control.py
218,969
v4.6.0 compatible with python 2.7.
XX-Net
15
Python
56
web_control.py
def req_importip_handler(self): req = urlparse(self.path).query reqs = parse_qs(req, keep_blank_values=True) data = '' if reqs['cmd'] == ['importip']: count = 0 ip_list = self.postvars['ipList'][0] lines = ip_list.split("\n") for line in lines: addresses = line.split('|') for ip in addresses: ip = ip.strip() if not utils.check_ip_valid(ip): continue if front.ip_manager.add_ip(ip, 100, "google.com", "gws"): count += 1 data = '{"res":"%s"}' % count front.ip_manager.save(force=True) elif reqs['cmd'] == ['exportip']: data = '{"res":"' for ip in front.ip_manager.ip_list: if front.ip_manager.ip_dict[ip]['fail_times'] > 0: continue data += "%s|" % ip data = data[0: len(data) - 1] data += '"}' self.send_response_nc('text/html', data)
0820c040ec2815f40bd0e469e27c2bf4d2cc33bc
196
https://github.com/XX-net/XX-Net.git
422
def req_importip_handler(self): req = urlparse(self.path).query reqs = parse_qs(req, keep_blank_values=True) data = '' if reqs['cmd'] == ['importip']: count = 0 ip_list = self.postvars['ipList'][0] lines = ip_list.split("\n") for line in lines: addresses = line.split('|') for ip in addresses: ip = ip.strip() if not utils.check_ip_valid(ip): continue if front.ip_manager.add_ip(ip, 100, "google.com", "gws"): count += 1 data = '{"res":"%s"}' % c
29
336
req_importip_handler
29
0
1
7
tests/blocks/test_core.py
55,912
Block capabilities (PrefectHQ/orion#1898) * Add capabilities to BlockSchemas * Remove type field from BlockSchemas * Create postgres migration, bump API version
prefect
10
Python
21
test_core.py
async def test_block_load(self, test_block, block_document): my_block = await test_block.load(block_document.name) assert my_block._block_document_name == block_document.name assert my_block._block_document_id == block_document.id assert my_block._block_type_id == block_document.block_type_id assert my_block._block_schema_id == block_document.block_schema_id assert my_block.foo == "bar"
168483e9cf038a3629f880f838b5aa9291a48411
58
https://github.com/PrefectHQ/prefect.git
70
async def test_block_load(self, test_block, block_document): my_block = await test_block.load(block_document.name) assert my_block._block_document_name == block_document.name assert my_block._block_document_id == block_document.id a
15
91
test_block_load
13
0
1
15
tests/components/blebox/test_climate.py
297,156
Blebox add thermoBox to climate (#81090) Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
8
Python
13
test_climate.py
async def test_reding_hvac_actions(saunabox, hass, caplog): caplog.set_level(logging.ERROR) feature_mock, entity_id = saunabox await async_setup_entity(hass, entity_id)
923fa473e171fcdf396556ea200612e378f9b0a5
108
https://github.com/home-assistant/core.git
25
async def test_reding_hvac_actions(saunabox, hass, caplog): caplog.set_level(logging.ERROR) feature_mock, entity_id = saunabox await async_setup_entity(hass, entity_id)
10
50
test_reding_hvac_actions
37
0
2
15
saleor/plugins/tests/test_manager.py
27,323
Revert "Add fix for multiplied prices on Avatax side (#9699)" (#9750) This reverts commit 5dc3a30ef3bb8dfce67ede276fa465e2c420d003.
saleor
13
Python
30
test_manager.py
def test_manager_calculates_order_line_total(order_line, plugins): currency = order_line.order.currency expected_total = ( TaxedMoney(Money("1.0", currency), Money("1.0", currency)) if plugins else quantize_price(order_line.unit_price * order_line.quantity, currency) ) taxed_total = ( PluginsManager(plugins=plugins) .calculate_order_line_total( order_line.order, order_line, order_line.variant, order_line.variant.product ) .price_with_discounts ) assert expected_total == taxed_total
ab7e4e203fd23a5fec1d27d0774905c52c509dc3
84
https://github.com/saleor/saleor.git
114
def test_manager_calculates_order_line_total(order_line, plugins): currency = order_line.order.currency expected_total = ( TaxedMoney(Money("1.0", currency), Money("1.0", currency)) if plugins else quantize_price(order_line.unit_price * order_line.quantity, currency) ) taxed_total = ( PluginsManager(plugins=plugins) .cal
17
127
test_manager_calculates_order_line_total
41
0
2
9
thumbor/filters/blur.py
191,063
Reformat to 80 chars and mypy.ini
thumbor
9
Python
30
blur.py
def apply_blur(mode, data, size, radius, sigma=0): if sigma == 0: sigma = radius radius = min(radius, MAX_RADIUS) matrix, matrix_size = generate_1d_matrix(sigma, radius) data = _convolution.apply( mode, data, size[0], size[1], matrix, matrix_size, True ) return _convolution.apply(mode, data, size[0], size[1], matrix, 1, True)
301124c5b377fa56b940d298900dbc5816dbc24e
92
https://github.com/thumbor/thumbor.git
72
def apply_blur(mode, data, size, radius, sigma=0): if sigma
13
124
apply_blur
9
0
1
4
wagtail/documents/tests/test_views.py
74,876
Reformat with black
wagtail
12
Python
9
test_views.py
def test_content(self): self.assertEqual( b"".join(self.get().streaming_content), b"A boring example document" )
d10f15e55806c6944827d801cd9c2d53f5da4186
26
https://github.com/wagtail/wagtail.git
33
def test_content(self): self.assertEqual(
6
43
test_content
27
0
1
8
test/test_components.py
179,939
blocks-components-tests - move gradio/test_data to test/test_data/media_data
gradio
13
Python
23
test_components.py
def test_tokenize(self): x_wav = media_data.BASE64_AUDIO audio_input = gr.Audio() tokens, _, _ = audio_input.tokenize(x_wav) self.assertEquals(len(tokens), audio_input.interpretation_segments) x_new = audio_input.get_masked_inputs(tokens, [[1] * len(tokens)])[0] similarity = SequenceMatcher(a=x_wav["data"], b=x_new).ratio() self.assertGreater(similarity, 0.9)
070b8a96b5b8448e306bd40f2b12d44b759afd48
93
https://github.com/gradio-app/gradio.git
75
def test_tokenize(self): x_wav = media_data.BASE64_AUDIO audio_input = gr.Audio() tokens, _, _ = audio_input.tokenize(x_wav) self.assertEquals(len(tokens), audio_input.interpretation_segments) x_new = audio_input.get_masked_inputs(tokens, [[1
22
143
test_tokenize
16
0
2
3
django/core/signing.py
204,789
Refs #33476 -- Reformatted code with Black.
django
9
Python
14
signing.py
def signature(self, value, key=None): key = key or self.key return base64_hmac(self.salt + "signer", value, key, algorithm=self.algorithm)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
37
https://github.com/django/django.git
29
def signature(self, value, key=None): key = key or self.key return b
7
55
signature
52
0
1
9
sympy/tensor/tests/test_tensor.py
200,564
Tests for subs and xreplace with dummy index conflicts see https://github.com/sympy/sympy/issues/24337
sympy
14
Python
37
test_tensor.py
def test_TensMul_subs(): R3 = TensorIndexType('R3', dim=3) p, q, r = tensor_indices("p q r", R3) K = TensorHead("K", [R3]) V = TensorHead("V", [R3]) C0 = TensorIndex(R3.dummy_name + "_0", R3, True) assert ( K(p)*V(r)*K(-p) ).subs({V(r): K(q)*K(-q)}) == K(p)*K(q)*K(-q)*K(-p) assert ( K(p)*V(r)*K(-p) ).xreplace({V(r): K(q)*K(-q)}) == K(p)*K(q)*K(-q)*K(-p) assert ( K(p)*V(r) ).xreplace({p: C0, V(r): K(q)*K(-q)}) == K(C0)*K(q)*K(-q)
e31e048fe4834f7259193c5e13e7e7b0d5fcd230
236
https://github.com/sympy/sympy.git
79
def test_TensMul_subs(): R3 = TensorIndexType('R3', dim=3) p, q, r = tensor_indices("p q r", R3) K = TensorHead("K", [R3]) V = TensorHead("V", [R3])
16
386
test_TensMul_subs
89
1
5
12
python/ray/tests/kuberay/test_autoscaling_config.py
135,330
[autoscaler][kuberay] Never request more than maxReplicas worker pods (#29770) Partially addresses ray-project/kuberay#560, in which it was observed that "replicas" was being set higher than "maxReplicas" in the KubeRay CR. Applies a surface-level fix by making sure that the autoscaler does not set replicas higher the maxReplicas when creating nodes. Signed-off-by: Dmitri Gekhtman <dmitri.m.gekhtman@gmail.com>
ray
12
Python
57
test_autoscaling_config.py
def test_cr_image_consistency(): cr = get_basic_ray_cr() group_specs = [cr["spec"]["headGroupSpec"]] + cr["spec"]["workerGroupSpecs"] # Head, CPU group, GPU group. assert len(group_specs) == 3 ray_containers = [ group_spec["template"]["spec"]["containers"][0] for group_spec in group_specs ] # All Ray containers in the example config have "ray-" in their name. assert all("ray-" in ray_container["name"] for ray_container in ray_containers) # All Ray images are from the Ray repo. assert all( "rayproject/ray" in ray_container["image"] for ray_container in ray_containers ) # All Ray images are the same. assert len({ray_container["image"] for ray_container in ray_containers}) == 1 @pytest.mark.parametrize("exception", [Exception, requests.HTTPError]) @pytest.mark.parametrize("num_exceptions", range(6))
9c9977f814facdebc1828fa576531fc95f553172
@pytest.mark.parametrize("exception", [Exception, requests.HTTPError]) @pytest.mark.parametrize("num_exceptions", range(6))
101
https://github.com/ray-project/ray.git
143
def test_cr_image_consistency(): cr = get_basic_ray_cr() group_specs = [cr["spec"]["headGroupSpec"]] + cr["spec"]["workerGroupSpecs"] # Head, CPU group, GPU group. assert len(group_specs) == 3 ray_containers = [ group_spec["template"]["spec"]["containers"][0] for group_spec in group_specs ] # All Ray containers in the example config have "ray-" in their name. assert all("ray-" in ray_container["name"] for ray_container in ray_containers) # All Ray images are from the Ray repo. assert all( "rayproject/
16
229
test_cr_image_consistency
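The commit message for this record describes capping autoscaler requests so the number of worker pods never exceeds maxReplicas. A minimal, illustrative clamp of the requested replica count is sketched below; it is not the KubeRay autoscaler code, just the shape of the fix:

    def clamp_replicas(requested: int, min_replicas: int, max_replicas: int) -> int:
        # Keep the request inside the [min_replicas, max_replicas] window so the
        # operator is never asked for more pods than the CR allows.
        if min_replicas > max_replicas:
            raise ValueError("min_replicas must not exceed max_replicas")
        return max(min_replicas, min(requested, max_replicas))

    assert clamp_replicas(requested=10, min_replicas=0, max_replicas=3) == 3
    assert clamp_replicas(requested=1, min_replicas=2, max_replicas=5) == 2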
19
1
1
3
tests/freqai/test_freqai_interface.py
151,238
skip darwin in RL tests, remove example scripts, improve doc
freqtrade
8
Python
18
test_freqai_interface.py
def is_mac() -> bool: machine = platform.system() return "Darwin" in machine @pytest.mark.parametrize('model', [ 'LightGBMRegressor', 'XGBoostRegressor', 'CatboostRegressor', 'ReinforcementLearner', 'ReinforcementLearner_multiproc' ])
eeebb78a5c772b0c3e569fd476587facb1f8a9dc
@pytest.mark.parametrize('model', [ 'LightGBMRegressor', 'XGBoostRegressor', 'CatboostRegressor', 'ReinforcementLearner', 'ReinforcementLearner_multiproc' ])
17
https://github.com/freqtrade/freqtrade.git
41
def is_mac() -> bool: machine = platform.system() return "Darwin" in machine @pytest.mark.parametrize('model', [ 'LightGBMReg
8
71
is_mac
65
0
6
20
src/sentry/eventstore/models.py
85,387
feat(perf_issues): Add `GroupEvent` and split some functionality in `Event` into a base class. (#38143) Since we can now have events with multiple groups, we can no longer rely on the `Event.group` property. This pr adds in a `GroupEvent` subclass that should be passed around wherever we expect an event to have a single `Group` associated with it. `Event` has been split up into `BaseEvent` and `Event`. We will deprecate and remove uses of `group_id` and `group` in the `Event` class going forward. If we need an event with a `Group`, we can use `build_group_events` to fetch all `GroupEvents` associated with the `Event`, or `for_group` if we just need a specific `Event`/`Group` pairing. Going forward, the plan is to store all groups in the `groups` property. This means that error events being sent via eventstream will have their group included in `groups` as well. We'll need to update the errors processor in snuba to look there instead of `group_id`. This seems cleaner long term, instead of having both `group_id` and `group_ids` passed through. To figure out where we need to use `build_group_events` and `for_group` we can do a mix of searching the codebase and commenting out the `group_id` and `group` properties and see how CI goes.
sentry
14
Python
43
models.py
def groups(self) -> Sequence[Group]: from sentry.models import Group if getattr(self, "_groups_cache"): return self._groups_cache if self._group_ids is not None: group_ids = self._group_ids else: snuba_group_id = self.group_id # TODO: Replace `snuba_group_id` with this once we deprecate `group_id`. # snuba_group_id = self._snuba_data.get(self._get_column_name(Columns.GROUP_ID)) snuba_group_ids = self._snuba_data.get(self._get_column_name(Columns.GROUP_IDS)) group_ids = [] if snuba_group_id: group_ids.append(snuba_group_id) if snuba_group_ids: group_ids.extend(snuba_group_ids) if group_ids: groups = list(Group.objects.filter(id__in=group_ids)) else: groups = [] self._groups_cache = groups return groups
6aaaf5089b2c39757883179df5a8512db3b0c716
118
https://github.com/getsentry/sentry.git
271
def groups(self) -> Sequence[Group]: from sentry.models import Group if getattr(self, "_groups_cache"): return self._groups_cache if self._group_ids is not None: group_ids = self._group_ids else: snuba_group_id = self.group_id # TODO: Replace `snuba_group_id` with this once we deprecate `group_id`. # snuba_group_id = self._snuba_data.get(self._get_column_name(C
24
194
groups
12
0
1
4
src/datasets/iterable_dataset.py
105,911
Multiprocessed dataset builder [WIP] (#5107) * multiprocessing-compatible naming scheme and refactor * multiprocessed shard writing for GeneratorBasedBuilder * multiprocessed shard writing for ArrowBasedBuilder * style * multiprocessed dataset loading * compatibility with non-sharded datasets * bugfix * bugfix * removed unused import * fixed bad ordering * less misleading tqdm * fix gen_kwargs distribution + read shards * minor * minor2 * support beam datasets * docstrings + minor * add iflatmap_unordered for parallel write & progress updates * use 1 tqdm bar receiving updates from subprocesses * docs * add test_iflatmap_unordered * style * test arrow_reader.py * fix test_iflatmap_unordered * add Beam test_download_and_prepare_sharded * test gen_kwargs distribution * test download_and_prepare with num_proc * style * improve test * don't close the pool * fix multiprocessing on windows * keep multiprocessing disabled by default * again + docs * more docs * more docs * some var renaming * style * Apply suggestions from code review Co-authored-by: Mario Šaško <mariosasko777@gmail.com> * Apply suggestions from code review Co-authored-by: Mario Šaško <mariosasko777@gmail.com> * added utils/sharding.py * style * style Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com> Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Co-authored-by: Mario Šaško <mariosasko777@gmail.com>
datasets
9
Python
11
iterable_dataset.py
def __iter__(self): rng = deepcopy(self.generator) kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) yield from self.generate_examples_fn(**kwargs_with_shuffled_shards)
2945690ea731f85a356220a71cdc630281c676f4
33
https://github.com/huggingface/datasets.git
40
def __iter__(self): rng = deepcopy(self.generator) kwargs_with_shuff
9
56
__iter__
126
0
1
18
tests/utils/test_cache.py
187,056
chore: remove unnecessary collections.OrderedDict - Replace collections.OrderedDict with builtins.dict where possible: Python 3.7+ guarantees insertion order in builtins.dict objects, so it is no longer just an implementation detail of cpython. - Fix OrderedDict type annotation in streamlink.utils.cache.LRUCache - Add unit test for streamlink.utils.cache.LRUCache
streamlink
11
Python
64
test_cache.py
def test_lru_cache(): cache = LRUCache(num=3) assert cache.get("foo") is None, "Getter returns None for unknown items" cache.set("foo", "FOO") assert list(cache.cache.items()) == [("foo", "FOO")], "Setter adds new items" assert cache.get("foo") == "FOO", "Getter returns correct value of known items" cache.set("bar", "BAR") cache.set("baz", "BAZ") cache.set("qux", "QUX") assert list(cache.cache.items()) == [("bar", "BAR"), ("baz", "BAZ"), ("qux", "QUX")], "Setter respects max queue size" cache.get("bar") assert list(cache.cache.items()) == [("baz", "BAZ"), ("qux", "QUX"), ("bar", "BAR")], "Getter moves known items to the end" cache.get("unknown") assert list(cache.cache.items()) == [("baz", "BAZ"), ("qux", "QUX"), ("bar", "BAR")], "Getter keeps order on unknown items" cache.set("foo", "FOO") assert list(cache.cache.items()) == [("qux", "QUX"), ("bar", "BAR"), ("foo", "FOO")], "Setter moves new items to the end" cache.set("qux", "QUUX") assert list(cache.cache.items()) == [("bar", "BAR"), ("foo", "FOO"), ("qux", "QUUX")], "Setter moves known items to the end"
6325c74e6869b45051ec111e4243d77cc536ba66
280
https://github.com/streamlink/streamlink.git
176
def test_lru_cache(): cache = LRUCache(num=3) assert cache.get("foo") is None, "Getter returns None for unknown items" cache.set("foo", "FOO") assert list(cache.cache.items()) == [("foo", "FOO")
8
515
test_lru_cache
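The test above exercises an LRU cache that, per the commit message, no longer needs collections.OrderedDict because plain dicts preserve insertion order on Python 3.7+. The following sketch shows the idea with a plain dict; it is illustrative and only assumes the same get/set/num/cache attribute names as the test, not the actual streamlink implementation:

    class TinyLRUCache:
        # Plain-dict LRU cache: insertion order doubles as recency order.
        def __init__(self, num: int) -> None:
            self.num = num
            self.cache: dict = {}

        def get(self, key, default=None):
            if key not in self.cache:
                return default
            # Re-insert to mark the key as most recently used.
            self.cache[key] = self.cache.pop(key)
            return self.cache[key]

        def set(self, key, value) -> None:
            self.cache.pop(key, None)
            self.cache[key] = value
            if len(self.cache) > self.num:
                # Evict the least recently used entry (the first key in the dict).
                del self.cache[next(iter(self.cache))]

    cache = TinyLRUCache(num=3)
    cache.set("foo", "FOO")
    cache.set("bar", "BAR")
    cache.set("baz", "BAZ")
    cache.get("foo")          # "foo" becomes most recently used
    cache.set("qux", "QUX")   # evicts "bar", the least recently used
    assert list(cache.cache) == ["baz", "foo", "qux"]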
9
0
2
2
fastai/gen_doc/nbtest.py
190,355
Upgrading to support latest Pytorch version
DeOldify
9
Python
9
nbtest.py
def get_qualname(elt): return elt.__qualname__ if hasattr(elt, '__qualname__') else fn_name(elt)
4fc3616712edb19179b17dd270ad6cf63abf99c2
21
https://github.com/jantic/DeOldify.git
11
def get_qualname(elt): return elt.__qualname__
5
34
get_qualname
75
0
1
31
networkx/generators/small.py
176,242
Use from_dict_of_lists instead of make_small_graph in generators.small (#5267) * Add test for digraph creation behavior. * Use from_dict_of_lists instead of make_small_graph * Make sure generators don't support digraph. * Rm redundant create_using check.
networkx
11
Python
66
small.py
def truncated_cube_graph(create_using=None): G = nx.from_dict_of_lists( { 0: [1, 2, 4], 1: [11, 14], 2: [3, 4], 3: [6, 8], 4: [5], 5: [16, 18], 6: [7, 8], 7: [10, 12], 8: [9], 9: [17, 20], 10: [11, 12], 11: [14], 12: [13], 13: [21, 22], 14: [15], 15: [19, 23], 16: [17, 18], 17: [20], 18: [19], 19: [23], 20: [21], 21: [22], 22: [23], }, create_using=create_using, ) G.name = "Truncated Cube Graph" return G
7669e7f2f31485015f3ea7cdd535e086467fa433
193
https://github.com/networkx/networkx.git
364
def truncated_cube_graph(create_using=None): G = nx.from_dict_of_lists( { 0: [1, 2, 4], 1: [11, 14], 2: [3, 4], 3: [6, 8], 4: [5],
6
260
truncated_cube_graph
367
1
12
76
src/prefect/cli/profile.py
58,520
Remove extra "f" (#6384)
prefect
26
Python
122
profile.py
async def check_orion_connection(profile_name): with use_profile(profile_name, include_current_context=False): httpx_settings = dict(timeout=3) try: # attempt to infer Cloud 2.0 API from the connection URL cloud_client = get_cloud_client( httpx_settings=httpx_settings, infer_cloud_url=True ) res = await cloud_client.api_healthcheck() exit_method, msg = ( exit_with_success, f"Connected to Prefect Cloud using profile {profile_name!r}", ) except CloudUnauthorizedError: # if the Cloud 2.0 API exists and fails to authenticate, notify the user exit_method, msg = ( exit_with_error, f"Error authenticating with Prefect Cloud using profile {profile_name!r}", ) except httpx.HTTPStatusError as exc: if exc.response.status_code == status.HTTP_404_NOT_FOUND: # if the route does not exist, attmpt to connect as a hosted Orion instance try: # inform the user if Prefect Orion endpoints exist, but there are # connection issues client = get_client(httpx_settings=httpx_settings) connect_error = await client.api_healthcheck() if connect_error is not None: exit_method, msg = ( exit_with_error, f"Error connecting to Prefect Orion using profile {profile_name!r}", ) elif await client.using_ephemeral_app(): # if the client is using an ephemeral Orion app, inform the user exit_method, msg = ( exit_with_success, f"No Prefect Orion instance specified using profile {profile_name!r}. " f"Flow run metadata will be stored at the locally configured database: {prefect.settings.PREFECT_ORION_DATABASE_CONNECTION_URL.value()}", ) else: exit_method, msg = ( exit_with_success, f"Connected to Prefect Orion using profile {profile_name!r}", ) except Exception as exc: exit_method, msg = ( exit_with_error, f"Error connecting to Prefect Orion using profile {profile_name!r}", ) else: exit_method, msg = ( exit_with_error, f"Error connecting to Prefect Cloud: {exc!r}", ) except TypeError: # if no Prefect Orion API URL has been set, httpx will throw a TypeError try: # try to connect with the client anyway, it will likely use an # ephemeral Orion instance client = get_client(httpx_settings=httpx_settings) connect_error = await client.api_healthcheck() if connect_error is not None: exit_method, msg = ( exit_with_error, f"Error connecting to Prefect Orion using profile {profile_name!r}", ) elif await client.using_ephemeral_app(): exit_method, msg = ( exit_with_success, f"No Prefect Orion instance specified using profile {profile_name!r}. " f"Flow run metadata will be stored at the locally configured database: {prefect.settings.PREFECT_ORION_DATABASE_CONNECTION_URL.value()}", ) else: exit_method, msg = ( exit_with_success, f"Connected to Prefect Orion using profile {profile_name!r}", ) except Exception as exc: exit_method, msg = ( exit_with_error, f"Error connecting to Prefect Orion using profile {profile_name!r}", ) except (httpx.ConnectError, httpx.UnsupportedProtocol) as exc: exit_method, msg = exit_with_error, "Invalid Prefect API URL" return exit_method, msg @profile_app.command()
2f22824cd7af9bb89c103698c05036f2542caff1
@profile_app.command()
298
https://github.com/PrefectHQ/prefect.git
1,781
async def check_orion_connection(profile_name): with use_profile(profile_name, include_current_context=False): httpx_settings = dict(timeout=3) try: # attempt to infer Cloud 2.0 API from the connection URL cloud_client = get_cloud_client( httpx_settings=httpx_settings, infer_cloud_url=True ) res = await cloud_client.api_healthcheck() exit_method, msg = ( exit_with_success, f"Connected to Prefect Cloud using profile {profile_name!r}", ) except CloudUnauthorizedError: # if the Cloud 2.0 API exists and fails to authenticate, notify the user exit_method, msg = ( exit_with_error, f"Error authenticating with Prefect Cloud using profile {profile_name!r}", ) except httpx.HTTPStatusError as exc: if exc.response.status_code == status.HTTP_404_NOT_FOUND: # if the route does not exist, attmpt to connect as a hosted Orion instance try: # inform the user if Prefect Orion endpoints exist, but there are # connection issues client = get_client(httpx_settings=httpx_settings) connect_error = await client.api_healthcheck() if connect_error is not None: exit_method, msg = ( exit_with_error, f"Error connecting to Prefect Orion using profile {profile_name!r}", ) elif await client.using_ephemeral_app(): # if the client is using an ephemeral Orion app, inform the user exit_method, msg = ( exit_with_success, f"No Prefect Orion instance specified using profile {profile_name!r}. " f"Flow run metadata will be stored at the locally configured database: {prefect.settings.PREFECT_ORION_DATABASE_CONNECTION_URL.value()}", ) else: exit_method, msg = ( exit_with_success, f"Connected to Prefect Orion using profile {profile_name!r}", ) except Exception as exc: exit_method, msg = ( exit_with_error, f"Error connecting to Prefect Orion using profile {profile_name!r}", ) else: exit_method, msg = ( exit_with_error, f"Error connecting to Prefect Cloud: {exc!r}", ) except TypeError: # if no Prefect Orion API URL has been set, httpx will throw a TypeError try: # try to connect with the client anyway, it will likely use an # ephemeral Orion instance client = get_client(httpx_settings=httpx_settings) connect_error = await client.api_healthcheck() if connect_error is not None: exit_method, msg = ( exit_with_error, f"Error connecting to Prefect Orion using profile {profile_name!r}", ) elif await client.using_ephemeral_app(): exit_method, msg = ( exit_with_success, f"No Prefect Orion instance specified using profile {profile_name!r}. " f"Flow run metadata will be stored at the locally configured database: {prefect.settings.PREFECT_ORION_DATABASE_CONNECTION_URL.value()}", ) else: exit_method, msg = ( exit_with_success, f"Connected to Prefect Orion using profile {profile_name!r}", ) except Exception as exc: exit_method, msg = ( exit_with_er
38
585
check_orion_connection
13
0
2
3
src/prefect/utilities/logging.py
53,044
Update src/prefect/utilities/logging.py Co-authored-by: Michael Adkins <madkinszane@gmail.com>
prefect
13
Python
13
logging.py
def process(self, msg, kwargs): kwargs["extra"] = {**self.extra, **(kwargs.get("extra") or {})} return (msg, kwargs)
3a2d581ec0540dab8efc5e30c1bc10dfa321f2b5
39
https://github.com/PrefectHQ/prefect.git
26
def process(self, msg, kwargs): kwargs["extra"] = {**self.extra, **(kwarg
6
62
process
7
0
1
3
keras/legacy_tf_layers/variable_scope_shim_test.py
274,469
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
11
Python
7
variable_scope_shim_test.py
def call(self, inputs): with tf.compat.v1.variable_scope("foo"): return self.scale_by_y(inputs)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
26
https://github.com/keras-team/keras.git
24
def call(self, inputs): with tf.compat.v1.variable_scope(
8
45
call
179
0
10
37
test/test_prototype_transforms.py
194,353
rename features._Feature to datapoints._Datapoint (#7002) * rename features._Feature to datapoints.Datapoint * _Datapoint to Datapoint * move is_simple_tensor to transforms.utils * fix CI * move Datapoint out of public namespace
vision
13
Python
72
test_prototype_transforms.py
def test__get_params(self, padding, pad_if_needed, size, mocker): image = mocker.MagicMock(spec=datapoints.Image) image.num_channels = 3 image.spatial_size = (24, 32) h, w = image.spatial_size transform = transforms.RandomCrop(size, padding=padding, pad_if_needed=pad_if_needed) params = transform._get_params([image]) if padding is not None: if isinstance(padding, int): pad_top = pad_bottom = pad_left = pad_right = padding elif isinstance(padding, list) and len(padding) == 2: pad_left = pad_right = padding[0] pad_top = pad_bottom = padding[1] elif isinstance(padding, list) and len(padding) == 4: pad_left, pad_top, pad_right, pad_bottom = padding h += pad_top + pad_bottom w += pad_left + pad_right else: pad_left = pad_right = pad_top = pad_bottom = 0 if pad_if_needed: if w < size[1]: diff = size[1] - w pad_left += diff pad_right += diff w += 2 * diff if h < size[0]: diff = size[0] - h pad_top += diff pad_bottom += diff h += 2 * diff padding = [pad_left, pad_top, pad_right, pad_bottom] assert 0 <= params["top"] <= h - size[0] + 1 assert 0 <= params["left"] <= w - size[1] + 1 assert params["height"] == size[0] assert params["width"] == size[1] assert params["needs_pad"] is any(padding) assert params["padding"] == padding
a8007dcdfb5159a711fa343d2ac4bb7df826975f
308
https://github.com/pytorch/vision.git
558
def test__get_params(self, padding, pad_if_needed, size, mocker): image = mocker.MagicMock(spec=datapoints.Image) image.num_channels = 3 image.spatial_size = (24, 32) h, w = image.spatial_size transform = transforms.RandomCrop(size, padding=padding, pad_if_needed=pad_if_needed) params = transform._get_params([image]) if padding is not None: if isinstance(padding, int): pad_top = pad_bottom = pad_left = pad_right = padding elif isinstance(padding, list) and len(padding) == 2:
30
470
test__get_params
134
1
1
48
tests/components/mqtt/test_discovery.py
288,080
Move MQTT discovery hass.data globals to dataclass (#78706) * Add MQTT discovery hass.data globals to dataclass * isort * Additional rework * Add hass.data["mqtt_tags"] to dataclass * Follow-up comment * Corrections
core
9
Python
73
test_discovery.py
async def test_discovery_expansion(hass, mqtt_mock_entry_no_yaml_config, caplog): await mqtt_mock_entry_no_yaml_config() data = ( '{ "~": "some/base/topic",' ' "name": "DiscoveryExpansionTest1",' ' "stat_t": "test_topic/~",' ' "cmd_t": "~/test_topic",' ' "availability": [' " {" ' "topic":"~/avail_item1",' ' "payload_available": "available",' ' "payload_not_available": "not_available"' " }," " {" ' "topic":"avail_item2/~",' ' "payload_available": "available",' ' "payload_not_available": "not_available"' " }" " ]," ' "dev":{' ' "ids":["5706DF"],' ' "name":"DiscoveryExpansionTest1 Device",' ' "mdl":"Generic",' ' "hw":"rev1",' ' "sw":"1.2.3.4",' ' "mf":"None",' ' "sa":"default_area"' " }" "}" ) async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data) await hass.async_block_till_done() state = hass.states.get("switch.DiscoveryExpansionTest1") assert state.state == STATE_UNAVAILABLE async_fire_mqtt_message(hass, "avail_item2/some/base/topic", "available") await hass.async_block_till_done() state = hass.states.get("switch.DiscoveryExpansionTest1") assert state is not None assert state.name == "DiscoveryExpansionTest1" assert ("switch", "bla") in hass.data["mqtt"].discovery_already_discovered assert state.state == STATE_UNKNOWN async_fire_mqtt_message(hass, "test_topic/some/base/topic", "ON") state = hass.states.get("switch.DiscoveryExpansionTest1") assert state.state == STATE_ON async_fire_mqtt_message(hass, "some/base/topic/avail_item1", "not_available") await hass.async_block_till_done() state = hass.states.get("switch.DiscoveryExpansionTest1") assert state.state == STATE_UNAVAILABLE @patch("homeassistant.components.mqtt.PLATFORMS", [Platform.SWITCH])
84b2c74746b694d217fe6d448a8dfff4bc2d7a9e
@patch("homeassistant.components.mqtt.PLATFORMS", [Platform.SWITCH])
184
https://github.com/home-assistant/core.git
451
async def test_discovery_expansion(hass, mqtt_mock_entry_no_yaml_config, caplog): await mqtt_mock_entry_no_yaml_config() data = ( '{ "~": "some/base/topic",' ' "name": "DiscoveryExpansionTest1",' ' "stat_t": "test_topic/~",' ' "cmd_t": "~/test_topic",' ' "availability": [' " {" ' "topic":"~/avail_item1",' ' "payload_available": "available",' ' "payload_not_available": "not_available"' " }," " {" ' "topic":"avail_item2/~",' ' "payload_available": "available",' ' "payload_not_ava
18
375
test_discovery_expansion
20
0
3
6
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
21,493
Vendor in pip 22.1.2
pipenv
10
Python
18
tarfile.py
def __iter__(self): while True: line = self.readline() if not line: break yield line #class ExFileObject #------------------ # Exported Classes #------------------
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
23
https://github.com/pypa/pipenv.git
78
def __iter__(self): while True: line = self.readline() if not line: break
4
47
__iter__
69
1
4
15
pipenv/patched/notpip/_vendor/platformdirs/android.py
20,198
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
pipenv
17
Python
53
android.py
def _android_folder() -> str: try: # First try to get path to android app via pyjnius from jnius import autoclass Context = autoclass("android.content.Context") # noqa: N806 result: str = Context.getFilesDir().getParentFile().getAbsolutePath() except Exception: # if fails find an android folder looking path on the sys.path pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files") for path in sys.path: if pattern.match(path): result = path.split("/files")[0] break else: raise OSError("Cannot find path to android app folder") return result @lru_cache(maxsize=1)
f3166e673fe8d40277b804d35d77dcdb760fc3b3
@lru_cache(maxsize=1)
84
https://github.com/pypa/pipenv.git
189
def _android_folder() -> str: try: # First try to get path to android app via pyjnius from jnius import autoclass Context = autoclass("android.content.Context") # noqa: N806 result: str = Context.getFilesDir().getParentFile().getAbsolutePath() except Exception: # if fails find an android folder looking path on the sys.path pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files") for path in sys.path: if pattern.match(path): re
20
163
_android_folder
112
0
8
21
python/ray/data/tests/test_dataset.py
129,300
[Dataset] [DataFrame 2/n] Add pandas block format implementation (partial) (#20988) This PR adds pandas block format support by implementing `PandasRow`, `PandasBlockBuilder`, `PandasBlockAccessor`. Note that `sort_and_partition`, `combine`, `merge_sorted_blocks`, `aggregate_combined_blocks` in `PandasBlockAccessor` redirects to arrow block format implementation for now. They'll be implemented in a later PR. Co-authored-by: Clark Zinzow <clarkzinzow@gmail.com> Co-authored-by: Eric Liang <ekhliang@gmail.com>
ray
15
Python
61
test_dataset.py
def test_from_pandas_refs(ray_start_regular_shared, enable_pandas_block): ctx = ray.data.context.DatasetContext.get_current() old_enable_pandas_block = ctx.enable_pandas_block ctx.enable_pandas_block = enable_pandas_block try: df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]}) df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]}) ds = ray.data.from_pandas_refs([ray.put(df1), ray.put(df2)]) assert ds._dataset_format( ) == "pandas" if enable_pandas_block else "arrow" values = [(r["one"], r["two"]) for r in ds.take(6)] rows = [(r.one, r.two) for _, r in pd.concat([df1, df2]).iterrows()] assert values == rows # test from single pandas dataframe ref ds = ray.data.from_pandas_refs(ray.put(df1)) assert ds._dataset_format( ) == "pandas" if enable_pandas_block else "arrow" values = [(r["one"], r["two"]) for r in ds.take(3)] rows = [(r.one, r.two) for _, r in df1.iterrows()] assert values == rows finally: ctx.enable_pandas_block = old_enable_pandas_block
4a55d10bb1b70971f50a3872421f2c1eebd84e64
269
https://github.com/ray-project/ray.git
238
def test_from_pandas_refs(ray_start_regular_shared, enable_pandas_block): ctx = ray.data.context.DatasetContext.get_current() old_enable_pandas_block = ctx.enable_pandas_block ctx.enable_pandas_block = enable_pandas_block try: df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]}) df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]}) ds = ray.data.from_pandas_refs([ray.put(df1), ray.put(df2)]) assert ds._dataset_format( ) == "pandas" if enable_pandas_block else "arrow" values = [(r["one"], r["two"]) for r in ds.take(6)] rows = [(r.one, r.two) for _, r in pd.concat([df1, df
27
435
test_from_pandas_refs
8
0
1
3
homeassistant/components/zha/switch.py
301,326
Add configurable zha switch entity (#71784) * add configurable zha switch entity * final zha configurable switch * fix codecov * replaced erroneous cluster with local quirk * test fix * minor changes
core
8
Python
8
switch.py
async def async_turn_on(self, **kwargs) -> None: await self.async_turn_on_off(True)
0c2f22d4780612545c483627da729e44d46ee9fd
18
https://github.com/home-assistant/core.git
22
async def async_turn_on(self, **kwargs) -> None: await self.async_turn_on_off(True)
4
33
async_turn_on
24
0
3
9
homeassistant/components/fibaro/climate.py
291,300
Support hvacsystem in fibaro integration (#78234) fixes undefined
core
14
Python
20
climate.py
def hvac_action(self) -> HVACAction | None: if not self._op_mode_device: return None prop = self._op_mode_device.fibaro_device.properties if "thermostatOperatingState" in prop: with suppress(ValueError): return HVACAction(prop.thermostatOperatingState.lower()) return None
cd2377bc054ebe4c5c0432aac525d768dcfbe57a
51
https://github.com/home-assistant/core.git
96
def hvac_action(self) -> HVACAction | None: if not self._op_mode_device: return None prop = self._op_mode_device.fibaro_device.properties if "thermostatOperatingState" in prop: with suppress(ValueError):
11
89
hvac_action
19
0
1
9
tests/cli/test_cloud.py
59,860
Add login with a browser to `prefect cloud login` (#7334)
prefect
12
Python
18
test_cloud.py
def test_login_with_invalid_key(key, expected_output, respx_mock): respx_mock.get(PREFECT_CLOUD_API_URL.value() + "/me/workspaces").mock( return_value=httpx.Response(status.HTTP_403_FORBIDDEN) ) invoke_and_assert( ["cloud", "login", "--key", key, "--workspace", "foo"], expected_code=1, expected_output=expected_output, )
1a6dee5e9eb71e6e6d1d3492002e9cd674ab9f9b
60
https://github.com/PrefectHQ/prefect.git
58
def test_login_with_invalid_key(key, expected_output, respx_mock): respx_mock.get(PREFECT_CLOUD_API_URL.value() + "/me/workspaces").mock( return_value=httpx.Response(status.HTTP_403_FORBIDDEN) ) invoke_and_assert( ["cloud", "login", "--key", key, "--workspace", "foo"], expected_code=1, expe
15
98
test_login_with_invalid_key
15
0
4
2
fastai/data_block.py
190,254
Upgrading to support latest Pytorch version
DeOldify
12
Python
13
data_block.py
def _decode(df): return np.array([[df.columns[i] for i,t in enumerate(x) if t==1] for x in df.values], dtype=np.object)
4fc3616712edb19179b17dd270ad6cf63abf99c2
46
https://github.com/jantic/DeOldify.git
17
def _decode(df): return np.array([[df.columns[i] for i,t in enumerate(x) if t==1] for x i
12
68
_decode
69
1
6
18
keras/backend.py
269,587
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
16
Python
35
backend.py
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]: if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]: return _broadcast_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon ) return _fused_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon ) else: if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: return _regular_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon ) else: return _broadcast_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon ) @keras_export("keras.backend.batch_normalization") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.batch_normalization") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
154
https://github.com/keras-team/keras.git
232
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]: if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]: return _broadcast_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon ) return _fused_
21
245
normalize_batch_in_training
23
0
2
9
mitmproxy/tools/console/grideditor/base.py
252,592
Replace blinker with custom implementation (#5528) * replace blinker with custom implementation The major benefit here is type checking, followed by proper support for async receivers. * fix compatibility with Python 3.9 * fix nits * try harder to force gc * try harderer * coverage++ * coverage++ * nits
mitmproxy
11
Python
21
base.py
def set_current_value(self, val) -> None: errors = self.lst[self.focus][1] emsg = self.editor.is_error(self.focus_col, val) if emsg: signals.status_message.send(message=emsg, expire=5) errors.add(self.focus_col) else: errors.discard(self.focus_col) self.set_value(val, self.focus, self.focus_col, errors)
f4dc2f2cfdb40e04022e4deb4aa67578deff5d23
87
https://github.com/mitmproxy/mitmproxy.git
90
def set_current_value(self, val) -> None: errors = self.lst[self.focus][1] emsg = self.editor.is_error(self.focus_col, val) if emsg: signals.status_message.send(message=emsg, expire=5)
18
132
set_current_value
28
0
2
7
python3.10.4/Lib/calendar.py
221,244
add python 3.10.4 for windows
XX-Net
11
Python
24
calendar.py
def formatmonthname(self, theyear, themonth, withyear=True): if withyear: s = '%s %s' % (month_name[themonth], theyear) else: s = '%s' % month_name[themonth] return '<tr><th colspan="7" class="%s">%s</th></tr>' % ( self.cssclass_month_head, s)
8198943edd73a363c266633e1aa5b2a9e9c9f526
49
https://github.com/XX-net/XX-Net.git
89
def formatmonthname(self, theyear, themonth, withyear=True): if withyear: s = '%s %s' % (month_name[themonth], theyear) else: s = '%s' % month_name[themonth] return '<tr><th colspan="7" class="%s">%s</th></tr>' % ( self.cssclass_month_head, s)
8
80
formatmonthname
10
0
1
4
modules/image/Image_editing/colorization/deoldify/test.py
52,122
update deoldify (#1992) * update deoldify * add clean func * update README * update format
PaddleHub
10
Python
10
test.py
def test_predict1(self): pred_img, out_path = self.module.predict(input='tests/test.jpg') self.assertIsInstance(pred_img, np.ndarray) self.assertIsInstance(out_path, str)
ca09b195daa8033a6f85bccf27362d0b114f9706
37
https://github.com/PaddlePaddle/PaddleHub.git
30
def test_predict1(self): pr
11
60
test_predict1
12
0
1
6
homeassistant/components/axis/device.py
318,150
Improve type hints in axis (#75910)
core
10
Python
12
device.py
async def async_reset(self) -> bool: self.disconnect_from_stream() return await self.hass.config_entries.async_unload_platforms( self.config_entry, PLATFORMS )
8181da70901c6b848ebc2efb2d39a7a3536599f3
29
https://github.com/home-assistant/core.git
51
async def async_reset(self) -> bool: self.disconnect_from_stream() return await self.hass.config_entries.async_unload_platforms( self.config_entry, PLATFORMS
9
50
async_reset
33
0
1
9
tests/test_builder.py
104,939
Set builder name from module instead of class (#4388) * Set builder name from module instead of class * Fix tests * Rename dummy_builder to builder in tests
datasets
15
Python
21
test_builder.py
def test_cache_dir_for_features(self): with tempfile.TemporaryDirectory() as tmp_dir: f1 = Features({"id": Value("int8")}) f2 = Features({"id": Value("int32")}) builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name="dummy", features=f1) other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name="dummy", features=f1) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name="dummy", features=f2) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
d6ae1ea3f93a48d03eab78eecf7b6599144143e1
112
https://github.com/huggingface/datasets.git
116
def test_cache_dir_for_features(self): with tempfile.TemporaryDirectory() as tmp_dir: f1 = Features({"id": Value("int8")}) f2 = Features({"id": Value("int32")}) builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name="dummy", features=f1) other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name="dummy", features=f1) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name="dummy", features=f2) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
17
188
test_cache_dir_for_features
13
0
2
4
src/sentry/snuba/metrics/query_builder.py
99,664
ref(metrics): Honor snuba group limits without orderBy [TET-5] (#34287) * ref(metrics): Honor snuba group limits without orderBy This PR fixes the else branch to apply similar session V2 limits without explicit orderBy. Essentially how we achieve this now is through the following logic: Let's say fields across the three different entities are requested with a limit of 3, groupBy project and no orderBy clause - If the results of query to entity 1, hits the limit then we use the project groups as filters for subsequent queries - If the results of query to entity 1 do not hit the limit, but results of query 2 does, then we nuke the groups from query 1 that do not exist in query 2 results and apply those as a filter to query 3 - If the results of all three queries to all three entities don't hit the limit, then at the very end, we might end up with an extra number of groups greater than the limit, which is why we nuke the excess groups
sentry
9
Python
12
query_builder.py
def _parse_limit(self, paginator_kwargs) -> Optional[Limit]: if "limit" not in paginator_kwargs: return return Limit(paginator_kwargs["limit"])
1b1e1ed83fa3ee7da1009b927efbd7af94609301
27
https://github.com/getsentry/sentry.git
37
def _parse_limit(self, paginator_kwargs) -> Optional[Limit]: if "limit" not in paginator_kwargs: return return Limit(paginator_kwargs["limit"])
5
45
_parse_limit
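The commit message for this record walks through how group limits are honored across successive entity queries when no orderBy is given: the groups of the first query that saturates the limit become a filter for later queries, earlier groups outside that set are dropped, and any excess groups are trimmed at the end. A loose, illustrative sketch of that flow (not the sentry query builder itself):

    from typing import Iterable, List, Optional, Set

    def honor_group_limit(results_per_entity: Iterable[List[str]], limit: int) -> List[str]:
        # Groups collected so far, in first-seen order.
        collected: List[str] = []
        # Once a query saturates the limit, its groups constrain later queries.
        allowed: Optional[Set[str]] = None
        for groups in results_per_entity:
            if allowed is not None:
                groups = [g for g in groups if g in allowed]
            collected.extend(g for g in groups if g not in collected)
            if len(groups) >= limit:
                allowed = set(groups)
                # Drop previously collected groups that fell outside the filter.
                collected = [g for g in collected if g in allowed]
        # If no query hit the limit we may still end up with too many groups.
        return collected[:limit]

    assert honor_group_limit([["p1", "p2"], ["p2", "p3", "p4"], ["p4", "p5"]], limit=3) == ["p2", "p3", "p4"]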
51
0
2
16
test/test_examples.py
181,237
Fix bug with gr.update and interactive=True (#2639) * Fix update interactivity * Lint * CHANGELOG * Fix * Undo interactive=True * Do not call update twice * Add unit test * Revert change * Lint
gradio
14
Python
44
test_examples.py
async def test_caching_with_dict(self): text = gr.Textbox() out = gr.Label() io = gr.Interface( lambda _: {text: gr.update(lines=4, interactive=False), out: "lion"}, "textbox", [text, out], examples=["abc"], cache_examples=True, ) prediction = await io.examples_handler.load_from_cache(0) assert not any(d["trigger"] == "fake_event" for d in io.config["dependencies"]) assert prediction == [ {"lines": 4, "__type__": "update", "mode": "static"}, {"label": "lion"}, ]
e6336d688259494205ff4616ff2c03d5460b36bc
124
https://github.com/gradio-app/gradio.git
183
async def test_caching_with_dict(self): text = gr.Textbox() out = gr.Label() io = gr.Interface( lambda _: {text: gr.update(lines=4, interactive=False), out: "lion"}, "textbox", [text, out], examples=["abc"], cache_examples=True, ) prediction = await io.examples_handler.load
21
209
test_caching_with_dict
87
0
14
18
homeassistant/components/sonarr/sensor.py
292,531
Use aiopyarr for sonarr (#65349)
core
13
Python
34
sensor.py
def native_value(self) -> StateType: key = self.entity_description.key if key == "diskspace" and self.data.get(key) is not None: total_free = sum(disk.freeSpace for disk in self.data[key]) free = total_free / 1024**3 return f"{free:.2f}" if key == "commands" and self.data.get(key) is not None: return len(self.data[key]) if key == "queue" and self.data.get(key) is not None: return self.data[key].totalRecords if key == "series" and self.data.get(key) is not None: return len(self.data[key]) if key == "upcoming" and self.data.get(key) is not None: return len(self.data[key]) if key == "wanted" and self.data.get(key) is not None: return self.data[key].totalRecords return None
f30681dae7efffd8980b3ee3ae7f355c603b842c
194
https://github.com/home-assistant/core.git
238
def native_value(self) -> StateType: key = self.entity_description.key if key == "diskspace" and self.data.get(key) is not None: total_free = sum(disk.freeSpace for disk in self.data[key]) free = total_free / 1024**3 return f"{free:.2f}" if key == "commands" and self.data.get(key) is not None: return len(self.data[key]) if key == "queue" and self.data.get(key) is not None: return self.data[key].totalRecords if key == "series" and self.data.get(key) is not No
14
316
native_value
32
0
1
28
tests/providers/google/cloud/operators/test_dataplex.py
46,141
Add Dataplex operators (#20377)
airflow
10
Python
22
test_dataplex.py
def test_execute(self, hook_mock): op = DataplexDeleteTaskOperator( project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID, dataplex_task_id=DATAPLEX_TASK_ID, task_id="delete_dataplex_task", api_version=API_VERSION, gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN, ) op.execute(context=None) hook_mock.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, api_version=API_VERSION, impersonation_chain=IMPERSONATION_CHAIN, ) hook_mock.return_value.delete_task.assert_called_once_with( project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID, dataplex_task_id=DATAPLEX_TASK_ID, retry=None, timeout=None, metadata=(), )
87c1246b79769f20214a339aadc6a8270d453953
115
https://github.com/apache/airflow.git
300
def test_execute(self, hook_mock): op = DataplexDeleteTaskOperator( project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID, dataplex_task_id=DATAPLEX_TASK_ID, task_id="delete_dataplex_task", api_version=API_VERSION, gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN, ) op.execute(context=None) hook_mock.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, ap
30
161
test_execute
23
1
1
12
tests/gamestonk_terminal/stocks/insider/test_openinsider_view.py
281,863
Tests : Stocks (#1240)

* Updating tests : stocks/sector_industry_analysis
* Updating tests : stocks/prediction_techniques
* Updating tests : doc
* Updating tests : black
* Updating tests : stocks/sector_industry_analysis
* Updating tests : stocks/technical_analysis
* Updating tests : etf/technical_analysis
* Updating tests : black
* Updating tests : stocks/quantitative_analysis
* Updating tests : stocks/quantitative_analysis
* Updating tests : stocks/options
* Updating tests : stocks/options
* Updating tests : stocks
* Updating tests : black
* Updating tests : stocks/prediction_techniques
* Updating tests : black
* Updating tests : stocks
* Updating tests : etf
* Updating tests : stocks
* Updating tests : black
* Updating tests : fundamental_analysis
* Updating tests : dark_pool_shorts/finra_model
* Updating tests : black
* Updating tests : stocks/dark_pook_shorts
* Updating tests : stocks/discovery
* Updating tests : stocks/insider
* Updating tests : stocks
* Updating tests : black
* Updating tests : stocks/options/yfinance_model
* Updating tests : stocks
* Updating tests : stocks/insider
OpenBBTerminal
9
Python
21
test_openinsider_view.py
def test_print_insider_filter_no_table(mocker):
    # MOCK SOUP
    mocker.patch(
        target="gamestonk_terminal.stocks.insider.openinsider_view.get_open_insider_link",
        return_value=None,
    )

    openinsider_view.print_insider_filter(
        preset_loaded="whales",
        ticker="",
        limit=10,
        links=False,
        export="",
    )


@pytest.mark.default_cassette("test_print_insider_data")
@pytest.mark.vcr
@pytest.mark.parametrize(
    "color",
    [True, False],
)
379cf31cfe7473c6b5747861bb2ec2dbb9974b5d
@pytest.mark.default_cassette("test_print_insider_data")
@pytest.mark.vcr
@pytest.mark.parametrize(
    "color",
    [True, False],
)
43
https://github.com/OpenBB-finance/OpenBBTerminal.git
88
def test_print_insider_filter_no_table(mocker): # MOCK SOUP mocker.patch( target="gamestonk_terminal.stocks.insider.openinsider_view.get_open_insider_link", return_value=None, ) openinsider_vi
17
121
test_print_insider_filter_no_table
100
1
1
11
pandas/tests/scalar/timedelta/test_constructors.py
169,912
REF: _reso->_creso (#49107)
pandas
12
Python
71
test_constructors.py
def test_overflow_on_construction():
    # GH#3374
    value = Timedelta("1day").value * 20169940
    msg = "Cannot cast 1742682816000000000000 from ns to 'ns' without overflow"
    with pytest.raises(OutOfBoundsTimedelta, match=msg):
        Timedelta(value)

    # xref GH#17637
    msg = "Cannot cast 139993 from D to 'ns' without overflow"
    with pytest.raises(OutOfBoundsTimedelta, match=msg):
        Timedelta(7 * 19999, unit="D")

    # used to overflow before non-ns support
    td = Timedelta(timedelta(days=13 * 19999))
    assert td._creso == NpyDatetimeUnit.NPY_FR_us.value
    assert td.days == 13 * 19999


@pytest.mark.parametrize(
    "val, unit",
    [
        (3508, "M"),
        (15251, "W"),  # 1
        (106752, "D"),  # change from previous:
        (2562048, "h"),  # 0 hours
        (153722868, "m"),  # 13 minutes
        (9223372037, "s"),  # 44 seconds
    ],
)
90b4add77859d1349530fff3c8cadeef95f36f39
@pytest.mark.parametrize(
    "val, unit",
    [
        (3508, "M"),
        (15251, "W"),  # 1
        (106752, "D"),  # change from previous:
        (2562048, "h"),  # 0 hours
        (153722868, "m"),  # 13 minutes
        (9223372037, "s"),  # 44 seconds
    ],
)
89
https://github.com/pandas-dev/pandas.git
200
def test_overflow_on_construction(): # GH#3374 value = Timedelta("1day").value * 20169940 msg = "Cannot cast 1742682816000000000000 from ns to 'ns' without overflow" with pytest.raises(OutOfBoundsTimedelta, match=msg): Timedelta(value) # xref GH#17637 msg = "Cannot cast 139993 from D to 'ns' without overflow" with pytest.raises(OutOfBoundsTimedelta, match=msg): Timedelta(7 * 19999, unit="D") # used to overflow before non-ns support td = Timedelta(timedelta(days=13 * 19999)) assert td._creso == NpyDatetimeUnit.NPY_FR_us.value assert td.days == 13 * 19999 @pytest.mark.parametrize( "val, unit", [ (3508, "M"), (15251, "W"), # 1 (106752, "D"), # change from previous: (2562048, "h"), # 0 hours (153722868, "m"), # 13 minutes (9223372037, "s"), # 44
17
236
test_overflow_on_construction
7
0
1
7
homeassistant/components/motion_blinds/cover.py
294,802
Motion Blinds API lock (#68587)
core
7
Python
7
cover.py
async def async_set_cover_position(self, **kwargs):
    position = kwargs[ATTR_POSITION]
425b825ae990b054838fea09b86202407d14dae1
44
https://github.com/home-assistant/core.git
21
async def async_set_cover_position(self, **kwargs): position = kwargs[ATTR_POSITION]
5
27
async_set_cover_position
22
0
2
7
django/db/backends/base/base.py
204,804
Refs #33476 -- Reformatted code with Black.
django
12
Python
21
base.py
def dec_thread_sharing(self):
    with self._thread_sharing_lock:
        if self._thread_sharing_count <= 0:
            raise RuntimeError(
                "Cannot decrement the thread sharing count below zero."
            )
        self._thread_sharing_count -= 1
9c19aff7c7561e3a82978a272ecdaad40dda5c00
27
https://github.com/django/django.git
99
def dec_thread_sharing(self): with self._thread_
5
48
dec_thread_sharing
63
0
10
25
nuitka/OutputDirectories.py
178,706
macOS: Added support for mixing --onefile and --macos-create-app-bundle

* For some software, e.g. PySide2 it will actually be the only way to get it working.
Nuitka
19
Python
35
OutputDirectories.py
def getResultFullpath(onefile):
    result = getResultBasepath(onefile=onefile)

    if Options.shallMakeModule():
        result += getSharedLibrarySuffix(preferred=True)
    else:
        output_filename = Options.getOutputFilename()

        if Options.isOnefileMode() and output_filename is not None:
            if onefile:
                result = output_filename
            else:
                result = os.path.join(
                    getStandaloneDirectoryPath(),
                    os.path.basename(output_filename),
                )
        elif output_filename is not None:
            result = output_filename
        elif getOS() == "Windows":
            result += ".exe"
        elif (
            not Options.isStandaloneMode()
            or onefile
            and not Options.shallCreateAppBundle()
        ):
            result += ".bin"

    return result
053c207229292b7f011937964a69cdf271d47532
123
https://github.com/Nuitka/Nuitka.git
298
def getResultFullpath(onefile): result = getResultBasepath(onefile=onefile) if Options.shallMakeModule(): result += getSharedLibrarySuffix(preferred=True) else: output_filename = Options.getOutputFilename() if Options.isOnefileMode() and output_filename is not None: if onefile: result = output_filename else: result = os.path.join( getStandaloneDirectoryPath(), os.path.basename(output_filename), ) elif output_filename is not None: result = output_filename elif getOS() == "Windows": result += ".exe" elif ( n
19
211
getResultFullpath
150
1
3
30
tests/exchange/test_exchange.py
149,366
Change to precise casing instead of .lower()
freqtrade
20
Python
89
test_exchange.py
async def test__async_kucoin_get_candle_history(default_conf, mocker, caplog):
    caplog.set_level(logging.INFO)
    api_mock = MagicMock()
    api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection(
        "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?"
        "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735"
        "429 Too Many Requests"
        '{"code":"429000","msg":"Too Many Requests"}'))
    exchange = get_patched_exchange(mocker, default_conf, api_mock, id="KuCoin")
    mocker.patch('freqtrade.exchange.Exchange.name', PropertyMock(return_value='KuCoin'))

    msg = "Kucoin 429 error, avoid triggering DDosProtection backoff delay"
    assert not num_log_has_re(msg, caplog)

    for _ in range(3):
        with pytest.raises(DDosProtection, match=r'429 Too Many Requests'):
            await exchange._async_get_candle_history(
                "ETH/BTC", "5m", (arrow.utcnow().int_timestamp - 2000) * 1000, count=3)
    assert num_log_has_re(msg, caplog) == 3

    caplog.clear()
    # Test regular non-kucoin message
    api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection(
        "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?"
        "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735"
        "429 Too Many Requests"
        '{"code":"2222222","msg":"Too Many Requests"}'))

    msg = r'_async_get_candle_history\(\) returned exception: .*'
    msg2 = r'Applying DDosProtection backoff delay: .*'
    with patch('freqtrade.exchange.common.asyncio.sleep', get_mock_coro(None)):
        for _ in range(3):
            with pytest.raises(DDosProtection, match=r'429 Too Many Requests'):
                await exchange._async_get_candle_history(
                    "ETH/BTC", "5m", (arrow.utcnow().int_timestamp - 2000) * 1000, count=3)
    # Expect the "returned exception" message 12 times (4 retries * 3 (loop))
    assert num_log_has_re(msg, caplog) == 12
    assert num_log_has_re(msg2, caplog) == 9


@pytest.mark.asyncio
39d925c2950aa3c734c454535fef70d89353211e
@pytest.mark.asyncio
243
https://github.com/freqtrade/freqtrade.git
341
async def test__async_kucoin_get_candle_history(default_conf, mocker, caplog): caplog.set_level(logging.INFO) api_mock = MagicMock() api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection( "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?" "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735" "429 Too Many Requests" '{"code":"429000","msg":"Too Many Requests"}')) exchange = get_patched_exchange(mocker, default_conf, api_mock, id="KuCoin") mocker.patch('freqtrade.exchange.Exchange.name', PropertyMock(return_value='KuCoin')) msg = "Kucoin 429 error, avoid triggering DDosProtection backoff delay" assert not num_log_has_re(msg, caplog) for _ in range(3): with pytest.raises(DDosProtection, match=r'429 Too Many Requests'): await exchange._async_get_candle_history( "ETH/BTC", "5m", (arrow.utcnow().int_timestamp - 2000) * 1000, count=3) assert num_log_has_re(msg, caplog) == 3 caplog.clear() # Test regular non-kucoin message api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection( "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?" "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735" "429 Too Many Requests" '{"code":"2222222","msg":"Too Many Requests"}')) msg = r'_async_get_candle_history\(\) returned exception: .*' msg2 = r'Applying DDosProtection backoff delay: .*' with patch('freqtrade.exchange.common.asyncio.sleep', get_mock_c
37
423
test__async_kucoin_get_candle_history
9
0
1
5
homeassistant/components/risco/sensor.py
300,807
Clean up accessing entity_registry.async_get_registry helper via hass (#72005)
core
10
Python
9
sensor.py
async def async_added_to_hass(self):
    self._entity_registry = er.async_get(self.hass)
    self.async_on_remove(
        self.coordinator.async_add_listener(self._refresh_from_coordinator)
    )
69cc6ab5f1d58adc586c3b300a4f7f0cde2cd0c2
33
https://github.com/home-assistant/core.git
48
async def async_added_to_hass(self): self._entity_registry = er.async_get(self.hass) self.async_on_remo
10
57
async_added_to_hass
35
0
5
14
homeassistant/components/london_air/sensor.py
305,438
Improve entity type hints [l] (#77655)
core
15
Python
28
sensor.py
def update(self) -> None:
    sites_status = []
    self._api_data.update()
    if self._api_data.data:
        self._site_data = self._api_data.data[self._name]
        self._updated = self._site_data[0]["updated"]
        for site in self._site_data:
            if site["pollutants_status"] != "no_species_data":
                sites_status.append(site["pollutants_status"])

    if sites_status:
        self._state = max(set(sites_status), key=sites_status.count)
    else:
        self._state = None
d1ecd74a1a153b85b829acf45b5c6a5ea79df5c1
104
https://github.com/home-assistant/core.git
166
def update(self) -> None: sites_status = [] self._api_data.update() if self._api_data.data: self._site_data = self._api_data.d
15
173
update
11
0
2
3
python3.10.4/Lib/ast.py
220,187
add python 3.10.4 for windows
XX-Net
10
Python
11
ast.py
def set_precedence(self, precedence, *nodes):
    for node in nodes:
        self._precedences[node] = precedence
8198943edd73a363c266633e1aa5b2a9e9c9f526
23
https://github.com/XX-net/XX-Net.git
28
def set_precedence(self, precedence, *nodes): for node in nodes: self._precedences[nod
6
34
set_precedence
46
0
4
19
rllib/algorithms/apex_dqn/apex_dqn.py
135,780
[RLlib] Move all config validation logic into AlgorithmConfig classes. (#29854)
ray
15
Python
39
apex_dqn.py
def update_target_networks(self, num_new_trained_samples) -> None:
    self._num_ts_trained_since_last_target_update += num_new_trained_samples
    if (
        self._num_ts_trained_since_last_target_update
        >= self.config["target_network_update_freq"]
    ):
        self._num_ts_trained_since_last_target_update = 0
        with self._timers[TARGET_NET_UPDATE_TIMER]:
            to_update = self.workers.local_worker().get_policies_to_train()
            self.workers.local_worker().foreach_policy_to_train(
                lambda p, pid: pid in to_update and p.update_target()
            )
        self._counters[NUM_TARGET_UPDATES] += 1
        self._counters[LAST_TARGET_UPDATE_TS] = self._counters[
            NUM_AGENT_STEPS_TRAINED
            if self.config.count_steps_by == "agent_steps"
            else NUM_ENV_STEPS_TRAINED
        ]
2ed09c54459cc3f74e2dab13406018698559856c
111
https://github.com/ray-project/ray.git
260
def update_target_networks(self, num_new_trained_samples) -> None: self._num_ts_trained_since_last_target_update += num_new_trained_samples if ( self._num_ts_trained_since_last_target_update >= self.config["target_network_update_freq"] ): self._num_ts_trained_since_last_target_update = 0 with self._timers[TARGET_NET_UPDATE_TIMER]: to_update = self.workers.local_worker().get_policies_to_train() self.workers.local_worker().foreach_policy_to_train( lambda p, pid: pid in to_update and p.update_target() ) self._counters[NUM_TARGET_UPDATES] += 1 self._counters[LAST_TARGET_UPDATE_TS] = self._counters[
21
180
update_target_networks
17
0
1
4
saleor/payment/gateways/np_atobarai/tests/test_utils.py
26,106
Port NP Atobarai gateway to 3.1 (#8684)

* Port net protections (#8640) to 3.1
* Add NP final code review feedback onto 3.1
* Fix optional sku in NP payload & add docstrings
* Refactor tracking_number_updated
* Change NetProtections timeout value to 20
* Do not use f-strings in logger warnings
* Trace only http requests
* Simplify code
* Add comment about longer than usual timeout period
* Remove order from process payment
* Add comment for 400 status code
* Reduce scope of Posuto context manager
* Refactor voucher and shipping amount for payment lines data
* Update PaymentResult.psp_reference type to Optional[str]
* Add handler for report error in transaction reregistration
* Add docstrings to goods functions
* Add FOR_REREGISTRATION payment status
* Refactor create_refund_data
* Fix refund data
* Add docstrings to goods functions
* Add prefetch to _create_refund_manual_amount
* Move refund logic to NP
* Fix billing amount for partial refunds
* Fix multiple shipping refunds
* Set currency to JPY
* WIP fix refunds
* Clean up code
* Refactor
* Fix get_goods_with_refunds for all returned products

Co-authored-by: Mateusz Grzyb <grzybmateusz@protonmail.com>
saleor
11
Python
15
test_utils.py
def test_get_fulfillment_for_order_no_refundable_fulfillment(order):
    # given
    order.fulfillments.create(tracking_number="123", status=FulfillmentStatus.REFUNDED)

    # then
    with pytest.raises(PaymentError, match=r".* not exist .*"):
        # when
        get_fulfillment_for_order(order)
bf654a5f958fcf0611b61cf43ac13c886761b80a
38
https://github.com/saleor/saleor.git
42
def test_get_fulfillment_for_order_no_refundable_fulfillment(order): # given order.fulfillments.create(tracking_number="123", status=Fulfillmen
13
67
test_get_fulfillment_for_order_no_refundable_fulfillment
18
0
2
4
.venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py
60,526
upd; format
transferlearning
8
Python
16
parser.py
def format_heading(self, heading):
    # type: (str) -> str
    if heading == "Options":
        return ""
    return heading + ":\n"
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
18
https://github.com/jindongwang/transferlearning.git
49
def format_heading(self, heading): # type: (str) -> str if heading == "Options":
3
36
format_heading
70
1
1
17
tests/admin_widgets/tests.py
200,799
Refs #33476 -- Reformatted code with Black.
django
11
Python
58
tests.py
def test_m2m_related_model_not_in_admin(self):
    # M2M relationship with model not registered with admin site. Raw ID
    # widget should have no magnifying glass link. See #16542
    consultor1 = Advisor.objects.create(name="Rockstar Techie")

    c1 = Company.objects.create(name="Doodle")
    c2 = Company.objects.create(name="Pear")
    consultor1.companies.add(c1, c2)

    rel = Advisor._meta.get_field("companies").remote_field
    w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
    self.assertHTMLEqual(
        w.render("company_widget1", [c1.pk, c2.pk], attrs={}),
        '<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s">'
        % {"c1pk": c1.pk, "c2pk": c2.pk},
    )

    self.assertHTMLEqual(
        w.render("company_widget2", [c1.pk]),
        '<input type="text" name="company_widget2" value="%(c1pk)s">'
        % {"c1pk": c1.pk},
    )


@override_settings(ROOT_URLCONF="admin_widgets.urls")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@override_settings(ROOT_URLCONF="admin_widgets.urls")
144
https://github.com/django/django.git
218
def test_m2m_related_model_not_in_admin(self): # M2M relationship with model not registered with admin site. Raw ID # widget should have no magnifying glass link. See #16542 consultor1 = Advisor.objects.create(name="Rockstar Techie") c1 = Company.objects.create(name="Doodle") c2 = Company.objects.create(name="Pear") consultor1.companies.add(c1, c2) rel = Advisor._meta.get_field("companies").remote_field w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site) self.assertHTMLEqual( w.render("co
26
255
test_m2m_related_model_not_in_admin
80
0
9
21
tests/lobpcg_test.py
121,070
Add initial LOBPCG top-k eigenvalue solver (#3112) This initial version is f32-only for accelerators, since it relies on an eigh call (which itself is f32 at most) in its inner loop. For details, see jax.experimental.linalg.standard_lobpcg documentation. This is a partial implementation of the similar [scipy lobpcg function](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html).
jax
14
Python
58
lobpcg_test.py
def _make_concrete_cases(f64):
  dtype = np.float64 if f64 else np.float32
  example_names = list(_concrete_generators(dtype))
  cases = []
  for name in example_names:
    nkm = [(100, 10, 20)]
    if not flags.FLAGS.jax_skip_slow_tests:
      nkm.append((1000, 100, 200))
    for n, k, m in nkm:
      if name == 'ring laplacian':
        m *= 3
      if name.startswith('linear'):
        m *= 2
      if f64:
        m *= 2
      case = [('matrix_name', name), ('n', n), ('k', k), ('m', m)]
      clean_matrix_name = _clean_matrix_name(name)
      case.append(('testcase_name', f'{clean_matrix_name}_n{n}'))
      cases.append(dict(case))
  assert len({c['testcase_name'] for c in cases}) == len(cases)
  return cases
76fcf63fb4e53fd82faece677ed46db8b0c71707
176
https://github.com/google/jax.git
155
def _make_concrete_cases(f64): dtype = np.float64 if f64 else np.float32 example_names = list(_concrete_generators(dtype)) cases = [] for name in example_names: nkm = [(100, 10, 20)] if not flags.FLAGS.jax_skip_slow_tests: nkm.append((1000, 100, 200)) for n, k, m in nkm: if name == 'ring laplacian': m *= 3 if name.startswith('linear'): m *= 2 if f64:
26
283
_make_concrete_cases
8
0
1
5
src/streamlink/plugins/funimationnow.py
187,049
plugins.funimationnow: replace itertags
streamlink
15
Python
8
funimationnow.py
def login_csrf(self):
    return self.session.http.get(self.login_url, schema=validate.Schema(
        validate.parse_html(),
        validate.xml_xpath_string(f".//input[@name='{self.CSRF_NAME}'][1]/@value")
    ))
b2557361f734304fbd80b4985c753668fed00db5
39
https://github.com/streamlink/streamlink.git
43
def login_csrf(self): return self.session.http.get(self.login_url, schema=validate.Schema( validate.p
12
68
login_csrf
20
0
1
18
tests/test_edgeql_scope.py
176,144
Always include the definition context namespace in computable contexts (#3331) We need to include the *original* source namespace in our ctx namespace when compiling computables. The current mechanism of trying to look up in view_sets or failing that using the source namespace from the computable use, but this was failing to find it in some cases with FOR. Fix this by instead directly pulling in the namespace from qlctx. The inclusion of qlctx's namespace nicely allows us to ditch so later logic as well. Additionally we need to merge the namespace into *both* sides in get_view_map_remapping, to handle cases like referencing a `FOR` variable where the current ns doesn't get merged in. Fixes #3323.
edgedb
16
Python
15
test_edgeql_scope.py
async def test_edgeql_scope_ref_outer_02a(self):
    await self.assert_query_result(
        ,
        [{
            "cards": [
                {"tag": ["Alice"]},
                {"tag": ["Alice"]},
                {"tag": ["Alice"]},
                {"tag": ["Alice"]}
            ]
        }],
    )
0dada08f4eedb104bfa40932b576e44d82218547
53
https://github.com/edgedb/edgedb.git
172
async def test_edgeql_scope_ref_outer_02a(self): await self.assert_query_result( , [{ "cards": [ {"tag": ["Alice"]}, {"tag": ["Alice"]}, {"tag": ["Alice"]}, {"tag": ["Alice"]} ] }], )
3
99
test_edgeql_scope_ref_outer_02a
15
0
3
8
python/ray/ml/preprocessor.py
148,035
[air - preprocessor] Add BatchMapper. (#23700) Add BatchMapper preprocessor. Update the semantics of preprocessor.fit() to allow for multiple fit. This is to follow scikitlearn example. Introduce FitStatus to explicitly incorporate Chain case.
ray
11
Python
15
preprocessor.py
def _check_is_fitted(self) -> bool:
    fitted_vars = [v for v in vars(self) if v.endswith("_")]
    return bool(fitted_vars)
06a57b20de12c840406a3bac69751c83a44f008c
32
https://github.com/ray-project/ray.git
36
def _check_is_fitted(self) -> bool: fitted_vars = [v for v in vars(self)
7
55
_check_is_fitted
14
0
2
9
mkdocs/tests/config/config_options_tests.py
225,428
Add tests for new class-based configs

The old-style tests are intentionally kept at config_options_legacy_tests.py
mkdocs
13
Python
14
config_options_tests.py
def test_valid_dir(self) -> None:
    for cls in c.Dir, c.FilesystemObject:
        with self.subTest(cls):
            d = os.path.dirname(__file__)
ff8552a57abf2c32f2d0344ef12707b88e008493
82
https://github.com/mkdocs/mkdocs.git
46
def test_valid_dir(self) -> None: for cls in c.Dir, c.FilesystemObject: with self.subTest(cls): d = os.path.dirname(__file__)
12
59
test_valid_dir
7
0
1
3
tests/gamestonk_terminal/etf/test_yfinance_model.py
281,802
ETF tests (#1208)

* etf tests for stockanalysis
* add financedatabase etf tests
* fix financedatabase etf documentation
* yfinance etf tests
* add etf/discovery tests
* add tests to etf/screener
* add etf controller tests
* add etf/ta tests
* remove tabulate and use rich table
* etf/pred
* add more etf tests, thanks Chavi
* update about us website
* Updating tests : etf

Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
OpenBBTerminal
8
Python
7
test_yfinance_model.py
def test_get_etf_summary_description(recorder, name):
    result = yfinance_model.get_etf_summary_description(name)
    recorder.capture(result)
d8ca7556edde9a700706c7802a229cb4439304c5
21
https://github.com/OpenBB-finance/OpenBBTerminal.git
12
def test_get_etf_summary_description(recorder, name): result = yfinance_model.get
7
34
test_get_etf_summary_description
37
0
2
9
src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
38,499
Add Wav2Vec2Conformer (#16812)

* save intermediate
* add wav2vec2 conformer
* add more code
* more
* first test passes
* make all checkpoints work
* update
* up
* more clean ups
* save clean-up
* save clean-up
* save more
* remove bogus
* finalize design conformer
* remove vision
* finish all tests
* more changes
* finish code
* add doc tests
* add slow tests
* fix autoconfig test
* up
* correct docstring
* up
* update
* fix
* Apply suggestions from code review

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
Co-authored-by: Anton Lozhkov <aglozhkov@gmail.com>

* Update docs/source/en/model_doc/wav2vec2-conformer.mdx
* upload
* save copied from
* correct configs
* fix model outputs
* add to docs
* fix imports
* finish
* finish code
* correct copied from
* correct again
* correct make fix
* improve make fix copies
* save
* correct fix copy from
* correct init structure
* correct
* fix import
* apply suggestions

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
Co-authored-by: Anton Lozhkov <aglozhkov@gmail.com>
transformers
17
Python
31
modeling_wav2vec2_conformer.py
def _compute_perplexity(probs, mask=None):
    if mask is not None:
        mask_extended = mask.flatten()[:, None, None].expand(probs.shape)
        probs = torch.where(mask_extended, probs, torch.zeros_like(probs))
        marginal_probs = probs.sum(dim=0) / mask.sum()
    else:
        marginal_probs = probs.mean(dim=0)

    perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
    return perplexity
5a9957358cebd616e58b2d1ab3b887c2f2793b45
117
https://github.com/huggingface/transformers.git
108
def _compute_perplexity(probs, mask=None): if mask is not None: mask_extended = mask.flatten()[:, None,
17
180
_compute_perplexity
141
0
1
3
jax/experimental/maps.py
119,207
Add experimental support for SPMD lowering of xmap via MANUAL sharding annotations

Note that it's still limited and turns out to be a bit hard (partly due to unclear XLA semantics at this point). Using constants that are not xmap inputs is likely to cause SPMD partitioner errors and cross-replica collectives don't seem to work either. In any case, the next step will be to allow nesting those xmaps inside pjits.

PiperOrigin-RevId: 426447989
jax
13
Python
95
maps.py
def _ensure_supports_manual_and(f):
  def update(v):
    if v and not hasattr(xc.OpSharding.Type, "MANUAL"):
      raise RuntimeError("This flag requires a version of jaxlib that supports MANUAL sharding type")
    return f(v)
  return update

try:
  config.define_bool_state(
      name="experimental_xmap_spmd_lowering",
      default=False,
      help=("When set, multi-device xmap computations will be compiled through "
            "the XLA SPMD partitioner instead of explicit cross-replica collectives. "
            "Not supported on CPU!"),
      update_global_hook=_clear_compilation_cache,
      update_thread_local_hook=_thread_local_flag_unsupported)
  config.define_bool_state(
      name="experimental_xmap_spmd_lowering_manual",
      default=False,
      help=("When set, multi-device xmap computations will be compiled using "
            "the MANUAL partitioning feature of the XLA SPMD partitioner instead of "
            "sharding constraints on vectorized code. "
            "Requires experimental_xmap_spmd_lowering!"),
      update_global_hook=_ensure_supports_manual_and(_ensure_spmd_and(_clear_compilation_cache)),
      update_thread_local_hook=_thread_local_flag_unsupported)
  config.define_bool_state(
      name="experimental_xmap_ensure_fixed_sharding",
      default=False,
      help=("When set and `experimental_xmap_spmd_lowering` is enabled, the lowering will "
            "try to limit the flexibility of the automated SPMD partitioner heuristics "
            "by emitting additional sharding annotations for program intermediates."),
      update_global_hook=_ensure_spmd_and(_clear_compilation_cache),
      update_thread_local_hook=_thread_local_flag_unsupported)
except Exception:
  raise ImportError("jax.experimental.maps has to be imported before JAX flags "
                    "are parsed")
086a607d8c8ea8487a59d6ced8aaf59834b8846c
9
https://github.com/google/jax.git
326
def _ensure_supports_manual_and(f): def update(v): if v and not hasattr(xc.OpSharding.Type, "MANUAL"): raise RuntimeError("This flag requires a version of jaxlib that supports MANUAL sharding type") return f(v) return update try: config.define_bool_state( name="experimental_xmap_spmd_lowering", default=False, help=("When set, multi-device xmap computations will be compiled through " "the XLA SPMD partitioner instead of explicit cross-replica collectives. " "Not supported on CPU!"), update_global_hook=_clear_compilation_cache, update_thread_local_hook=_thread_local_flag_unsupported)
21
246
_ensure_supports_manual_and
33
0
2
10
test/test_youtube_lists.py
106,199
Fix test_youtube_mix
youtube-dl
12
Python
28
test_youtube_lists.py
def test_youtube_mix(self):
    dl = FakeYDL()
    dl.params['format'] = 'best'
    ie = YoutubeTabIE(dl)
    result = dl.extract_info('https://www.youtube.com/watch?v=uVJ0Il5WvbE&list=PLhQjrBD2T381k8ul4WQ8SQ165XqY149WW',
                             download=False, ie_key=ie.ie_key(), process=True)
    entries = (result or {}).get('entries', [{'id': 'not_found', }])
    self.assertTrue(len(entries) >= 50)
    original_video = entries[0]
    self.assertEqual(original_video['id'], 'uVJ0Il5WvbE')
2c2c2bd348b7dce0aad55a6fc37a18c6f9a000e3
98
https://github.com/ytdl-org/youtube-dl.git
120
def test_youtube_mix(self): dl = FakeYDL()
18
167
test_youtube_mix