Dataset schema (column, dtype, and value/length range):

column           dtype           min      max
n_words          int64           3        1.95k
n_ast_errors     int64           0        2
complexity       int64           1        151
nloc             int64           2        546
path             stringlengths   8        125
id               int64           280      339k
commit_message   stringlengths   3        18.1k
repo             stringlengths   3        28
ast_levels       int64           4        28
language         stringclasses   1 value
vocab_size       int64           3        677
file_name        stringlengths   5        67
code             stringlengths   101      24k
commit_id        stringlengths   40       40
ast_errors       stringlengths   0        2.76k
token_counts     int64           7        3.77k
url              stringlengths   31       61
n_whitespaces    int64           4        13.9k
random_cut       stringlengths   21       13.9k
n_identifiers    int64           1        157
n_ast_nodes      int64           10       3.6k
fun_name         stringlengths   3        72
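Each row below carries one Python function together with its commit metadata; in the sample rows, `random_cut` appears to be a truncated prefix of `code`, and `url` points at the source repository. A minimal sketch of how a dataset with this schema could be loaded and inspected via the Hugging Face `datasets` library follows; the dataset identifier is a hypothetical placeholder, not the real name of this dataset.

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# "example/python-commit-functions" is a placeholder identifier.
from datasets import load_dataset

ds = load_dataset("example/python-commit-functions", split="train")

# Each record exposes the fields listed in the schema above.
row = ds[0]
print(row["repo"], row["path"], row["fun_name"])   # provenance of the function
print(row["commit_message"])                       # commit that touched it
print(row["code"])                                  # full function source
print(row["token_counts"], row["nloc"], row["complexity"])  # size/complexity stats
```

The raw preview rows follow.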
40
0
1
12
corporate/tests/test_stripe.py
84,213
mypy: Enable truthy-bool errors. Signed-off-by: Anders Kaseorg <anders@zulip.com>
zulip
11
Python
35
test_stripe.py
def test_invoice_plan_without_stripe_customer(self) -> None: self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, False, False) plan = get_current_plan_by_realm(get_realm("zulip")) assert plan is not None plan.customer.stripe_customer_id = None plan.customer.save(update_fields=["stripe_customer_id"]) with self.assertRaises(BillingError) as context: invoice_plan(plan, timezone_now()) self.assertRegex( context.exception.error_description, "Realm zulip has a paid plan without a Stripe customer", )
df69e1d9792a5ea7a72e32981f68a46a7fb88ce1
89
https://github.com/zulip/zulip.git
128
def test_invoice_plan_without_stripe_customer(self) -> None: self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, False, False) plan = get_current_plan_by_realm(get_realm("zulip")) assert plan is not None plan.customer.stripe_customer_id = None plan.customer.save(update_fields=["stripe_customer_id"]) with self.assertRaises(BillingError) as context:
21
145
test_invoice_plan_without_stripe_customer
65
0
8
16
modin/config/envvars.py
153,295
REFACTOR-#3900: add flake8-no-implicit-concat plugin and refactor flake8 error codes (#3901) Signed-off-by: jeffreykennethli <jkli@ponder.io>
modin
17
Python
48
envvars.py
def _check_vars(): valid_names = { obj.varname for obj in globals().values() if isinstance(obj, type) and issubclass(obj, EnvironmentVariable) and not obj.is_abstract } found_names = {name for name in os.environ if name.startswith("MODIN_")} unknown = found_names - valid_names if unknown: warnings.warn( f"Found unknown environment variable{'s' if len(unknown) > 1 else ''}," + f" please check {'their' if len(unknown) > 1 else 'its'} spelling: " + ", ".join(sorted(unknown)) ) _check_vars()
e5e9634357e60925a5a70e56a1d4882d269f533a
87
https://github.com/modin-project/modin.git
164
def _check_vars(): valid_names = { obj.varname for obj in globals().values() if isinstance(obj, type) and issubclass(obj, EnvironmentVariable) and not obj.is_abstract } found_names = {name for name in os.environ if name.startswith("MODIN_")} unknown = found_names - valid_names if unknown: warnings.warn( f"Found unknown environment va
22
192
_check_vars
56
0
5
11
scapy/contrib/automotive/scanner/enumerator.py
209,091
Minor refactoring of Automotive-Scanner show functions
scapy
12
Python
43
enumerator.py
def _show_negative_response_information(self, **kwargs): # type: (Any) -> str filtered = kwargs.get("filtered", True) s = "%d negative responses were received\n" % \ len(self.results_with_negative_response) s += "\n" s += self._show_negative_response_details(**kwargs) or "" + "\n" if filtered and len(self.negative_response_blacklist): s += "The following negative response codes are blacklisted: %s\n"\ % [self._get_negative_response_desc(nr) for nr in self.negative_response_blacklist] return s + "\n"
d74d8601575464a017f6e0f0031403b8c18d4429
78
https://github.com/secdev/scapy.git
161
def _show_negative_response_information(self, **kwargs):
12
139
_show_negative_response_information
6
0
1
3
homeassistant/components/github/sensor.py
309,972
Revamp github integration (#64190) Co-authored-by: Paulus Schoutsen <balloob@gmail.com> Co-authored-by: Franck Nijhof <git@frenck.dev> Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
9
Python
6
sensor.py
def native_value(self) -> StateType: return self.entity_description.value_fn(self.coordinator.data)
6a0c3843e51085e59d6fb69920733485f2f98fe5
21
https://github.com/home-assistant/core.git
20
def native_value(self) -> StateType: return self.entity_description.value_fn(self.coordinat
7
36
native_value
120
0
1
59
tests/orion/api/test_task_runs.py
55,876
Add sorts for task run name
prefect
17
Python
49
test_task_runs.py
async def test_read_task_runs_applies_sort(self, flow_run, session, client): now = pendulum.now() task_run_1 = await models.task_runs.create_task_run( session=session, task_run=schemas.core.TaskRun( name="Task Run 1", flow_run_id=flow_run.id, task_key="my-key", expected_start_time=now.subtract(minutes=5), dynamic_key="0", ), ) task_run_2 = await models.task_runs.create_task_run( session=session, task_run=schemas.core.TaskRun( name="Task Run 2", flow_run_id=flow_run.id, task_key="my-key", expected_start_time=now.add(minutes=5), dynamic_key="1", ), ) await session.commit() response = await client.post( "/task_runs/filter", json=dict( limit=1, sort=schemas.sorting.TaskRunSort.EXPECTED_START_TIME_DESC.value ), ) assert response.status_code == status.HTTP_200_OK assert response.json()[0]["id"] == str(task_run_2.id) response = await client.post( "/task_runs/filter", json=dict( limit=1, offset=1, sort=schemas.sorting.TaskRunSort.EXPECTED_START_TIME_DESC.value, ), ) assert response.status_code == status.HTTP_200_OK assert response.json()[0]["id"] == str(task_run_1.id) # name asc response = await client.post( "/task_runs/filter", json=dict( limit=1, sort=schemas.sorting.TaskRunSort.NAME_ASC.value, ), ) assert response.status_code == status.HTTP_200_OK assert response.json()[0]["id"] == str(task_run_1.id) # name desc response = await client.post( "/task_runs/filter", json=dict( limit=1, sort=schemas.sorting.TaskRunSort.NAME_DESC.value, ), ) assert response.status_code == status.HTTP_200_OK assert response.json()[0]["id"] == str(task_run_2.id)
d225bb1da80d22b32148b68ebc9ff578bfd85c9b
369
https://github.com/PrefectHQ/prefect.git
755
async def test_read_task_runs_applies_sort(self, flow_run, session, client): now = pendulum.now() task_run_1 = await models.task_runs.create_task_run( session=session, task_run=schemas.core.TaskRun( name="Task Run 1", flow_run_id=flow_run.id, task_key="my-key", expected_start_time=now.subtract(minutes=5), dynamic_key="0", ), ) task_run_2 = await models.task_runs.create_task_run( session=session, task_run=schemas.core.TaskRun( name="Task Run 2", flow_run_id=flow_run.id, task_key="my-key", expected_start_time=now.add(minutes=5), dynamic_key="1", ), ) await session.commit() response = await client.post( "/task_runs/filter", json=dict( limit=1, sort=schemas.sorting.TaskRunSort.EXPECTED_START_TIME_DESC.value ), ) assert response.status_code == status.HTTP_200_OK assert response.json()[0]["id"] == str(task_run_2.id) response = await client.post( "/task_runs/filter", json=dict( limit=1, offset=1, sort=schemas.sorting.TaskRunSort.EXPECTED_START_TIME_DESC.value, ), ) assert response.status_code == status.HTTP_200_OK assert response.json()[0]["id"] == str(task_run_1.id) # name asc response = await client.post(
43
577
test_read_task_runs_applies_sort
12
0
1
6
test/units/parsing/vault/test_vault.py
266,349
Avoid deprecated TestCase functions in unit tests. (#76678) * Avoid deprecated TestCase functions in unit tests. * Add assertRaisesRegex for Python 2.7. * Fix indentation.
ansible
8
Python
12
test_vault.py
def test_odd_length(self): b_data = b'123456789abcdefghijklmnopqrstuvwxyz' self.assertRaisesRegex(vault.AnsibleVaultFormatError, '.*Vault format unhexlify error.*', vault._unhexlify, b_data)
97104f1221b64ef36cf42cb90c5a0eff263a2adb
25
https://github.com/ansible/ansible.git
115
def test_odd_length(self): b_data = b'123456789abcdefghijklmnopqrstuvwxyz' se
7
40
test_odd_length
27
0
1
9
mindsdb/integrations/handlers/jira_handler/tests/test_jira_handler.py
118,383
removing the wrongly commited files and addressing the review comments of PR 4112
mindsdb
9
Python
24
test_jira_handler.py
def setUpClass(cls): cls.kwargs = { "table_name": "project", "jira_url": "https://jira.linuxfoundation.org/", "user_id": "balaceg", "api_key": "4Rhq&Ehd#KV4an!", "jira_query": "project = RELENG and status = 'In Progress'" } cls.handler = JiraHandler('test_jira_handler', cls.kwargs)
e308f43952f3e27d3b48ac28dd3eaffeb26e8ee0
42
https://github.com/mindsdb/mindsdb.git
102
def setUpClass(cls): cls.kwargs = { "table_name": "project", "jira_url": "https://jira.linuxfoundation.org/", "user_id": "balaceg", "api_key": "4Rhq&Ehd#KV4an!", "jira_query": "project = RELENG and status = 'In Progress'" }
5
83
setUpClass
104
0
2
21
tests/rest/client/test_login.py
246,608
Add type hints to `tests/rest/client` (#12066)
synapse
12
Python
80
test_login.py
def test_multi_sso_redirect(self) -> None: # first hit the redirect url, which should redirect to our idp picker channel = self._make_sso_redirect_request(None) self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers uri = location_headers[0] # hitting that picker should give us some HTML channel = self.make_request("GET", uri) self.assertEqual(channel.code, 200, channel.result) # parse the form to check it has fields assumed elsewhere in this class html = channel.result["body"].decode("utf-8") p = TestHtmlParser() p.feed(html) p.close() # there should be a link for each href returned_idps: List[str] = [] for link in p.links: path, query = link.split("?", 1) self.assertEqual(path, "pick_idp") params = urllib.parse.parse_qs(query) self.assertEqual(params["redirectUrl"], [TEST_CLIENT_REDIRECT_URL]) returned_idps.append(params["idp"][0]) self.assertCountEqual(returned_idps, ["cas", "oidc", "oidc-idp1", "saml"])
64c73c6ac88a740ee480a0ad1f9afc8596bccfa4
188
https://github.com/matrix-org/synapse.git
292
def test_multi_sso_redirect(self) -> None: # first hit the redirect url, which should redirect to our idp picker channel = self._make_sso_redirect_request(None) self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers uri = location_headers[0] # hitting that picker should give us some HTML channel = self.make_request("GET", uri) self.assertEqual(channel.code, 200, channel.
33
316
test_multi_sso_redirect
89
1
1
4
tests/utilities/test_importtools.py
58,239
Improve tests
prefect
12
Python
52
test_importtools.py
def reset_sys_modules(): original = sys.modules.copy() yield sys.modules = original @pytest.mark.usefixtures("reset_sys_modules") @pytest.mark.parametrize( "working_directory,script_path", [ # Working directory is not necessary for these imports to work (__root_path__, TEST_PROJECTS_DIR / "flat-project" / "explicit_relative.py"), (__root_path__, TEST_PROJECTS_DIR / "flat-project" / "implicit_relative.py"), (__root_path__, TEST_PROJECTS_DIR / "nested-project" / "implicit_relative.py"), # They also work with the working directory set (TEST_PROJECTS_DIR / "flat-project", "explicit_relative.py"), (TEST_PROJECTS_DIR / "flat-project", "implicit_relative.py"), (TEST_PROJECTS_DIR / "nested-project", "implicit_relative.py"), # The tree structure requires the working directory to be at the base of all # module imports (TEST_PROJECTS_DIR / "tree-project", Path("imports") / "implicit_relative.py"), ], )
b950152d8afc439135d5241c27821b1eeaa72a1e
@pytest.mark.usefixtures("reset_sys_modules") @pytest.mark.parametrize( "working_directory,script_path", [ # Working directory is not necessary for these imports to work (__root_path__, TEST_PROJECTS_DIR / "flat-project" / "explicit_relative.py"), (__root_path__, TEST_PROJECTS_DIR / "flat-project" / "implicit_relative.py"), (__root_path__, TEST_PROJECTS_DIR / "nested-project" / "implicit_relative.py"), # They also work with the working directory set (TEST_PROJECTS_DIR / "flat-project", "explicit_relative.py"), (TEST_PROJECTS_DIR / "flat-project", "implicit_relative.py"), (TEST_PROJECTS_DIR / "nested-project", "implicit_relative.py"), # The tree structure requires the working directory to be at the base of all # module imports (TEST_PROJECTS_DIR / "tree-project", Path("imports") / "implicit_relative.py"), ], )
19
https://github.com/PrefectHQ/prefect.git
180
def reset_sys_modules(): original = sys.modules.copy() yield sys.modules = original @pytest.mark.usefixtures("reset_sys_modules") @pytest.mark.parametrize( "working_directory,script_path", [ # Working directory is not necessary for these imports to work (__root_path__, TEST_PROJECTS_DIR / "flat-project" / "explicit_relative.py"), (__root_path__, TEST_PROJECTS_DIR / "flat-project" / "implicit_relative.py"), (__root_path__, TEST_PROJECTS_DIR / "nested-project" / "implicit_relative.py"), # They also work with the working directory set (TEST_PROJECTS_DIR / "flat-project", "explicit_relative.py"),
12
193
reset_sys_modules
175
1
14
41
test/test_extended_models.py
192,711
Clean up purely informational fields from Weight Meta-data (#5852) * Removing `task`, `architecture` and `quantization` * Fix mypy * Remove size field * Remove unused import. * Fix mypy * Remove size from schema list. * update todo * Simplify with assert * Adding min_size to all models. * Update RAFT min size to 128
vision
21
Python
109
test_extended_models.py
def test_schema_meta_validation(model_fn): # TODO: add list of permitted fields classification_fields = ["categories", "acc@1", "acc@5"] defaults = { "all": ["recipe", "num_params", "min_size"], "models": classification_fields, "detection": ["categories", "map"], "quantization": classification_fields + ["backend", "unquantized"], "segmentation": ["categories", "mIoU", "acc"], "video": classification_fields, "optical_flow": [], } model_name = model_fn.__name__ module_name = model_fn.__module__.split(".")[-2] fields = set(defaults["all"] + defaults[module_name]) weights_enum = _get_model_weights(model_fn) if len(weights_enum) == 0: pytest.skip(f"Model '{model_name}' doesn't have any pre-trained weights.") problematic_weights = {} incorrect_params = [] bad_names = [] for w in weights_enum: missing_fields = fields - set(w.meta.keys()) if missing_fields: problematic_weights[w] = missing_fields if w == weights_enum.DEFAULT: if module_name == "quantization": # parameters() count doesn't work well with quantization, so we check against the non-quantized unquantized_w = w.meta.get("unquantized") if unquantized_w is not None and w.meta.get("num_params") != unquantized_w.meta.get("num_params"): incorrect_params.append(w) else: if w.meta.get("num_params") != sum(p.numel() for p in model_fn(weights=w).parameters()): incorrect_params.append(w) else: if w.meta.get("num_params") != weights_enum.DEFAULT.meta.get("num_params"): if w.meta.get("num_params") != sum(p.numel() for p in model_fn(weights=w).parameters()): incorrect_params.append(w) if not w.name.isupper(): bad_names.append(w) assert not problematic_weights assert not incorrect_params assert not bad_names @pytest.mark.parametrize( "model_fn", TM.get_models_from_module(models) + TM.get_models_from_module(models.detection) + TM.get_models_from_module(models.quantization) + TM.get_models_from_module(models.segmentation) + TM.get_models_from_module(models.video) + TM.get_models_from_module(models.optical_flow), ) @run_if_test_with_extended
f1587a20e3169cdd1c8cae5a1067c0ff52d63320
@pytest.mark.parametrize( "model_fn", TM.get_models_from_module(models) + TM.get_models_from_module(models.detection) + TM.get_models_from_module(models.quantization) + TM.get_models_from_module(models.segmentation) + TM.get_models_from_module(models.video) + TM.get_models_from_module(models.optical_flow), ) @run_if_test_with_extended
341
https://github.com/pytorch/vision.git
518
def test_schema_meta_validation(model_fn): # TODO: add list of permitted fields classification_fields = ["categories", "acc@1", "acc@5"] defaults = { "all": ["recipe", "num_params", "min_size"], "models": classification_fields, "detection": ["categories", "map"], "quantization": classification_fields + ["backend", "unquantized"], "segmentation": ["categories", "mIoU", "acc"], "video": classification_fields, "optical_flow": [], } model_name = model_fn.__name__ module_name = model_fn.__module__.split(".")[-2] fields =
45
697
test_schema_meta_validation
19
1
2
9
ludwig/backend/ray.py
8,293
Allow explicitly plumbing through nics (#2605)
ludwig
11
Python
17
ray.py
def create_runner(**kwargs): trainer_kwargs = get_trainer_kwargs(**kwargs) with spread_env(**trainer_kwargs): trainer = Trainer(**trainer_kwargs) trainer.start() try: yield trainer finally: trainer.shutdown() @register_ray_trainer("trainer", MODEL_ECD, default=True)
c99cab3a674e31885e5608a4aed73a64b1901c55
@register_ray_trainer("trainer", MODEL_ECD, default=True)
43
https://github.com/ludwig-ai/ludwig.git
53
def create_runner(**kwargs): trainer_kwargs = g
12
97
create_runner
8
0
1
2
tests/flow_runners/test_base.py
56,067
Splits flow_runners into files by their execution engine. (PrefectHQ/orion#1948) Our `subprocess`, Docker, and Kubernetes runners don't share a lot of behavior, and some of them require complex imports. To make the repo easier to navigate and make room for additional future FlowRunners, I'm splitting flow_runners and their tests into a subpackage. All current imports should be preserved, and we can continue to document them as coming from `prefect.flow_runners`.
prefect
14
Python
8
test_base.py
def test_flow_runner_networks_config_casts_to_list(self, runner_type): assert type(runner_type(networks={"a", "b"}).networks) == list
9a83d0c051e4a461bab8ecc97312fac7c6061d78
25
https://github.com/PrefectHQ/prefect.git
14
def test_flow_runner_networks_config_casts_to_list(self, runner_type): assert t
6
42
test_flow_runner_networks_config_casts_to_list
15
0
1
5
tests/sentry/api/endpoints/test_organization_sentry_functions.py
94,288
Sentry Functions: Endpoint to return list of Sentry Functions (#37626) * feat(integrations): new endpoint for fetching sentry functions for an organization * ref(integrations): add feature flag to gate endpoint * ref(integrations): remove extraneous comment
sentry
11
Python
13
test_organization_sentry_functions.py
def test_get(self): with Feature("organizations:sentry-functions"): response = self.client.get(self.url) assert response.status_code == 200 assert response.data == []
d2ed8bbdfe259eb0f316227a45b2266f41aa9ea0
36
https://github.com/getsentry/sentry.git
54
def test_get(self): with Feature("organizations:sentry-functions
9
62
test_get
78
0
1
31
zerver/tests/test_link_embed.py
83,620
actions: Split out zerver.actions.message_send. Signed-off-by: Anders Kaseorg <anders@zulip.com>
zulip
16
Python
68
test_link_embed.py
def test_youtube_url_title_replaces_url(self) -> None: url = "https://www.youtube.com/watch?v=eSJTXC7Ixgg" with mock_queue_publish("zerver.actions.message_send.queue_json_publish"): msg_id = self.send_personal_message( self.example_user("hamlet"), self.example_user("cordelia"), content=url, ) msg = Message.objects.select_related("sender").get(id=msg_id) event = { "message_id": msg_id, "urls": [url], "message_realm_id": msg.sender.realm_id, "message_content": url, } mocked_data = {"title": "Clearer Code at Scale - Static Types at Zulip and Dropbox"} self.create_mock_response(url) with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES): with self.assertLogs(level="INFO") as info_logs: with mock.patch( "zerver.lib.markdown.link_preview.link_embed_data_from_cache", lambda *args, **kwargs: mocked_data, ): FetchLinksEmbedData().consume(event) self.assertTrue( "INFO:root:Time spent on get_link_embed_data for https://www.youtube.com/watch?v=eSJTXC7Ixgg:" in info_logs.output[0] ) msg.refresh_from_db() expected_content = f self.assertEqual(expected_content, msg.rendered_content)
975066e3f0e3d4c3a356da3dfc1d4472f72717b7
181
https://github.com/zulip/zulip.git
415
def test_youtube_url_title_replaces_url(self) -> None: url = "https://www.youtube.com/watch?v=eSJTXC7Ixgg" with mock_queue_publish("zerver.actions.message_send.queue_json_publish"): msg_id = self.send_personal_message( self.example_user("hamlet"), self.example_user("cordelia"), content=url, ) msg = Message.objects.select_related("sender").get(id=msg_id) event = { "message_id": msg_id, "urls": [url], "message_realm_id": msg.sender.realm_id, "message_content": ur
39
329
test_youtube_url_title_replaces_url
7
0
1
3
airbyte-integrations/connectors/source-salesforce/integration_tests/integration_test.py
3,980
🐛 Fix Python checker configs and Connector Base workflow (#10505)
airbyte
11
Python
7
integration_test.py
def _encode_content(text): base64_bytes = base64.b64encode(text.encode("utf-8")) return base64_bytes.decode("utf-8")
bbd13802d81263d5677a4e8599d0b8708889719d
25
https://github.com/airbytehq/airbyte.git
12
def _encode_content(text): base64_bytes = base64.b64encode(text.encode("utf-8")) return base64_bytes.decode("utf-8"
7
45
_encode_content
14
0
1
4
dev/breeze/tests/test_commands.py
48,141
Seperate provider verification as standalone breeze command (#23454) This is another step in simplifying and converting to Python all of the CI/local development tooling. This PR separates out verification of providers as a separate breeze command `verify-provider-packages`. It was previously part of "prepare_provider_packages.py" but it has been now extracted to a separate in-container python file and it was wrapped with breeze's `verify-provider-packages` command. No longer provider verification is run with "preparing provider docs" nor "preparing provider packages" - it's a standaline command. This command is also used in CI now to run the tests: * all provider packages are built and created on CI together with   airflow version * the packages are installed inside the CI image and providers are verified * the 2.1 version of Airflow is installed together with all 2.1 - compatible providers and provider verification is run there too. This all is much simpler now - we got rediof some 500 lines of bash code again in favour of breeze python code. Fixes: #23430
airflow
9
Python
13
test_commands.py
def test_get_extra_docker_flags_all(): flags = get_extra_docker_flags(MOUNT_ALL) assert "empty" not in "".join(flags) assert len(flags) < 10
3ed07474649b1e202f9b106105fef21f7b2cfddc
27
https://github.com/apache/airflow.git
22
def test_get_extra_docker_flags_all(): flags = get_extra_docker_flags(MOUNT_ALL) assert "empty" not in "".join(flags) assert len
6
48
test_get_extra_docker_flags_all
51
0
1
13
tests/snuba/api/endpoints/test_organization_group_index.py
90,269
ref(tests): Remove `get_valid_response()` (#34822)
sentry
12
Python
40
test_organization_group_index.py
def test_basic_ignore(self): group = self.create_group(status=GroupStatus.RESOLVED) snooze = GroupSnooze.objects.create(group=group, until=timezone.now()) self.login_as(user=self.user) assert not GroupHistory.objects.filter( group=group, status=GroupHistoryStatus.IGNORED ).exists() response = self.get_success_response(qs_params={"id": group.id}, status="ignored") # existing snooze objects should be cleaned up assert not GroupSnooze.objects.filter(id=snooze.id).exists() group = Group.objects.get(id=group.id) assert group.status == GroupStatus.IGNORED assert GroupHistory.objects.filter(group=group, status=GroupHistoryStatus.IGNORED).exists() assert response.data == {"status": "ignored", "statusDetails": {}, "inbox": None}
096b5511e244eecd8799b2a0324655207ce8985e
169
https://github.com/getsentry/sentry.git
145
def test_basic_ignore(self): group = self.create_group(status=GroupStatus.RESOLVED) snooze = GroupSnooze.objects.create(group=grou
28
274
test_basic_ignore
23
0
1
3
tests/infrastructure/test_process.py
57,424
Add tests for process
prefect
16
Python
21
test_process.py
async def test_process_runs_command(tmp_path): # Perform a side-effect to demonstrate the command is run assert await Process(command=["touch", str(tmp_path / "canary")]).run() assert (tmp_path / "canary").exists()
cb53fb90654e3adfef19e58a42be16228d0695ec
36
https://github.com/PrefectHQ/prefect.git
31
async def test_process_runs_command(tmp_path): # Perform a side-effect to demonstrate the command is run assert await Process(command=["touch", str(tmp_path / "canary")]).run() assert (tmp_path / "canary").exists()
7
66
test_process_runs_command
18
0
1
6
yt_dlp/extractor/murrtube.py
162,581
[murrtube] Add extractor (#2387) Authored by: cyberfox1691
yt-dlp
13
Python
17
murrtube.py
def _download_gql(self, video_id, op, note=None, fatal=True): result = self._download_json( 'https://murrtube.net/graphql', video_id, note, data=json.dumps(op).encode(), fatal=fatal, headers={'Content-Type': 'application/json'}) return result['data']
812283199a2f05046b9b4d59c22a06051b958bf6
59
https://github.com/yt-dlp/yt-dlp.git
64
def _download_gql(self, video_id, op, note=None, fatal=True): result = self._download_json( 'https://mu
13
91
_download_gql
58
0
4
23
tests/providers/cncf/kubernetes/operators/test_kubernetes_pod.py
44,995
Switch XCom implementation to use run_id (#20975)
airflow
13
Python
43
test_kubernetes_pod.py
def test_push_xcom_pod_info(self, mock_extract_xcom, dag_maker, do_xcom_push): mock_extract_xcom.return_value = '{}' with dag_maker(): KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], name="test", task_id="task", in_cluster=False, do_xcom_push=do_xcom_push, ) DummyOperator(task_id='task_to_pull_xcom') dagrun = dag_maker.create_dagrun() tis = {ti.task_id: ti for ti in dagrun.task_instances} pod = self.run_pod( tis["task"].task, context=tis["task"].get_template_context(session=dag_maker.session), ) pod_name = tis["task_to_pull_xcom"].xcom_pull(task_ids="task", key='pod_name') pod_namespace = tis["task_to_pull_xcom"].xcom_pull(task_ids="task", key='pod_namespace') assert pod_name and pod_name == pod.metadata.name assert pod_namespace and pod_namespace == pod.metadata.namespace
0ebd6428e6b484790bfbbe1b8687ef4e6cae10e9
169
https://github.com/apache/airflow.git
295
def test_push_xcom_pod_info(self, mock_extract_xcom, dag_maker, do_xcom_push): mock_extract_xcom.return_value = '{}' with dag_maker(): KubernetesPodOperator( namespace="default", image="ubuntu:16.04", cmds=["bash", "-cx"], name="test", task_id="task", in_cluster=False, do_xcom_push=do_xcom_push, ) DummyOperator(task_id='task_to_pull_xcom') dagrun = dag_maker.create_dagrun(
31
284
test_push_xcom_pod_info
10
0
2
2
label_studio/tasks/serializers.py
177,718
feat: DEV-1844: "Last updated by" column in Data Manager (#2119) * feat: DEV-1696: Add lead time task column in Data Manager * Fix test for test_get_task * Fix lead time annotation * Fix tests * Add migration for view and fix test * Fix test * Fix tests data * Fix annotation count * Fix annotation results in tests * Fix lead_time type to float * Fix test tasks-all-fields-postgre data * Change test data for tasks-all-fields-postgre * Change annotations_results to anystr * Change predictions_results data for tasks-all-fields-postgre * Change data in tasks-all-fields-sqlite test * feat: DEV-1844: "Last updated by" column in Data Manager * More * Add more functions * Add context with user * Remove updated_by from quickview * Fix label stream * Fix tests * Fix tests * Update DM * Fix pagination class * Fix Co-authored-by: Konstantin Korotaev <42615530+KonstantinKorotaev@users.noreply.github.com>
label-studio
10
Python
10
serializers.py
def get_updated_by(self, task): return [{'user_id': task.updated_by_id}] if task.updated_by_id else []
c0c3426467785ffe9a8e3026fff1ef6e4faddca3
24
https://github.com/heartexlabs/label-studio.git
16
def get_updated_by(self, task): return [{'user_id': task.updated_by_id}] if task.updated_by_id else []
4
38
get_updated_by
6
0
1
3
tests/components/mqtt/test_camera.py
313,670
Speed up mqtt tests (#73423) Co-authored-by: jbouwh <jan@jbsoft.nl> Co-authored-by: Jan Bouwhuis <jbouwh@users.noreply.github.com>
core
11
Python
6
test_camera.py
def camera_platform_only(): with patch("homeassistant.components.mqtt.PLATFORMS", [Platform.CAMERA]): yield
51b4d15c8cb83bb715222841aa48e83f77ef38ff
18
https://github.com/home-assistant/core.git
19
def camera_platform_only(): with patch("homeassistant.components.mqtt.PLATFORMS", [Platform.CAMERA]): yield
4
37
camera_platform_only
64
0
5
12
pandas/tests/strings/test_find_replace.py
166,117
WARN: PerformanceWarning for non-pyarrow fallback (#46732)
pandas
12
Python
35
test_find_replace.py
def test_match_na_kwarg(any_string_dtype): # GH #6609 s = Series(["a", "b", np.nan], dtype=any_string_dtype) with maybe_perf_warn(any_string_dtype == "string[pyarrow]" and pa_version_under4p0): result = s.str.match("a", na=False) expected_dtype = np.bool_ if any_string_dtype == "object" else "boolean" expected = Series([True, False, False], dtype=expected_dtype) tm.assert_series_equal(result, expected) with maybe_perf_warn(any_string_dtype == "string[pyarrow]" and pa_version_under4p0): result = s.str.match("a") expected_dtype = "object" if any_string_dtype == "object" else "boolean" expected = Series([True, False, np.nan], dtype=expected_dtype) tm.assert_series_equal(result, expected)
6d165676daef078988c5a292261d7901295e21d9
137
https://github.com/pandas-dev/pandas.git
107
def test_match_na_kwarg(any_string_dtype): # GH #6609 s = Series(["a", "b", np.nan], dtype=any_string_dtype) with maybe_perf_warn(any_string_dtype == "string[pyarrow]" and pa_version_under4p0): result = s.str.match("a", na=False) expected_dtype = np.bool_ if any_string_dtype == "object" else "boolean" expected = Series([True, False, False], dtype=expected_dtype) tm.assert_series_equal(result, expected) with maybe_perf_warn(any_string_dtype == "string[pyarrow]" and pa_version_under4p0): result = s.str.match("a") expected_dtype =
18
230
test_match_na_kwarg
52
0
1
9
awx/main/tests/functional/api/test_instance_group.py
81,022
Disallows disassociate of hubrid type instances from controlplane instance group Introduce new pattern for is_valid_removal Makes disassociate error message a bit more dynamic
awx
12
Python
34
test_instance_group.py
def test_cannot_remove_controlplane_hybrid_instances(post, controlplane_instance_group, node_type_instance, admin_user): instance = node_type_instance(hostname='hybrid_node', node_type='hybrid') controlplane_instance_group.instances.add(instance) url = reverse('api:instance_group_instance_list', kwargs={'pk': controlplane_instance_group.pk}) r = post(url, {'disassociate': True, 'id': instance.id}, admin_user, expect=400) assert 'Cannot disassociate hybrid node' in str(r.data) url = reverse('api:instance_instance_groups_list', kwargs={'pk': instance.pk}) r = post(url, {'disassociate': True, 'id': controlplane_instance_group.id}, admin_user, expect=400) assert f'Cannot disassociate hybrid instance' in str(r.data)
dc64168ed40bdf0d59a715ef82b2a6b46c2ab58e
130
https://github.com/ansible/awx.git
75
def test_cannot_remove_controlplane_hybrid_instances(post, controlplane_instance_group, node_type_instance, admin_user): instance = node_type_instance(hostname='hybrid_node', node_type='hybrid') controlplane_instance_group.instances.add(instance) url = reverse('api:instance_group_instance_list', kwargs={'pk': controlplane_instance_group.pk}) r = post(url, {'disassociate': True, 'id': instance.id}, admin_user, expect=400) assert 'Cannot disassociate hybrid node' in str(r.data) url = reverse('api:instance_instance_groups_list', kwargs={'pk': instance.pk}) r = post(url, {'disassociate': True, 'id': controlplane_instance_group.id}, admin_user, expect=400) assert f'Cannot disassociate hybrid instance' in str(r.data)
19
212
test_cannot_remove_controlplane_hybrid_instances
236
0
1
65
pandas/tests/frame/test_reductions.py
171,091
DEPR: Enforce deprecation of numeric_only=None in DataFrame aggregations (#49551) * WIP * DEPR: Enforce deprecation of numeric_only=None in DataFrame aggregations * Partial reverts * numeric_only in generic/series, fixup * cleanup * Remove docs warning * fixups * Fixups
pandas
16
Python
124
test_reductions.py
def test_operators_timedelta64(self): df = DataFrame( { "A": date_range("2012-1-1", periods=3, freq="D"), "B": date_range("2012-1-2", periods=3, freq="D"), "C": Timestamp("20120101") - timedelta(minutes=5, seconds=5), } ) diffs = DataFrame({"A": df["A"] - df["C"], "B": df["A"] - df["B"]}) # min result = diffs.min() assert result[0] == diffs.loc[0, "A"] assert result[1] == diffs.loc[0, "B"] result = diffs.min(axis=1) assert (result == diffs.loc[0, "B"]).all() # max result = diffs.max() assert result[0] == diffs.loc[2, "A"] assert result[1] == diffs.loc[2, "B"] result = diffs.max(axis=1) assert (result == diffs["A"]).all() # abs result = diffs.abs() result2 = abs(diffs) expected = DataFrame({"A": df["A"] - df["C"], "B": df["B"] - df["A"]}) tm.assert_frame_equal(result, expected) tm.assert_frame_equal(result2, expected) # mixed frame mixed = diffs.copy() mixed["C"] = "foo" mixed["D"] = 1 mixed["E"] = 1.0 mixed["F"] = Timestamp("20130101") # results in an object array result = mixed.min() expected = Series( [ pd.Timedelta(timedelta(seconds=5 * 60 + 5)), pd.Timedelta(timedelta(days=-1)), "foo", 1, 1.0, Timestamp("20130101"), ], index=mixed.columns, ) tm.assert_series_equal(result, expected) # excludes non-numeric result = mixed.min(axis=1, numeric_only=True) expected = Series([1, 1, 1.0], index=[0, 1, 2]) tm.assert_series_equal(result, expected) # works when only those columns are selected result = mixed[["A", "B"]].min(1) expected = Series([timedelta(days=-1)] * 3) tm.assert_series_equal(result, expected) result = mixed[["A", "B"]].min() expected = Series( [timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=["A", "B"] ) tm.assert_series_equal(result, expected) # GH 3106 df = DataFrame( { "time": date_range("20130102", periods=5), "time2": date_range("20130105", periods=5), } ) df["off1"] = df["time2"] - df["time"] assert df["off1"].dtype == "timedelta64[ns]" df["off2"] = df["time"] - df["time2"] df._consolidate_inplace() assert df["off1"].dtype == "timedelta64[ns]" assert df["off2"].dtype == "timedelta64[ns]"
b7ea7c6dfd100c40b0bc45aacf6d92c5c22f2e63
605
https://github.com/pandas-dev/pandas.git
859
def test_operators_timedelta64(self): df = DataFrame( { "A": date_range("2012-1-1", periods=3, freq="D"), "B": date_range("2012-1-2", periods=3, freq="D"), "C": Timestamp("20120101") - timedelta(minutes=5, seconds=5), } ) diffs = DataFrame({"A": df["A"] - df["C"], "B": df["A"] - df["B"]}) # min result = diffs.min() assert result[0] == diffs.loc[0, "A"] assert result[1] == diffs.loc[0, "B"] result = diffs.min(axis=1) assert (result == diffs.loc[0, "B"]).all() # max result = diffs.max() assert result[0] == diffs.loc[2, "A"] assert result[1] == diffs.loc[2, "B"] result = diffs.max(axis=1) assert (result == diffs["A"]).all() # abs result = diffs.abs() result2 = abs(diffs) expected = DataFrame({"A": df["A"] - df["C"], "B": df["B"] - df["A"]}) tm.assert_frame_equal(result, expected) tm.assert_frame_equal(result2, expected) # mixed frame mixed = diffs.copy() mixed["C"] = "foo" mixed["D"] = 1 mixed["E"] = 1.0 mixed["F"] = Timestamp("20130101") # results in an object array result = mixed.min() expected = Series( [ pd.Timedelta(timedelta(seconds=5 * 60 + 5)), pd.Timedelta(timedelta(days=-1)), "foo", 1, 1.0, Timestamp("20130101"), ], index=mixed.columns, ) tm.assert_series_equal(result, expected) # excludes non-numeric result = mixed.min(axis=1, numeric_only=True) expected = Series([1, 1, 1.0], index=[0, 1, 2]) tm.assert_series_equal(result, expected) # works when only those columns are selected result = mixed[["A", "B"]].min(1) expected = Series([timedelta(days=-1)] * 3) tm.assert_series_equal(result, expected) result = mixed[["A", "B"]].min() expected = Series( [timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=["A", "B"] ) tm.assert_series_equal(result, expected) # GH 3106 df = DataFrame( { "time": date_range("20130102", periods=5), "time2": date_range("20130105", periods=5), } ) df["of
35
1,005
test_operators_timedelta64
42
0
1
11
tests/snuba/api/endpoints/test_organization_events_spans_histogram.py
87,656
fix(perf): Remove suspect spans flag (#40963) Re-pushing up https://github.com/getsentry/sentry/pull/38799 now that test should be (mostly) fixed. That one closed just before I force pushed so it's not re-openable 🤷
sentry
11
Python
34
test_organization_events_spans_histogram.py
def test_bad_params_outside_range_num_buckets(self): query = { "project": [self.project.id], "span": self.format_span("django.middleware", "2b9cbb96dbf59baa"), "numBuckets": -1, } response = self.do_request(query) assert response.status_code == 400, "failing for numBuckets" assert response.data == { "numBuckets": ["Ensure this value is greater than or equal to 1."] }, "failing for numBuckets"
3dea4b7342328fc3ce74685b481f983c7ee6599a
65
https://github.com/getsentry/sentry.git
127
def test_bad_params_outside_range_num_buckets(self): query = { "project": [self.project.id], "span": self.format_span("django.middleware", "2b9cbb96dbf59baa"), "numBuckets": -1, } response = self.do_request(query) assert response.status_code == 400, "failing for numBuckets" assert response.da
10
114
test_bad_params_outside_range_num_buckets
28
0
1
14
tests/openbb_terminal/common/behavioural_analysis/test_finbrain_view.py
283,343
Updating some names (#1575) * quick econ fix * black * keys and feature flags * terminal name :eyes: * some more replacements * some more replacements * edit pyproject * gst -> openbb * add example portfolios back to git * Update api from gst * sorry. skipping some tests * another round of names * another round of test edits * Missed some .gst refs and update timezone * water mark stuff * Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS * fix more GST to OpenBB Terminal * Logging : merge conflicts with main * Revert wrong files Co-authored-by: Andrew <andrew.kenreich@gmail.com> Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
OpenBBTerminal
10
Python
21
test_finbrain_view.py
def test_display_sentiment_analysis_empty_df(mocker): view = "openbb_terminal.common.behavioural_analysis.finbrain_view" # MOCK EXPORT_DATA mocker.patch( target="openbb_terminal.common.behavioural_analysis.finbrain_view.export_data" ) # MOCK GTFF mocker.patch.object(target=helper_funcs.obbff, attribute="USE_ION", new=True) # MOCK GET_SENTIMENT mocker.patch( target=f"{view}.finbrain_model.get_sentiment", return_value=pd.DataFrame(), ) finbrain_view.display_sentiment_analysis( ticker="AAPL", export="", )
b71abcfbf4d7e8ac1855522aff0378e13c8b5362
67
https://github.com/OpenBB-finance/OpenBBTerminal.git
95
def test_display_sentiment_analysis_empty_df(mocker): view = "openbb_terminal.common.behavioural_analysis.finbrain_view" # MOCK EXPORT_DATA mocker.patch( target="openbb_terminal.common.behavioural_analysis.finbrain_view.export_data" ) # MOCK GTFF mocker.patch.object(target=helper_funcs.obbff, attribute="USE_ION", new=True) #
17
120
test_display_sentiment_analysis_empty_df
46
0
1
28
src/documents/tests/test_barcodes.py
320,002
Updates how barcodes are detected, using pikepdf images, instead of converting each page to an image
paperless-ngx
9
Python
33
test_barcodes.py
def test_get_mime_type(self): tiff_file = os.path.join( self.SAMPLE_DIR, "simple.tiff", ) pdf_file = os.path.join( self.SAMPLE_DIR, "simple.pdf", ) png_file = os.path.join( self.BARCODE_SAMPLE_DIR, "barcode-128-custom.png", ) tiff_file_no_extension = os.path.join(settings.SCRATCH_DIR, "testfile1") pdf_file_no_extension = os.path.join(settings.SCRATCH_DIR, "testfile2") shutil.copy(tiff_file, tiff_file_no_extension) shutil.copy(pdf_file, pdf_file_no_extension) self.assertEqual(barcodes.get_file_mime_type(tiff_file), "image/tiff") self.assertEqual(barcodes.get_file_mime_type(pdf_file), "application/pdf") self.assertEqual( barcodes.get_file_mime_type(tiff_file_no_extension), "image/tiff", ) self.assertEqual( barcodes.get_file_mime_type(pdf_file_no_extension), "application/pdf", ) self.assertEqual(barcodes.get_file_mime_type(png_file), "image/png")
7aa0e5650b290cbc39e37418508863043f0de008
161
https://github.com/paperless-ngx/paperless-ngx.git
274
def test_get_mime_type(self): tiff_file = os.path.join( self.SAMPLE_DIR, "simple.tiff", ) pdf_file = os.path.join(
19
263
test_get_mime_type
64
0
2
12
seaborn/tests/_core/test_moves.py
41,229
Reorganize how Stat transform works, following Move patterns
seaborn
13
Python
54
test_moves.py
def test_two_semantics(self, df): groupby = GroupBy(["x", "grp2", "grp3"]) res = Dodge()(df, groupby, "x") levels = categorical_order(df["grp2"]), categorical_order(df["grp3"]) w, n = 0.8, len(levels[0]) * len(levels[1]) shifts = np.linspace(0, w - w / n, n) shifts -= shifts.mean() assert_series_equal(res["y"], df["y"]) assert_series_equal(res["width"], df["width"] / n) for (v2, v3), shift in zip(product(*levels), shifts): rows = (df["grp2"] == v2) & (df["grp3"] == v3) assert_series_equal(res.loc[rows, "x"], df.loc[rows, "x"] + shift)
9917c46c544fa1f1a4b76cf174206a0f35305916
181
https://github.com/mwaskom/seaborn.git
148
def test_two_semantics(self, df): groupby = GroupBy(["x", "grp2", "grp3"]) res = Dodge()(df, groupby, "x") levels = categorical_order(df["grp2"])
24
291
test_two_semantics
34
0
2
10
examples/model_compress/pruning/taylorfo_lightning_evaluator.py
113,360
[Compression] Evaluator - step 3 Tutorial (#5016)
nni
11
Python
27
taylorfo_lightning_evaluator.py
def evaluate(self, batch, stage=None): x, y = batch logits = self(x) loss = self.criterion(logits, y) preds = torch.argmax(logits, dim=1) acc = accuracy(preds, y) if stage: self.log(f"default", loss, prog_bar=False) self.log(f"{stage}_loss", loss, prog_bar=True) self.log(f"{stage}_acc", acc, prog_bar=True)
5f571327902c84c208482f66c2b293ad1013ee3d
94
https://github.com/microsoft/nni.git
108
def evaluate(self, batch, stage=None): x, y = batch
17
149
evaluate
24
0
3
7
awx/main/models/schedules.py
81,246
modifying schedules API to return a list of links
awx
13
Python
21
schedules.py
def get_zoneinfo_with_links(self): zone_instance = get_zonefile_instance() return_val = {'zones': sorted(zone_instance.zones), 'links': {}} for zone_name in return_val['zones']: if str(zone_name) != str(zone_instance.zones[zone_name]._filename): return_val['links'][zone_name] = zone_instance.zones[zone_name]._filename return return_val
c836fafb61066d54af6f9726b00a83e6ae8451af
71
https://github.com/ansible/awx.git
77
def get_zoneinfo_with_links(self): zone_instance = get_zonefile_instance() return_val = {'zones': sorted(zone_instance.zones), 'links': {}} for zone_name in return_val['zones']: if str(zone_name) != str(zone_instance.zones[zone_name]._filename): return_val['links'][zone_name] = zone_instance.zones[zo
10
117
get_zoneinfo_with_links
7
0
2
2
mmdet/utils/util_distribution.py
244,221
fix lint (#7793)
mmdetection
9
Python
7
util_distribution.py
def is_mlu_available(): return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
24f2fdb38481e6c013a588660c044e410148ce1e
18
https://github.com/open-mmlab/mmdetection.git
13
def is_mlu_available(): return hasattr(torch, 'is_mlu_available') and torch.is_mlu_a
3
34
is_mlu_available
24
0
3
7
pytorch_lightning/trainer/connectors/checkpoint_connector.py
241,510
Remove `hpc_save` (#11101)
lightning
11
Python
19
checkpoint_connector.py
def _hpc_resume_path(self) -> Optional[str]: if not os.path.isdir(self.trainer.weights_save_path): return None dir_path_hpc = str(self.trainer.weights_save_path) max_version = self.__max_ckpt_version_in_folder(dir_path_hpc, "hpc_ckpt_") if max_version is not None: return os.path.join(dir_path_hpc, f"hpc_ckpt_{max_version}.ckpt")
4b5761539e45bd0392aa49378cbaaca574006f03
65
https://github.com/Lightning-AI/lightning.git
73
def _hpc_resume_path(self) -> Optional[str]: if not os.path.isdir(self.trainer.weights_save_path): return None dir_path_hpc = str(self.trainer.weights_save_path)
13
107
_hpc_resume_path
9
0
1
3
mindsdb/integrations/handlers/informix_handler/tests/test_informix_handler.py
116,938
cleaned up whitespace and indentation in test_informix_handler
mindsdb
9
Python
9
test_informix_handler.py
def test_4_get_tables(self): tables = self.handler.get_tables() assert tables.type is RESPONSE_TYPE.TABLE
82ba332ccf612ef32880a25167aba5fd69408889
22
https://github.com/mindsdb/mindsdb.git
23
def test_4_get_tables(self): tables = self
8
36
test_4_get_tables
122
0
3
17
haystack/utils/doc_store.py
257,007
fix launch scripts (#2341)
haystack
13
Python
86
doc_store.py
def launch_opensearch(sleep=15, delete_existing=False): # Start an OpenSearch server via docker logger.debug("Starting OpenSearch...") # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now # docker rm only succeeds if the container is stopped, not if it is running if delete_existing: _ = subprocess.run([f"docker rm --force {OPENSEARCH_CONTAINER_NAME}"], shell=True, stdout=subprocess.DEVNULL) status = subprocess.run( [ f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9201:9200 -p 9600:9600 -e "discovery.type=single-node" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.2.4' ], shell=True, ) if status.returncode: logger.warning( "Tried to start OpenSearch through Docker but this failed. " "It is likely that there is already an existing OpenSearch instance running. " ) else: time.sleep(sleep)
d43801143221e71e868c0ac80795bb0a306778e7
77
https://github.com/deepset-ai/haystack.git
230
def launch_opensearch(sleep=15, delete_existing=False): # Start an OpenSearch server via docker logger.debug("Starting OpenSearch...") # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now # docker rm only succeeds if the container is stopped, not if it is running if delete_existing: _ = sub
16
142
launch_opensearch
73
0
9
22
wagtail/search/backends/database/postgres/postgres.py
75,504
Reformat with black
wagtail
16
Python
55
postgres.py
def prepare_field(self, obj, field): if isinstance(field, SearchField): yield ( field, get_weight(field.boost), self.prepare_value(field.get_value(obj)), ) elif isinstance(field, AutocompleteField): # AutocompleteField does not define a boost parameter, so use a base weight of 'D' yield (field, "D", self.prepare_value(field.get_value(obj))) elif isinstance(field, RelatedFields): sub_obj = field.get_value(obj) if sub_obj is None: return if isinstance(sub_obj, Manager): sub_objs = sub_obj.all() else: if callable(sub_obj): sub_obj = sub_obj() sub_objs = [sub_obj] for sub_obj in sub_objs: for sub_field in field.fields: yield from self.prepare_field(sub_obj, sub_field)
d10f15e55806c6944827d801cd9c2d53f5da4186
144
https://github.com/wagtail/wagtail.git
350
def prepare_field(self, obj, field): if isinstance(field, SearchField): yield ( field, get_weight(field.boost), self.prepare_value(field.get_value(obj)), ) elif isinstance(field, AutocompleteField): # AutocompleteField does not define a boost parameter, so use a base weight of 'D' yield (field, "D", self.prepare_value(field.get_value(obj))) elif isinstance(field, RelatedFields): sub_obj = field.get_value(obj) if sub_obj is None: return if isinstance(sub_obj, Manager): sub_objs = sub_obj.all() else: if callable(sub_obj): sub_obj = sub_obj() sub_objs = [sub_obj] for sub_obj in sub_objs: for su
19
225
prepare_field
70
0
1
34
tests/jobs/test_scheduler_job.py
47,368
Fix regression in pool metrics (#22939) Co-authored-by: Tanel Kiis <tanel.kiis@reach-u.com> Co-authored-by: Ash Berlin-Taylor <ash_github@firemirror.com>
airflow
13
Python
46
test_scheduler_job.py
def test_emit_pool_starving_tasks_metrics(self, mock_stats_gauge, dag_maker): self.scheduler_job = SchedulerJob(subdir=os.devnull) session = settings.Session() dag_id = 'SchedulerJobTest.test_emit_pool_starving_tasks_metrics' with dag_maker(dag_id=dag_id): op = DummyOperator(task_id='op', pool_slots=2) dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED) ti = dr.get_task_instance(op.task_id, session) ti.state = State.SCHEDULED set_default_pool_slots(1) session.flush() res = self.scheduler_job._executable_task_instances_to_queued(max_tis=32, session=session) assert 0 == len(res) mock_stats_gauge.assert_has_calls( [ mock.call('scheduler.tasks.starving', 1), mock.call(f'pool.starving_tasks.{Pool.DEFAULT_POOL_NAME}', 1), ], any_order=True, ) mock_stats_gauge.reset_mock() set_default_pool_slots(2) session.flush() res = self.scheduler_job._executable_task_instances_to_queued(max_tis=32, session=session) assert 1 == len(res) mock_stats_gauge.assert_has_calls( [ mock.call('scheduler.tasks.starving', 0), mock.call(f'pool.starving_tasks.{Pool.DEFAULT_POOL_NAME}', 0), ], any_order=True, ) session.rollback() session.close()
0367a92881e88df36dabb81ef837e5256f3db89d
223
https://github.com/apache/airflow.git
360
def test_emit_pool_starving_tasks_metrics(self, mock_stats_gauge, dag_maker): self.scheduler_job = SchedulerJob(subdir=os.devnull) session = settings.Session() dag_id = 'SchedulerJobTest.test_emit_pool_starving_tasks_metrics' with dag_maker(dag_id=dag_id): op = DummyOperator(task_id='op', pool_slots=2) dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED) ti = dr.get_task_instance(op.task_id, session) ti.state = State.SCHEDULED set_default_pool_slots(1) session.flush() res = self.scheduler_job._executable_task_instances_to_queued(max_tis=32, session=session) assert 0 == len(res) mock_stats_gauge.assert_has_calls( [ mock.call('scheduler.tasks.starving', 1), mock.call(f'pool.starving_tasks.{Pool.DEFAULT_POOL_NAME}', 1), ], any_order=True, ) mock_stats_gauge.reset_mock() set_default_pool_slots(2) session.flush() res = self.scheduler_job._executable_task_instances_to_queued(max_tis=32, session=session) assert 1 == len(res) mock_stats_gauge.assert_has_calls( [ mock.call('scheduler.tasks.starving', 0), mock.call(f'pool.starving_tasks.{Pool.DEFAULT_POO
41
369
test_emit_pool_starving_tasks_metrics
16
0
1
4
easyocr/DBNet/assets/ops/dcn/modules/deform_conv.py
123,134
add dbnet
EasyOCR
8
Python
15
deform_conv.py
def forward(self, x, offset, mask): return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups)
803b90729d25fda253011c505d0189e8e63cc039
48
https://github.com/JaidedAI/EasyOCR.git
94
def forward(self, x, offset, mask): return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation,
13
62
forward
10
0
2
55
tests/components/bluetooth/test_init.py
317,001
Add support for subscribing to bluetooth callbacks by address (#74773)
core
7
Python
8
test_init.py
async def test_register_callback_by_address(hass, mock_bleak_scanner_start): mock_bt = [] callbacks = []
c2fefe03b2dc800f42de695f0b73a8f26621d882
318
https://github.com/home-assistant/core.git
19
async def test_register_callback_by_address(hass, mock_bleak_scanner_start):
5
30
test_register_callback_by_address
33
0
5
46
website/homepage/render_html.py
179,196
added emojis to navbar; added guides main page
gradio
14
Python
27
render_html.py
def render_docs(): if os.path.exists("generated/colab_links.json"): with open("generated/colab_links.json") as demo_links_file: try: demo_links = json.load(demo_links_file) except ValueError: demo_links = {} else: # docs will be missing demo links demo_links = {} SCREENSHOT_FOLDER = "dist/assets/demo_screenshots" os.makedirs(SCREENSHOT_FOLDER, exist_ok=True)
b065879054492fbfdfce9d767f13e02019e7764b
300
https://github.com/gradio-app/gradio.git
111
def render_docs(): if os.path.exists("ge
13
107
render_docs
12
0
1
3
mindsdb/integrations/handlers/mysql_handler/tests/test_mysql_handler.py
115,670
Test added
mindsdb
9
Python
12
test_mysql_handler.py
def test_1_native_query_show_dbs(self): dbs = self.handler.native_query("SHOW DATABASES;") assert dbs[' '] is not RESPONSE_TYPE.ERROR
414b259284343c26fba31b29121c6462b3666fb9
25
https://github.com/mindsdb/mindsdb.git
25
def test_1_native_query_show_dbs(self): dbs = self.handler.native_query("SHOW DATABASE
7
43
test_1_native_query_show_dbs
598
0
54
108
nuitka/build/SconsCompilerSettings.py
178,702
macOS: Minor cleanups
Nuitka
19
Python
323
SconsCompilerSettings.py
def setupCCompiler(env, lto_mode, pgo_mode, job_count): # This is driven by many branches on purpose and has a lot of things # to deal with for LTO checks and flags, pylint: disable=too-many-branches,too-many-statements # Enable LTO for compiler. _enableLtoSettings( env=env, lto_mode=lto_mode, pgo_mode=pgo_mode, job_count=job_count, ) _detectWindowsSDK(env) _enableC11Settings(env) if env.gcc_mode: # Support for gcc and clang, restricting visibility as much as possible. env.Append(CCFLAGS=["-fvisibility=hidden"]) if not env.c11_mode: env.Append(CXXFLAGS=["-fvisibility-inlines-hidden"]) if isWin32Windows(): # On Windows, exporting to DLL need to be controlled. env.Append(LINKFLAGS=["-Wl,--exclude-all-symbols"]) # Make sure we handle import library on our own and put it into the # build directory. env.Append( LINKFLAGS=[ "-Wl,--out-implib,%s" % os.path.join(env.source_dir, "import.lib") ] ) # Make it clear how to handle integer overflows, namely by wrapping around # to negative values. env.Append(CCFLAGS=["-fwrapv"]) if not env.low_memory: # Avoid IO for compilation as much as possible, this should make the # compilation more memory hungry, but also faster. env.Append(CCFLAGS="-pipe") # Support for clang. if "clang" in env.the_cc_name: env.Append(CCFLAGS=["-w"]) env.Append(CPPDEFINES=["_XOPEN_SOURCE"]) # Don't export anything by default, this should create smaller executables. env.Append(CCFLAGS=["-fvisibility=hidden", "-fvisibility-inlines-hidden"]) if env.debug_mode: env.Append(CCFLAGS=["-Wunused-but-set-variable"]) # Support for macOS standalone backporting. if isMacOS(): setEnvironmentVariable(env, "MACOSX_DEPLOYMENT_TARGET", env.macos_min_version) target_flag = "--target=%s-apple-macos%s" % ( env.macos_target_arch, env.macos_min_version, ) env.Append(CCFLAGS=[target_flag]) env.Append(LINKFLAGS=[target_flag]) # The 32 bits MinGW does not default for API level properly, so help it. if env.mingw_mode: # Windows XP env.Append(CPPDEFINES=["_WIN32_WINNT=0x0501"]) # Unicode entry points for programs. if env.mingw_mode: env.Append(LINKFLAGS=["-municode"]) # Detect the gcc version if env.gcc_version is None and env.gcc_mode and not env.clang_mode: env.gcc_version = myDetectVersion(env, env.the_compiler) # Older g++ complains about aliasing with Py_True and Py_False, but we don't # care. if env.gcc_mode and not env.clang_mode and env.gcc_version < (4, 5): env.Append(CCFLAGS=["-fno-strict-aliasing"]) # For gcc 4.6 or higher, there are some new interesting functions. if env.gcc_mode and not env.clang_mode and env.gcc_version >= (4, 6): env.Append(CCFLAGS=["-fpartial-inlining"]) if env.debug_mode: env.Append(CCFLAGS=["-Wunused-but-set-variable"]) # Save some memory for gcc by not tracing macro code locations at all. if ( not env.debug_mode and env.gcc_mode and not env.clang_mode and env.gcc_version >= (5,) ): env.Append(CCFLAGS=["-ftrack-macro-expansion=0"]) # We don't care about deprecations. if env.gcc_mode and not env.clang_mode: env.Append(CCFLAGS=["-Wno-deprecated-declarations"]) # The var-tracking does not scale, disable it. Should we really need it, we # can enable it. TODO: Does this cause a performance loss? if env.gcc_mode and not env.clang_mode: env.Append(CCFLAGS=["-fno-var-tracking"]) # For large files, these can issue warnings about disabling # itself, while we do not need it really. if env.gcc_mode and not env.clang_mode and env.gcc_version >= (6,): env.Append(CCFLAGS=["-Wno-misleading-indentation"]) # Disable output of notes, e.g. 
on struct alignment layout changes for # some arches, we don't care. if env.gcc_mode and not env.clang_mode: env.Append(CCFLAGS=["-fcompare-debug-second"]) # Prevent using LTO when told not to use it, causes errors with some # static link libraries. if ( env.gcc_mode and not env.clang_mode and env.static_libpython and not env.lto_mode ): env.Append(CCFLAGS=["-fno-lto"]) env.Append(LINKFLAGS=["-fno-lto"]) # Set optimization level for gcc and clang in LTO mode if env.gcc_mode and env.lto_mode: if env.debug_mode: env.Append(LINKFLAGS=["-Og"]) else: # For LTO with static libpython combined, there are crashes with Python core # being inlined, so we must refrain from that. On Windows there is no such # thing, and Nuitka-Python is not affected. env.Append( LINKFLAGS=[ "-O3" if env.nuitka_python or os.name == "nt" or not env.static_libpython else "-O2" ] ) # When debugging, optimize less than when optimizing, when not remove # assertions. if env.debug_mode: if env.clang_mode or (env.gcc_mode and env.gcc_version >= (4, 8)): env.Append(CCFLAGS=["-Og"]) elif env.gcc_mode: env.Append(CCFLAGS=["-O1"]) elif env.msvc_mode: env.Append(CCFLAGS=["-O2"]) else: if env.gcc_mode: env.Append( CCFLAGS=[ "-O3" if env.nuitka_python or os.name == "nt" or not env.static_libpython else "-O2" ] ) elif env.msvc_mode: env.Append( CCFLAGS=[ "/Ox", # Enable most speed optimization "/GF", # Eliminate duplicate strings. "/Gy", # Function level object storage, to allow removing unused ones ] ) env.Append(CPPDEFINES=["__NUITKA_NO_ASSERT__"])
7f9a8a2b207dfdf46e1264d6d9b61466b80875d0
716
https://github.com/Nuitka/Nuitka.git
1,712
def setupCCompiler(env, lto_mode, pgo_mode, job_count): # This is driven by many branches on purpose and has a lot of things # to deal with for LTO checks and flags, pylint: disable=too-many-branches,too-many-statements # Enable LTO for compiler. _enableLtoSettings( env=env, lto_mode=lto_mode, pgo_mode=pgo_mode, job_count=job_count, ) _detectWindowsSDK(env) _enableC11Settings(env) if env.gcc_mode: # Support for gcc and clang, restricting visibility as much as possible. env.Append(CCFLAGS=["-fvisibility=hidden"]) if not env.c11_mode: env.Append(CXXFLAGS=["-fvisibility-inlines-hidden"]) if isWin32Windows(): # On Windows, exporting to DLL need to be controlled. env.Append(LINKFLAGS=["-Wl,--exclude-all-symbols"]) # Make sure we handle import library on our own and put it into the # build directory. env.Append( LINKFLAGS=[ "-Wl,--out-implib,%s" % os.path.join(env.source_dir, "import.lib") ] ) # Make it clear how to handle integer overflows, namely by wrapping around # to negative values. env.Append(CCFLAGS=["-fwrapv"]) if not env.low_memory: # Avoid IO for compilation as much as possible, this should make the # compilation more memory hungry, but also faster. env.Append(CCFLAGS="-pipe") # Support for clang. if "clang" in env.the_cc_name: env.Append(CCFLAGS=["-w"]) env.Append(CPPDEFINES=["_XOPEN_SOURCE"]) # Don't export anything by default, this should create smaller executables. env.Append(CCFLAGS=["-fvisibility=hidden", "-fvisibility-inlines-hidden"]) if env.debug_mode: env.Append(CCFLAGS=["-Wunused-but-set-variable"]) # Support for macOS standalone backporting. if isMacOS(): setEnvironmentVariable(env, "MACOSX_DEPLOYMENT_TARGET", env.macos_min_version) target_flag = "--target=%s-apple-macos%s" % ( env.macos_target_arch, env.macos_min_version, ) env.Append(CCFLAGS=[target_flag]) env.Append(LINKFLAGS=[target_flag]) # The 32 bits MinGW does not default for API level properly, so help it. if env.mingw_mode: # Windows XP env.Append(CPPDEFINES=["_WIN32_WINNT=0x0501"]) # Unicode entry points for programs. if env.mingw_mode: env.Append(LINKFLAGS=["-municode"]) # Detect the gcc version if env.gcc_version is None and env.
37
1,239
setupCCompiler
13
0
1
9
tests/blocks/test_core.py
54,260
Require version
prefect
11
Python
13
test_core.py
async def test_registering_and_getting_blocks(): with pytest.raises(ValueError, match="(No block spec exists)"): get_block_spec("is anyone home", "1.0")
58b51caba356ad021a3b7b76f61d28a40884ba11
66
https://github.com/PrefectHQ/prefect.git
22
async def test_registering_and_getting_blocks(): with pytest.raises(ValueError, match="(No block spec exists)"): get_block_spec("is anyo
6
44
test_registering_and_getting_blocks
12
0
1
7
scripts/tools/initialize_virtualenv.py
47,799
add script to initialise virtualenv (#22971) Co-authored-by: Jarek Potiuk <jarek@potiuk.com>
airflow
8
Python
11
initialize_virtualenv.py
def get_python_version() -> str: major = sys.version_info[0] minor = sys.version_info[1] return f"{major}.{minor}"
03bef084b3f1611e1becdd6ad0ff4c0d2dd909ac
26
https://github.com/apache/airflow.git
24
def get_python_version() -> str: major = sys
6
52
get_python_version
6
0
1
4
wagtail/admin/viewsets/chooser.py
77,710
Add ChooserViewSet
wagtail
9
Python
6
chooser.py
def chosen_view(self): return self.chosen_view_class.as_view( model=self.model, )
4b3c57d72ced0f64378cef26fa12a77bce966ac1
19
https://github.com/wagtail/wagtail.git
30
def chosen_view(self): return self.chosen_view_class.as_view(
5
30
chosen_view
64
0
1
35
src/prefect/orion/database/migrations/versions/postgresql/5f376def75c3_.py
53,586
Fix syntax error in autogenerated migration file
prefect
14
Python
42
5f376def75c3_.py
def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table( "block_data", sa.Column( "id", prefect.orion.utilities.database.UUID(), server_default=sa.text("(GEN_RANDOM_UUID())"), nullable=False, ), sa.Column( "created", prefect.orion.utilities.database.Timestamp(timezone=True), server_default=sa.text("CURRENT_TIMESTAMP"), nullable=False, ), sa.Column( "updated", prefect.orion.utilities.database.Timestamp(timezone=True), server_default=sa.text("CURRENT_TIMESTAMP"), nullable=False, ), sa.Column("name", sa.String(), nullable=False), sa.Column("blockref", sa.String(), nullable=False), sa.Column( "data", prefect.orion.utilities.database.JSON(astext_type=sa.Text()), server_default="{}", nullable=False, ), sa.PrimaryKeyConstraint("id", name=op.f("pk_block_data")), ) op.create_index(op.f("ix_block_data__name"), "block_data", ["name"], unique=True) op.create_index( op.f("ix_block_data__updated"), "block_data", ["updated"], unique=False ) # ### end Alembic commands ###
e41e3a0b19d7fdada1c7feff4dffe9841b39269e
243
https://github.com/PrefectHQ/prefect.git
351
def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table( "block_data", sa.Column( "id", prefect.orion.utilities.database.UUID(), server_default=sa.text("(GEN_RANDOM_UUID())"), nullable=False, ), sa.Column( "created", prefect.orion.utilities.database.Timestamp(timezone=True), server_default=sa.text("CURRENT_TIMESTAMP"), nullable=False, ), sa.Column( "updated", prefect.orion.utilities.database.Timestamp(timezone=True),
24
389
upgrade
52
1
2
13
test/mitmproxy/test_eventsequence.py
252,350
Add support for raw UDP. (#5414)
mitmproxy
12
Python
31
test_eventsequence.py
def test_udp_flow(err): f = tflow.tudpflow(err=err) i = eventsequence.iterate(f) assert isinstance(next(i), layers.udp.UdpStartHook) assert len(f.messages) == 0 assert isinstance(next(i), layers.udp.UdpMessageHook) assert len(f.messages) == 1 assert isinstance(next(i), layers.udp.UdpMessageHook) assert len(f.messages) == 2 if err: assert isinstance(next(i), layers.udp.UdpErrorHook) else: assert isinstance(next(i), layers.udp.UdpEndHook) @pytest.mark.parametrize( "resp, err", [ (False, False), (True, False), (False, True), (True, True), ], )
cd4a74fae7cbd8119afc3900597f798ec1604db7
@pytest.mark.parametrize( "resp, err", [ (False, False), (True, False), (False, True), (True, True), ], )
125
https://github.com/mitmproxy/mitmproxy.git
130
def test_udp_flow(err): f = tflow.tudpflow(err=err) i = eventsequence.iterate(f) assert is
21
244
test_udp_flow
15
0
1
5
keras/legacy_tf_layers/variable_scope_shim_test.py
274,478
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
9
Python
11
variable_scope_shim_test.py
def testGetVar(self): vs = variable_scope._get_default_variable_store() v = vs.get_variable("v", [1]) v1 = vs.get_variable("v", [1]) self.assertIs(v, v1)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
44
https://github.com/keras-team/keras.git
42
def testGetVar(self): vs = variable_scope._get_default_variable_store() v = vs.get_variable("v", [1]) v1 = vs.get_varia
9
73
testGetVar
87
0
2
20
tools/ci_code_validator/tests/test_tools.py
3,855
🎉 Single py checker (#10246)
airbyte
18
Python
67
test_tools.py
def test_tool(tmp_path, toml_config_file, cmd, package_dir, expected_file): cmd = cmd.format(package_dir=package_dir, toml_config_file=toml_config_file) proc = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, _ = proc.communicate() file_log = tmp_path / "temp.log" file_log.write_bytes(out) assert file_log.is_file() is True issues_file = tmp_path / "issues.json" with requests_mock.Mocker() as m: m.get('/api/authentication/validate', json={"valid": True}) m.get("/api/rules/search", json={"rules": []}) m.post("/api/rules/create", json={}) parser = LogParser(issues_file, host="http://fake.com/", token="fake_token") assert getattr(parser, f'from_{cmd.split(" ")[0]}')(file_log) == 0 assert issues_file.is_file() is True data = json.loads(issues_file.read_text()) for issue in data["issues"]: issue["primaryLocation"]["filePath"] = "/".join(issue["primaryLocation"]["filePath"].split("/")[-2:]) expected_data = json.loads(Path(expected_file).read_text()) assert json.dumps(data, sort_keys=True, separators=(',', ': ')) == json.dumps(expected_data, sort_keys=True, separators=(',', ': '))
61b0e9e196ea07795d47effc670bcb981117c030
272
https://github.com/airbytehq/airbyte.git
191
def test_tool(tmp_path, toml_config_file, cmd, package_dir, expected_file): cmd = cmd.format(package_dir=package_dir, toml_config_file=toml_config_file) proc = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, _ = proc.communicate() file_log = tmp_path / "temp.log" file_log.write_bytes(out) assert file_log.is_file() is True issues_file = tmp_path / "issues.json" with requests_mock.Mocker() as m: m.get('/api/authentication/validate', json={"valid": True}) m.get("/api/rules/search", json={"rules": []}) m.post("/api/rules/create", json={}) parser = LogParser(issues_file, host="http://fake.com/", token="fake_token") assert getattr(parser, f'from_{cmd.split(" ")[0]}')(file_log) == 0 assert issues_file.is_file() is True data = json.loads(issues_file.read_text()) for issue in data["issues"]: issue["primaryLocation"]["filePath"] = "/".join(issue["primaryLocation"]["filePath"].split("/")[-2:]) expected_data = json.loads(Path(expected_file).read_text()) assert json.dumps(data, sort_keys=True, separators=(',', ': ')) == json.dumps(expected_data, sort_keys=True, separators=(',', ': '))
42
474
test_tool
8
0
1
3
python/ray/util/ml_utils/tests/test_mlflow.py
133,181
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
9
Python
8
test_mlflow.py
def test_experiment_id(self): self.mlflow_util.setup_mlflow(tracking_uri=self.tracking_uri, experiment_id="0") assert self.mlflow_util.experiment_id == "0"
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
29
https://github.com/ray-project/ray.git
21
def test_experiment_id(self): self.mlflow_util.setup_mlflow(tracking_uri=self.tracki
6
49
test_experiment_id
52
0
1
26
tests/bulk_create/tests.py
201,941
Refs #33476 -- Reformatted code with Black.
django
12
Python
35
tests.py
def _test_update_conflicts_two_fields(self, unique_fields): TwoFields.objects.bulk_create( [ TwoFields(f1=1, f2=1, name="a"), TwoFields(f1=2, f2=2, name="b"), ] ) self.assertEqual(TwoFields.objects.count(), 2) conflicting_objects = [ TwoFields(f1=1, f2=1, name="c"), TwoFields(f1=2, f2=2, name="d"), ] TwoFields.objects.bulk_create( conflicting_objects, update_conflicts=True, unique_fields=unique_fields, update_fields=["name"], ) self.assertEqual(TwoFields.objects.count(), 2) self.assertCountEqual( TwoFields.objects.values("f1", "f2", "name"), [ {"f1": 1, "f2": 1, "name": "c"}, {"f1": 2, "f2": 2, "name": "d"}, ], )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
180
https://github.com/django/django.git
302
def _test_update_conflicts_two_fields(self, unique_fields): TwoFields.objects.bulk_create( [ TwoFields(f1=1, f2=1, name="a"), TwoFields(f1=2, f2=2, name="b"), ] ) self.assertEqual(TwoFields.objects.count(), 2) conflicting_objects = [ TwoFields(f1=1, f2=1, name="c"), TwoFields(f1=2, f2=2, name="d"), ] TwoFields.objects.bulk_create( conflicting_objects, update_conflicts=True, unique_fields=unique_fields, update_fields=["name"], ) self.assertEqual(TwoFields.objects.count(), 2) self.assertCountEqual( TwoFields.objects.values("f1", "f2", "name"),
16
287
_test_update_conflicts_two_fields
10
0
1
5
tests/components/skybell/__init__.py
303,258
Add config flow to skybell (#70887)
core
11
Python
10
__init__.py
def _patch_skybell() -> None: return patch( "homeassistant.components.skybell.config_flow.Skybell.async_send_request", return_value={"id": USER_ID}, )
a502a8798ff74eb6185473df7f69553fc4663634
20
https://github.com/home-assistant/core.git
29
def _patch_skybell() -> None: return patch( "homeassistant.components.skybell.config_flow.Skybell.async_send_request", return_value={"id": USER_ID}, )
4
35
_patch_skybell
156
0
5
40
python/ccxt/async_support/gateio.py
18,323
1.72.78 [ci skip]
ccxt
12
Python
102
gateio.py
def parse_transaction(self, transaction, currency=None): # # deposits # # { # "id": "d33361395", # "currency": "USDT_TRX", # "address": "TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z", # "amount": "100", # "txid": "ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0", # "timestamp": "1626345819", # "status": "DONE", # "memo": "" # } # # withdrawals id = self.safe_string(transaction, 'id') type = None amount = self.safe_string(transaction, 'amount') if id[0] == 'b': # GateCode handling type = 'deposit' if Precise.string_gt(amount, '0') else 'withdrawal' amount = Precise.string_abs(amount) elif id is not None: type = self.parse_transaction_type(id[0]) currencyId = self.safe_string(transaction, 'currency') code = self.safe_currency_code(currencyId) txid = self.safe_string(transaction, 'txid') rawStatus = self.safe_string(transaction, 'status') status = self.parse_transaction_status(rawStatus) address = self.safe_string(transaction, 'address') fee = self.safe_number(transaction, 'fee') tag = self.safe_string(transaction, 'memo') if tag == '': tag = None timestamp = self.safe_timestamp(transaction, 'timestamp') return { 'info': transaction, 'id': id, 'txid': txid, 'currency': code, 'amount': self.parse_number(amount), 'network': None, 'address': address, 'addressTo': None, 'addressFrom': None, 'tag': tag, 'tagTo': None, 'tagFrom': None, 'status': status, 'type': type, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'updated': None, 'fee': fee, }
6a6664b154a2f3a123e4a750457e1ec39fd74e22
260
https://github.com/ccxt/ccxt.git
688
def parse_transaction(self, transaction, currency=None): # # deposits # # { # "id": "d33361395", # "currency": "USDT_TRX", # "address": "TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z", # "amount": "100", # "txid": "ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0", # "timestamp": "1626345819", # "status": "DONE", # "memo": "" # } # # withdrawals id = self.safe_string(transaction, 'id') type = None amount = self.safe_string(transaction, 'amount') if id[0] == 'b': # GateCode handling type = 'deposit' if Precise.string_gt(amount, '0') else 'withdrawal' amount = Precise.string_abs(amount) elif id is not None: type = self.parse_transaction_type(id[0]) currencyId = self.safe_string(transaction, 'currency') code = self.safe_currency_code(currencyId) txid = self.safe_string(transaction, 'txid') rawSta
27
457
parse_transaction
72
0
7
22
erpnext/accounts/doctype/invoice_discounting/test_invoice_discounting.py
64,869
style: format code with black
erpnext
12
Python
49
test_invoice_discounting.py
def create_invoice_discounting(invoices, **args): args = frappe._dict(args) inv_disc = frappe.new_doc("Invoice Discounting") inv_disc.posting_date = args.posting_date or nowdate() inv_disc.company = args.company or "_Test Company" inv_disc.bank_account = args.bank_account inv_disc.short_term_loan = args.short_term_loan inv_disc.accounts_receivable_credit = args.accounts_receivable_credit inv_disc.accounts_receivable_discounted = args.accounts_receivable_discounted inv_disc.accounts_receivable_unpaid = args.accounts_receivable_unpaid inv_disc.short_term_loan = args.short_term_loan inv_disc.bank_charges_account = args.bank_charges_account inv_disc.bank_account = args.bank_account inv_disc.loan_start_date = args.start or nowdate() inv_disc.loan_period = args.period or 30 inv_disc.bank_charges = flt(args.bank_charges) for d in invoices: inv_disc.append("invoices", {"sales_invoice": d}) inv_disc.insert() if not args.do_not_submit: inv_disc.submit() return inv_disc
494bd9ef78313436f0424b918f200dab8fc7c20b
165
https://github.com/frappe/erpnext.git
50
def create_invoice_discounting(invoices, **args): args = frappe._dict(args) inv_disc = frappe.new_doc("Invoice Discounting") inv_disc.posting_date = args.posting_date or nowdate() inv_disc.company = args.company or "_Test Company" inv_disc.bank_account = args.bank_account inv_disc.short_term_loan = args.short_term_loan inv_disc.accounts_receivable_credit = args.accounts_receivable_credit inv_disc.accounts_receivable_discounted = args.accounts_receivable_discounted inv_disc.accounts_receivable_unpaid = args.accounts_receivable_unpaid inv_disc.short_term_loan = args.short_term_loan inv_disc.bank_charges_account = args.bank_charges_account inv_disc.bank_account = args.bank_account inv_disc.loan_start_date = args.start or
27
271
create_invoice_discounting
138
0
13
89
jina/orchestrate/flow/base.py
12,415
fix: success box ui
jina
17
Python
78
base.py
def _get_address_table(self, address_table): _protocol = str(self.protocol) if self.gateway_args.ssl_certfile and self.gateway_args.ssl_keyfile: _protocol = f'{self.protocol}S' address_table.add_row( ':link:', 'Protocol', f':closed_lock_with_key: {_protocol}' ) else: address_table.add_row(':link:', 'Protocol', _protocol) _protocol = _protocol.lower() address_table.add_row( ':house:', 'Local', f'[link={_protocol}://{self.host}:{self.port}]{self.host}:{self.port}[/]', ) address_table.add_row( ':lock:', 'Private', f'[link={_protocol}://{self.address_private}:{self.port}]{self.address_private}:{self.port}[/]', ) if self.address_public: address_table.add_row( ':earth_africa:', 'Public', f'[link={_protocol}://{self.address_public}:{self.port}]{self.address_public}:{self.port}[/]', ) if self.protocol == GatewayProtocolType.HTTP: _address = [ f'[link={_protocol}://localhost:{self.port}/docs]Local[/]', f'[link={_protocol}://{self.address_private}:{self.port}/docs]Private[/]', ] if self.address_public: _address.append( f'[link={_protocol}://{self.address_public}:{self.port}/docs]Public[/]' ) address_table.add_row( ':speech_balloon:', 'Swagger UI [dim](/docs)[/]', '·'.join(_address), ) _address = [ f'[link={_protocol}://localhost:{self.port}/redoc]Local[/]', f'[link={_protocol}://{self.address_private}:{self.port}/redoc]Private[/]', ] if self.address_public: _address.append( f'[link={_protocol}://{self.address_public}:{self.port}/redoc]Public[/]' ) address_table.add_row( ':books:', 'Redoc [dim](/redoc)[/]', '·'.join(_address), ) if self.gateway_args.expose_graphql_endpoint: _address = [ f'[link={_protocol}://localhost:{self.port}/graphql]Local[/]', f'[link={_protocol}://{self.address_private}:{self.port}/graphql]Private[/]', ] if self.address_public: _address.append( f'[link={_protocol}://{self.address_public}:{self.port}/graphql]Public[/]' ) address_table.add_row( ':strawberry:', 'GraphQL UI [dim](/graphql)[/]', '·'.join(_address), ) if self.monitoring: for name, deployment in self: _address = [ f'[link=http://localhost:{deployment.args.port_monitoring}]Local[/]', f'[link=http://{self.address_private}:{deployment.args.port_monitoring}]Private[/]', ] if self.address_public: _address.append( f'[link=http://{self.address_public}:{deployment.args.port_monitoring}]Public[/]' ) if deployment.args.monitoring: address_table.add_row( ':bar_chart:', f'Monitor [b]{name}:{deployment.args.port_monitoring}[/]', '·'.join(_address), ) return self[GATEWAY_NAME].args.port_monitoring else: return self._common_kwargs.get( 'port_monitoring', __default_port_monitoring__ ) return address_table
674e8121fb5dfdac4ce88a8ade1d248d16b75617
315
https://github.com/jina-ai/jina.git
1,345
def _get_address_table(self, address_table): _protocol = str(self.protocol) if self.gateway_args.ssl_certfile and self.gateway_args.ssl_keyfile: _protocol = f'{self.protocol}S' address_table.add_row( ':link:', 'Protocol', f':closed_lock_with_key: {_protocol}' ) else: address_table.add_row(':link:', 'Protocol', _protocol) _protocol = _protocol.lower() address_table.add_row( ':house:', 'Local', f'[link={_protocol}://{self.host}:{self.port}]{self.host}:{self.port}[/]', ) address_table.add_row( ':lock:', 'Private', f'[link={_protocol}://{self.address_private}:{self.port}]{self.address_private}:{self.port}[/]', ) if self.address_public: address_table.add_row( ':earth_africa:', 'Public', f'[link={_protocol}://{self.address_public}:{self.port}]{self.address_public}:{self.port}[/]', ) if self.protocol == GatewayProtocolType.HTTP: _address = [ f'[link={_protocol}://localhost:{self.port}/docs]Local[/]', f'[link={_protocol}://{self.address_private}:{self.port}/docs]Private[/]', ] if self.address_public: _address.append( f'[link={_protocol}://{self.address_public}:{self.port}/docs]Public[/]' ) address_table.add_row( ':speech_balloon:', 'Swagger UI [dim](/docs)[/]', '·'.join(_address), ) _address = [ f'[link={_protocol}://localhost:{self.port}/redoc]Local[/]', f'[link={_protocol}://{self.address_private}:{self.port}/redoc]Private[/]', ] if self.address_public: _address.append( f'[link={_protocol}
30
830
_get_address_table
16
0
1
4
plugins/dbms/extremedb/enumeration.py
123,582
Fixing DeprecationWarning (logger.warn)
sqlmap
7
Python
16
enumeration.py
def searchColumn(self): warnMsg = "on eXtremeDB it is not possible to search columns" logger.warning(warnMsg) return []
df4293473d2fb6e887e31522cab5aff95e201581
17
https://github.com/sqlmapproject/sqlmap.git
36
def searchColumn(self): warnMsg = "on eXtremeDB it is not possible to search columns" logger.warning(warnMsg
5
31
searchColumn
31
0
1
6
wagtail/images/tests/test_admin_views.py
75,114
Reformat with black
wagtail
10
Python
27
test_admin_views.py
def test_simple_with_collection_nesting(self): root_collection = Collection.get_first_root_node() evil_plans = root_collection.add_child(name="Evil plans") evil_plans.add_child(name="Eviler plans") response = self.get() # "Eviler Plans" should be prefixed with &#x21b3 (↳) and 4 non-breaking spaces. self.assertContains(response, "&nbsp;&nbsp;&nbsp;&nbsp;&#x21b3 Eviler plans")
d10f15e55806c6944827d801cd9c2d53f5da4186
45
https://github.com/wagtail/wagtail.git
72
def test_simple_with_collection_nesting(self): root_collection = Collection.get_first_root_node() evil_plans = root_collection.add_child(name="Evil plans") evil_plans.add_child(name="Eviler plans") response = self.get() # "Eviler Plans" should be prefixed with &#x21b3 (↳) and 4 non-breaking spaces.
11
81
test_simple_with_collection_nesting
40
0
5
14
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/plugin_utils/connection_base.py
268,503
Add `use_rsa_sha2_algorithms` option for paramiko (#78789) Fixes #76737 Fixes #77673 Co-authored-by: Matt Clay <matt@mystile.com>
ansible
15
Python
33
connection_base.py
def __getattr__(self, name): try: return self.__dict__[name] except KeyError: if not name.startswith("_"): plugin = self._sub_plugin.get("obj") if plugin: method = getattr(plugin, name, None) if method is not None: return method raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__.__name__, name) )
76b746655a36807fa9198064ca9fe7c6cc00083a
74
https://github.com/ansible/ansible.git
218
def __getattr__(self, name): try:
14
120
__getattr__
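The attribute-delegation pattern in the __getattr__ snippet above is easy to reproduce in isolation. A minimal, self-contained sketch (the wrapper class and target object here are illustrative only, not Ansible's actual plugin classes):

class Wrapper:
    def __init__(self, target):
        self._target = target

    def __getattr__(self, name):
        # Only called when normal lookup fails; delegate public names to the wrapped object.
        if not name.startswith("_"):
            attr = getattr(self._target, name, None)
            if attr is not None:
                return attr
        raise AttributeError(
            "'%s' object has no attribute '%s'" % (self.__class__.__name__, name)
        )

w = Wrapper("hello")
print(w.upper())  # delegated to str.upper -> HELLO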
980
0
45
247
rllib/agents/sac/tests/test_sac.py
129,575
[RLlib] Preparatory PR for multi-agent multi-GPU learner (alpha-star style) #03 (#21652)
ray
24
Python
452
test_sac.py
def test_sac_loss_function(self): config = sac.DEFAULT_CONFIG.copy() # Run locally. config["num_workers"] = 0 config["learning_starts"] = 0 config["twin_q"] = False config["gamma"] = 0.99 # Switch on deterministic loss so we can compare the loss values. config["_deterministic_loss"] = True # Use very simple nets. config["Q_model"]["fcnet_hiddens"] = [10] config["policy_model"]["fcnet_hiddens"] = [10] # Make sure, timing differences do not affect trainer.train(). config["min_time_s_per_reporting"] = 0 # Test SAC with Simplex action space. config["env_config"] = {"simplex_actions": True} map_ = { # Action net. "default_policy/fc_1/kernel": "action_model._hidden_layers.0." "_model.0.weight", "default_policy/fc_1/bias": "action_model._hidden_layers.0." "_model.0.bias", "default_policy/fc_out/kernel": "action_model." "_logits._model.0.weight", "default_policy/fc_out/bias": "action_model._logits._model.0.bias", "default_policy/value_out/kernel": "action_model." "_value_branch._model.0.weight", "default_policy/value_out/bias": "action_model." "_value_branch._model.0.bias", # Q-net. "default_policy/fc_1_1/kernel": "q_net." "_hidden_layers.0._model.0.weight", "default_policy/fc_1_1/bias": "q_net." "_hidden_layers.0._model.0.bias", "default_policy/fc_out_1/kernel": "q_net._logits._model.0.weight", "default_policy/fc_out_1/bias": "q_net._logits._model.0.bias", "default_policy/value_out_1/kernel": "q_net." "_value_branch._model.0.weight", "default_policy/value_out_1/bias": "q_net." "_value_branch._model.0.bias", "default_policy/log_alpha": "log_alpha", # Target action-net. "default_policy/fc_1_2/kernel": "action_model." "_hidden_layers.0._model.0.weight", "default_policy/fc_1_2/bias": "action_model." "_hidden_layers.0._model.0.bias", "default_policy/fc_out_2/kernel": "action_model." "_logits._model.0.weight", "default_policy/fc_out_2/bias": "action_model." "_logits._model.0.bias", "default_policy/value_out_2/kernel": "action_model." "_value_branch._model.0.weight", "default_policy/value_out_2/bias": "action_model." "_value_branch._model.0.bias", # Target Q-net "default_policy/fc_1_3/kernel": "q_net." "_hidden_layers.0._model.0.weight", "default_policy/fc_1_3/bias": "q_net." "_hidden_layers.0._model.0.bias", "default_policy/fc_out_3/kernel": "q_net." "_logits._model.0.weight", "default_policy/fc_out_3/bias": "q_net." "_logits._model.0.bias", "default_policy/value_out_3/kernel": "q_net." "_value_branch._model.0.weight", "default_policy/value_out_3/bias": "q_net." "_value_branch._model.0.bias", "default_policy/log_alpha_1": "log_alpha", } env = SimpleEnv batch_size = 100 obs_size = (batch_size, 1) actions = np.random.random(size=(batch_size, 2)) # Batch of size=n. input_ = self._get_batch_helper(obs_size, actions, batch_size) # Simply compare loss values AND grads of all frameworks with each # other. prev_fw_loss = weights_dict = None expect_c, expect_a, expect_e, expect_t = None, None, None, None # History of tf-updated NN-weights over n training steps. tf_updated_weights = [] # History of input batches used. tf_inputs = [] for fw, sess in framework_iterator( config, frameworks=("tf", "torch"), session=True): # Generate Trainer and get its default Policy object. trainer = sac.SACTrainer(config=config, env=env) policy = trainer.get_policy() p_sess = None if sess: p_sess = policy.get_session() # Set all weights (of all nets) to fixed values. if weights_dict is None: # Start with the tf vars-dict. 
assert fw in ["tf2", "tf", "tfe"] weights_dict = policy.get_weights() if fw == "tfe": log_alpha = weights_dict[10] weights_dict = self._translate_tfe_weights( weights_dict, map_) else: assert fw == "torch" # Then transfer that to torch Model. model_dict = self._translate_weights_to_torch( weights_dict, map_) # Have to add this here (not a parameter in tf, but must be # one in torch, so it gets properly copied to the GPU(s)). model_dict["target_entropy"] = policy.model.target_entropy policy.model.load_state_dict(model_dict) policy.target_model.load_state_dict(model_dict) if fw == "tf": log_alpha = weights_dict["default_policy/log_alpha"] elif fw == "torch": # Actually convert to torch tensors (by accessing everything). input_ = policy._lazy_tensor_dict(input_) input_ = {k: input_[k] for k in input_.keys()} log_alpha = policy.model.log_alpha.detach().cpu().numpy()[0] # Only run the expectation once, should be the same anyways # for all frameworks. if expect_c is None: expect_c, expect_a, expect_e, expect_t = \ self._sac_loss_helper(input_, weights_dict, sorted(weights_dict.keys()), log_alpha, fw, gamma=config["gamma"], sess=sess) # Get actual outs and compare to expectation AND previous # framework. c=critic, a=actor, e=entropy, t=td-error. if fw == "tf": c, a, e, t, tf_c_grads, tf_a_grads, tf_e_grads = \ p_sess.run([ policy.critic_loss, policy.actor_loss, policy.alpha_loss, policy.td_error, policy.optimizer().compute_gradients( policy.critic_loss[0], [v for v in policy.model.q_variables() if "value_" not in v.name]), policy.optimizer().compute_gradients( policy.actor_loss, [v for v in policy.model.policy_variables() if "value_" not in v.name]), policy.optimizer().compute_gradients( policy.alpha_loss, policy.model.log_alpha)], feed_dict=policy._get_loss_inputs_dict( input_, shuffle=False)) tf_c_grads = [g for g, v in tf_c_grads] tf_a_grads = [g for g, v in tf_a_grads] tf_e_grads = [g for g, v in tf_e_grads] elif fw == "tfe": with tf.GradientTape() as tape: tf_loss(policy, policy.model, None, input_) c, a, e, t = policy.critic_loss, policy.actor_loss, \ policy.alpha_loss, policy.td_error vars = tape.watched_variables() tf_c_grads = tape.gradient(c[0], vars[6:10]) tf_a_grads = tape.gradient(a, vars[2:6]) tf_e_grads = tape.gradient(e, vars[10]) elif fw == "torch": loss_torch(policy, policy.model, None, input_) c, a, e, t = policy.get_tower_stats("critic_loss")[0], \ policy.get_tower_stats("actor_loss")[0], \ policy.get_tower_stats("alpha_loss")[0], \ policy.get_tower_stats("td_error")[0] # Test actor gradients. policy.actor_optim.zero_grad() assert all(v.grad is None for v in policy.model.q_variables()) assert all( v.grad is None for v in policy.model.policy_variables()) assert policy.model.log_alpha.grad is None a.backward() # `actor_loss` depends on Q-net vars (but these grads must # be ignored and overridden in critic_loss.backward!). assert not all( torch.mean(v.grad) == 0 for v in policy.model.policy_variables()) assert not all( torch.min(v.grad) == 0 for v in policy.model.policy_variables()) assert policy.model.log_alpha.grad is None # Compare with tf ones. torch_a_grads = [ v.grad for v in policy.model.policy_variables() if v.grad is not None ] check(tf_a_grads[2], np.transpose(torch_a_grads[0].detach().cpu())) # Test critic gradients. 
policy.critic_optims[0].zero_grad() assert all( torch.mean(v.grad) == 0.0 for v in policy.model.q_variables() if v.grad is not None) assert all( torch.min(v.grad) == 0.0 for v in policy.model.q_variables() if v.grad is not None) assert policy.model.log_alpha.grad is None c[0].backward() assert not all( torch.mean(v.grad) == 0 for v in policy.model.q_variables() if v.grad is not None) assert not all( torch.min(v.grad) == 0 for v in policy.model.q_variables() if v.grad is not None) assert policy.model.log_alpha.grad is None # Compare with tf ones. torch_c_grads = [v.grad for v in policy.model.q_variables()] check(tf_c_grads[0], np.transpose(torch_c_grads[2].detach().cpu())) # Compare (unchanged(!) actor grads) with tf ones. torch_a_grads = [ v.grad for v in policy.model.policy_variables() ] check(tf_a_grads[2], np.transpose(torch_a_grads[0].detach().cpu())) # Test alpha gradient. policy.alpha_optim.zero_grad() assert policy.model.log_alpha.grad is None e.backward() assert policy.model.log_alpha.grad is not None check(policy.model.log_alpha.grad, tf_e_grads) check(c, expect_c) check(a, expect_a) check(e, expect_e) check(t, expect_t) # Store this framework's losses in prev_fw_loss to compare with # next framework's outputs. if prev_fw_loss is not None: check(c, prev_fw_loss[0]) check(a, prev_fw_loss[1]) check(e, prev_fw_loss[2]) check(t, prev_fw_loss[3]) prev_fw_loss = (c, a, e, t) # Update weights from our batch (n times). for update_iteration in range(5): print("train iteration {}".format(update_iteration)) if fw == "tf": in_ = self._get_batch_helper(obs_size, actions, batch_size) tf_inputs.append(in_) # Set a fake-batch to use # (instead of sampling from replay buffer). buf = MultiAgentReplayBuffer.get_instance_for_testing() buf._fake_batch = in_ trainer.train() updated_weights = policy.get_weights() # Net must have changed. if tf_updated_weights: check( updated_weights["default_policy/fc_1/kernel"], tf_updated_weights[-1][ "default_policy/fc_1/kernel"], false=True) tf_updated_weights.append(updated_weights) # Compare with updated tf-weights. Must all be the same. else: tf_weights = tf_updated_weights[update_iteration] in_ = tf_inputs[update_iteration] # Set a fake-batch to use # (instead of sampling from replay buffer). buf = MultiAgentReplayBuffer.get_instance_for_testing() buf._fake_batch = in_ trainer.train() # Compare updated model. for tf_key in sorted(tf_weights.keys()): if re.search("_[23]|alpha", tf_key): continue tf_var = tf_weights[tf_key] torch_var = policy.model.state_dict()[map_[tf_key]] if tf_var.shape != torch_var.shape: check( tf_var, np.transpose(torch_var.detach().cpu()), atol=0.003) else: check(tf_var, torch_var, atol=0.003) # And alpha. check(policy.model.log_alpha, tf_weights["default_policy/log_alpha"]) # Compare target nets. for tf_key in sorted(tf_weights.keys()): if not re.search("_[23]", tf_key): continue tf_var = tf_weights[tf_key] torch_var = policy.target_model.state_dict()[map_[ tf_key]] if tf_var.shape != torch_var.shape: check( tf_var, np.transpose(torch_var.detach().cpu()), atol=0.003) else: check(tf_var, torch_var, atol=0.003) trainer.stop()
d5bfb7b7da6f8ec505dd8ed69f0be419decfdcc0
1,752
https://github.com/ray-project/ray.git
5,558
def test_sac_loss_function(self): config = sac.DEFAULT_CONFIG.copy() # Run locally. config["num_workers"] = 0 config["learning_starts"] = 0 config["twin_q"] = False config["gamma"] = 0.99 # Switch on deterministic loss so we can compare the loss values. config["_deterministic_loss"] = True # Use very simple nets. config["Q_model"]["fcnet_hiddens"] = [10] config["policy_model"]["fcnet_hiddens"] = [10] # Make sure, timing differences do not affect trainer.train(). config["min_time_s_per_reporting"] = 0 # Test SAC with Simplex action space. config["env_config"] = {"simplex_actions": True} map_ = { # Action net. "default_policy/fc_1/kernel": "action_model._hidden_layers.0." "_model.0.weight", "default_policy/fc_1/bias": "action_model._hidden_layers.0." "_model.0.bias", "default_policy/fc_out/kernel": "action_model." "_logits._model.0.weight", "default_policy/fc_out/bias": "action_model._logits._model.0.bias", "default_policy/value_out/kernel": "action_model." "_value_branch._model.0.weig
122
2,879
test_sac_loss_function
39
0
4
17
src/prefect/agent.py
54,456
Capture 404 errors explicitly so other http errors are not hidden
prefect
15
Python
34
agent.py
async def work_queue_id_from_name(self) -> Optional[UUID]: if not self.work_queue_name: raise ValueError("No work queue name provided.") try: work_queue = await self.client.read_work_queue_by_name(self.work_queue_name) return work_queue.id except httpx.HTTPStatusError as exc: if exc.response.status_code == status.HTTP_404_NOT_FOUND: self.logger.warn(f'No work queue found named "{self.work_queue_name}"') return None else: raise
ccb4cc008efa24ee39a85830c330f83d1fe2477a
73
https://github.com/PrefectHQ/prefect.git
167
async def work_queue_id_from_name(self) -> Optional[UUID]: if not self.work_queue_name: raise ValueError("No work queue name provided.") try: work_queue = await self.client.read_work_queue_by_name(self.work_queue_name) return work_queue.id except httpx.HTTPStatusError as exc: if exc.response.status_code == status.HTTP_404_NOT_FOUND: self.logger.warn(f'No work queue found n
19
132
work_queue_id_from_name
31
0
1
6
tests/utils/test_common.py
159,093
Configurable logging for libraries (#10614) * Make library level logging to be configurable Fixes https://github.com/RasaHQ/rasa/issues/10203 * Create log level documentation under cheatsheet in Rasa docs * Add log docs to `rasa shell --debug` (and others)
rasa
9
Python
23
test_common.py
def test_cli_missing_log_level_default_used(): configure_logging_and_warnings() rasa_logger = logging.getLogger("rasa") # Default log level is currently INFO rasa_logger.level == logging.INFO matplotlib_logger = logging.getLogger("matplotlib") # Default log level for libraries is currently ERROR matplotlib_logger.level == logging.ERROR
f00148b089d326c952880a0e5e6bd4b2dcb98ce5
38
https://github.com/RasaHQ/rasa.git
55
def test_cli_missing_log_level_default_used(): configure_logging_and_warnings() rasa_logger = logging.getLogger("rasa") # Default log level is currently INFO rasa_logger.level == logging.INFO matplotlib_logger = logging.getLogger("matplotlib") # Default log level for libraries is currently ERROR matplotlib_logger.level == logging.ERROR
9
72
test_cli_missing_log_level_default_used
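Worth noting about the Rasa logging test above: the two level comparisons are bare expressions rather than assertions, so as written they cannot fail. A self-contained sketch of the intended check using only the standard library (the setLevel calls stand in for Rasa's configure_logging_and_warnings(), which is not reproduced here):

import logging

# Assumed defaults, mirroring the comments in the test above.
logging.getLogger("rasa").setLevel(logging.INFO)
logging.getLogger("matplotlib").setLevel(logging.ERROR)

assert logging.getLogger("rasa").level == logging.INFO
assert logging.getLogger("matplotlib").level == logging.ERROR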
47
0
1
9
tests/test_py_utils.py
105,458
Fix to dict conversion of `DatasetInfo`/`Features` (#4741) * Add custom asdict * Add test * One more test * Comment
datasets
14
Python
30
test_py_utils.py
def test_asdict(): input = A(x=1, y="foobar") expected_output = {"x": 1, "y": "foobar"} assert asdict(input) == expected_output input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]} expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(input) == expected_output with pytest.raises(TypeError): asdict([1, A(x=10, y="foo")])
6c398c1098feaa6bac2a9ee5cb7dea63ed8dd37b
134
https://github.com/huggingface/datasets.git
74
def test_asdict(): input = A(x=1, y="foobar") expected_output = {"x": 1, "y": "foobar"} assert asdict(input) == expected_output
10
240
test_asdict
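The test_asdict snippet above relies on a helper class A defined elsewhere in the datasets test module. A minimal stand-in that makes the snippet self-contained might look like this (the dataclass form and field types are assumptions inferred from the values used in the test):

from dataclasses import dataclass

@dataclass
class A:
    # Hypothetical fixture class; the real definition lives elsewhere in the test suite.
    x: int
    y: str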
7
0
1
2
homeassistant/components/switchbee/entity.py
288,190
Add cover platform for switchbee integration (#78383) * Added Platform cover for switchbee integration * added cover to .coveragerc * Applied code review feedback from other PR * Addressed comments from other PRs * rebased * Re-add carriage return * Update homeassistant/components/switchbee/cover.py Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> * Update homeassistant/components/switchbee/cover.py Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> * Update homeassistant/components/switchbee/cover.py Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> * Update homeassistant/components/switchbee/cover.py Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> * addressed CR comments * fixes * fixes * more fixes * more fixes * separate entities for cover and somfy cover * fixed isort * more fixes * more fixes * Update homeassistant/components/switchbee/cover.py Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> * Update homeassistant/components/switchbee/cover.py Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> * more fixes * more fixes * more Co-authored-by: epenet <6771947+epenet@users.noreply.github.com>
core
10
Python
7
entity.py
def _get_coordinator_device(self) -> _DeviceTypeT: return cast(_DeviceTypeT, self.coordinator.data[self._device.id])
75510b8e90162a5b7a530d36d141cbada3df644c
25
https://github.com/home-assistant/core.git
13
def _get_coordinator_device(self) -> _DeviceTypeT: return cast(_DeviceTypeT, self.coordinator.data[self._device.id])
8
38
_get_coordinator_device
243
0
1
103
pipenv/patched/notpip/_vendor/idna/uts46data.py
20,152
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
pipenv
8
Python
146
uts46data.py
def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x11FF2, 'X'), (0x11FFF, 'V'), (0x1239A, 'X'), (0x12400, 'V'), (0x1246F, 'X'), (0x12470, 'V'), (0x12475, 'X'), (0x12480, 'V'), (0x12544, 'X'), (0x12F90, 'V'), (0x12FF3, 'X'), (0x13000, 'V'), (0x1342F, 'X'), (0x14400, 'V'), (0x14647, 'X'), (0x16800, 'V'), (0x16A39, 'X'), (0x16A40, 'V'), (0x16A5F, 'X'), (0x16A60, 'V'), (0x16A6A, 'X'), (0x16A6E, 'V'), (0x16ABF, 'X'), (0x16AC0, 'V'), (0x16ACA, 'X'), (0x16AD0, 'V'), (0x16AEE, 'X'), (0x16AF0, 'V'), (0x16AF6, 'X'), (0x16B00, 'V'), (0x16B46, 'X'), (0x16B50, 'V'), (0x16B5A, 'X'), (0x16B5B, 'V'), (0x16B62, 'X'), (0x16B63, 'V'), (0x16B78, 'X'), (0x16B7D, 'V'), (0x16B90, 'X'), (0x16E40, 'M', '𖹠'), (0x16E41, 'M', '𖹡'), (0x16E42, 'M', '𖹢'), (0x16E43, 'M', '𖹣'), (0x16E44, 'M', '𖹤'), (0x16E45, 'M', '𖹥'), (0x16E46, 'M', '𖹦'), (0x16E47, 'M', '𖹧'), (0x16E48, 'M', '𖹨'), (0x16E49, 'M', '𖹩'), (0x16E4A, 'M', '𖹪'), (0x16E4B, 'M', '𖹫'), (0x16E4C, 'M', '𖹬'), (0x16E4D, 'M', '𖹭'), (0x16E4E, 'M', '𖹮'), (0x16E4F, 'M', '𖹯'), (0x16E50, 'M', '𖹰'), (0x16E51, 'M', '𖹱'), (0x16E52, 'M', '𖹲'), (0x16E53, 'M', '𖹳'), (0x16E54, 'M', '𖹴'), (0x16E55, 'M', '𖹵'), (0x16E56, 'M', '𖹶'), (0x16E57, 'M', '𖹷'), (0x16E58, 'M', '𖹸'), (0x16E59, 'M', '𖹹'), (0x16E5A, 'M', '𖹺'), (0x16E5B, 'M', '𖹻'), (0x16E5C, 'M', '𖹼'), (0x16E5D, 'M', '𖹽'), (0x16E5E, 'M', '𖹾'), (0x16E5F, 'M', '𖹿'), (0x16E60, 'V'), (0x16E9B, 'X'), (0x16F00, 'V'), (0x16F4B, 'X'), (0x16F4F, 'V'), (0x16F88, 'X'), (0x16F8F, 'V'), (0x16FA0, 'X'), (0x16FE0, 'V'), (0x16FE5, 'X'), (0x16FF0, 'V'), (0x16FF2, 'X'), (0x17000, 'V'), (0x187F8, 'X'), (0x18800, 'V'), (0x18CD6, 'X'), (0x18D00, 'V'), (0x18D09, 'X'), (0x1AFF0, 'V'), (0x1AFF4, 'X'), (0x1AFF5, 'V'), (0x1AFFC, 'X'), (0x1AFFD, 'V'), (0x1AFFF, 'X'), (0x1B000, 'V'), (0x1B123, 'X'), (0x1B150, 'V'), (0x1B153, 'X'), (0x1B164, 'V'), ]
f3166e673fe8d40277b804d35d77dcdb760fc3b3
693
https://github.com/pypa/pipenv.git
548
def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x11FF2, 'X'), (0x11FFF, 'V'), (0x1239A, 'X'), (0x12400, 'V'), (0x1246F, 'X'), (0x12470, 'V'), (0x12475, 'X'), (0x12480, 'V'), (0x12544, 'X'), (0x12F90, 'V'), (0x12FF3, 'X'), (0x13000, 'V'), (0x1342F, 'X'), (0x14400, 'V'), (0x14647, 'X'), (0x16800, 'V'), (0x16A39, 'X'), (0x16A40, 'V'), (0x16A5F, 'X'), (0x16A60, 'V'), (0x16A6A, 'X'), (0x16A6E, 'V'), (0x16ABF, 'X'), (0x16AC0, 'V'), (0x16ACA, 'X'), (0x16AD0, 'V'), (0x16AEE, 'X'), (0x16AF0, 'V'), (0x16AF6, 'X'), (0x16B00, 'V'), (0x16B46, 'X'), (0x16B50, 'V'), (0x16B5A, 'X'), (0x16B5B, 'V'), (0x16B62, 'X'), (0x16B63, 'V'), (0x16B78, 'X'), (0x16B7D, 'V'), (0x16B90, 'X'), (0x16E40, 'M', '𖹠'), (0x16E41, 'M', '𖹡'), (0x16E42, 'M', '𖹢'), (0x16E43, 'M', '𖹣'), (0x16E44, 'M', '𖹤'), (0x16E45, 'M', '𖹥'), (0x16E46, 'M', '𖹦'), (0x16E47, 'M', '𖹧'), (0x16E48, 'M', '𖹨'), (0x16E49, 'M', '𖹩'), (0x16E4A, 'M', '𖹪'), (0x16E4B, 'M', '𖹫'), (0x16E4C, 'M', '𖹬'), (0x16E4D, 'M', '𖹭'), (0x16E4E, 'M', '𖹮'), (0x16E4F, 'M', '𖹯'), (0x16E50, 'M', '𖹰'), (0x16E51, 'M', '𖹱'), (0x16E52, 'M', '𖹲'), (0x16E53, 'M', '𖹳'), (0x16E54, 'M', '𖹴'), (0x16E55, 'M', '𖹵'), (0x16E56, 'M', '𖹶'), (0x16E57, 'M', '𖹷'), (0x16E58, 'M', '𖹸'), (0x16E59, 'M', '𖹹'), (0x16E5A, 'M', '𖹺'), (0x16E5B, 'M', '𖹻'), (0x16E5C, 'M', '𖹼'), (0x16E5D, 'M', '𖹽'), (0x16E5E, 'M', '𖹾'), (0x16E5F, 'M', '𖹿'), (0x16E60, 'V'), (0x16E9B, 'X'), (0x16F00, 'V'), (0x16F4B, 'X'), (0x16F4F, 'V'), (0x16F88, 'X'), (0x16F8F, 'V'), (0x16FA0, 'X'), (0x16FE0, 'V'), (0x16FE5, 'X'), (0x16FF0, 'V'), (0x16FF2, 'X'), (0x17000, 'V'), (0x187F8, 'X'), (0x18800, 'V'), (0x18CD6, 'X'), (0x18D00, 'V'), (0x18D09, 'X'), (0x1AFF0, 'V'), (0x1AFF4, 'X'), (0x1AFF5, 'V'), (0x1AFFC, 'X'), (0x1AFFD, 'V'), (0x1AFFF, 'X'), (0x1B000, 'V'), (0x1B123, 'X'), (0x1B150, 'V'), (0x1B153, 'X'), (0x1B164, 'V'), ]
6
1,068
_seg_59
26
0
1
5
pandas/tests/scalar/timedelta/test_constructors.py
169,557
API: Timedelta constructor pytimedelta, Tick preserve reso (#48918) * BUG: Timedelta.__new__ * remove assertion * GH refs * API: Timedelta(td64_obj) retain resolution * API: Timedelta constructor pytimedelta, Tick preserve reso * remove debugging variable * remove duplicate
pandas
9
Python
22
test_constructors.py
def test_from_pytimedelta_us_reso(): # pytimedelta has microsecond resolution, so Timedelta(pytd) inherits that td = timedelta(days=4, minutes=3) result = Timedelta(td) assert result.to_pytimedelta() == td assert result._reso == NpyDatetimeUnit.NPY_FR_us.value
ac05d29cf8cae186e96c83a03e2e80542ce2ad38
40
https://github.com/pandas-dev/pandas.git
40
def test_from_pytimedelta_us_reso(): # pytimedelta has microsecond resolution, so Timedelta(pytd) inherits that td = timedelta(days=4, minutes=3) result = Timedelta(td) assert result.to_pytimedelta() == td assert result._reso == NpyDate
12
65
test_from_pytimedelta_us_reso
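A standalone illustration of the behaviour exercised above: datetime.timedelta carries microsecond precision, so a pandas Timedelta built from one round-trips exactly (the internal _reso attribute checked by the test is pandas-internal and omitted here):

from datetime import timedelta
import pandas as pd

td = timedelta(days=4, minutes=3)
result = pd.Timedelta(td)

# Exact round-trip back to datetime.timedelta
assert result.to_pytimedelta() == td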
10
0
1
7
homeassistant/components/logbook/queries/common.py
300,944
Add support for selecting device_ids from the logbook (#72039) Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
core
8
Python
10
common.py
def select_states() -> Select: return select( *EVENT_COLUMNS_FOR_STATE_SELECT, *STATE_COLUMNS, NOT_CONTEXT_ONLY, )
c4fc84ec1e77a18ff392b34389baa86d52388246
19
https://github.com/home-assistant/core.git
40
def select_states() -> Select: return select( *EVENT_COLUMNS_FOR_S
6
32
select_states
77
0
1
14
test/test_table_reader.py
257,337
Add `run_batch` method to all nodes and `Pipeline` to allow batch querying (#2481) * Add run_batch methods for batch querying * Update Documentation & Code Style * Fix mypy * Update Documentation & Code Style * Fix mypy * Fix linter * Fix tests * Update Documentation & Code Style * Fix tests * Update Documentation & Code Style * Fix mypy * Fix rest api test * Update Documentation & Code Style * Add Doc strings * Update Documentation & Code Style * Add batch_size as attribute to nodes supporting batching * Adapt error messages * Adapt type of filters in retrievers * Revert change about truncation_warning in summarizer * Unify multiple_doc_lists tests * Use smaller models in extractor tests * Add return types to JoinAnswers and RouteDocuments * Adapt return statements in reader's run_batch method * Allow list of filters * Adapt error messages * Update Documentation & Code Style * Fix tests * Fix mypy * Adapt print_questions * Remove disabling warning about too many public methods * Add flag for pylint to disable warning about too many public methods in pipelines/base.py and document_stores/base.py * Add type check * Update Documentation & Code Style * Adapt tutorial 11 * Update Documentation & Code Style * Add query_batch method for DCDocStore * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
haystack
14
Python
66
test_table_reader.py
def test_table_reader_batch_single_query_single_doc_list(table_reader): data = { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["58", "47", "60"], "number of movies": ["87", "53", "69"], "date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"], } table = pd.DataFrame(data) query = "When was Di Caprio born?" prediction = table_reader.predict_batch(queries=query, documents=[Document(content=table, content_type="table")]) # Expected output: List of lists of answers assert isinstance(prediction["answers"], list) assert isinstance(prediction["answers"][0], list) assert isinstance(prediction["answers"][0][0], Answer) assert len(prediction["answers"]) == 1 # Predictions for 5 docs
738e008020f146ff9820c290311782f515749c48
134
https://github.com/deepset-ai/haystack.git
135
def test_table_reader_batch_single_query_single_doc_list(table_reader): data = { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["58", "47", "60"], "number of movies": ["87", "53", "69"], "date of birth": ["18 december 1963", "11 november 1974
18
234
test_table_reader_batch_single_query_single_doc_list
61
0
4
12
test/test_nn.py
102,389
No-batch-dim support for ConvNd (#70506) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/70506 Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33355034 Pulled By: jbschlosser fbshipit-source-id: 5a42645299b1d82cee7d461826acca1c5b35a71c
pytorch
18
Python
42
test_nn.py
def test_conv_modules_raise_error_on_incorrect_input_size(self): for dtype in [torch.bfloat16, torch.double, torch.float]: modules = [nn.Conv1d(3, 8, 3).to(dtype), nn.ConvTranspose1d(3, 8, 3).to(dtype), nn.Conv2d(3, 8, 3).to(dtype), nn.ConvTranspose2d(3, 8, 3).to(dtype), nn.Conv3d(3, 8, 3).to(dtype), nn.ConvTranspose3d(3, 8, 3).to(dtype)] invalid_input_dims = [(1, 4), (1, 4), (2, 5), (2, 5), (3, 6), (3, 6)] for invalid_dims, module in zip(invalid_input_dims, modules): for dims in invalid_dims: input = torch.empty(torch.Size((3, ) * dims)) self.assertRaises(RuntimeError, lambda: module(input))
7b8f73dd32a8a893dfb794433ce501e76c53bc89
208
https://github.com/pytorch/pytorch.git
263
def test_conv_modules_raise_error_on_incorrect_input_size(self): for dtype in [torch.bfloat16, torch.double, torch.float]: modules = [nn.Conv1d(3, 8, 3).to(dtype), nn.ConvTranspose1d(3, 8, 3).to(dtype), nn.Conv2d(3, 8, 3).to(dtype), nn.ConvTranspose2d(3, 8, 3).to(dtype), nn.Conv3d(3, 8, 3).to(dtype), nn.ConvTranspose3d(3, 8, 3).to(dtype)] invalid_input_dims = [(1, 4), (1, 4),
26
290
test_conv_modules_raise_error_on_incorrect_input_size
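A tiny reproduction of the behaviour the PyTorch test above depends on; it needs torch installed, and the exact error message varies across versions:

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, 3)
try:
    # A 2-D input is invalid for Conv2d, which expects 3-D (unbatched) or 4-D (batched) input.
    conv(torch.empty(3, 3))
except RuntimeError as err:
    print("rejected as expected:", err)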
61
0
1
22
keras/callbacks_test.py
270,007
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
12
Python
36
callbacks_test.py
def test_TensorBoard_autoTrace_profileTwiceGraphMode(self): tf.compat.v1.disable_eager_execution() inp = keras.Input((1,)) out = keras.layers.Dense(units=1)(inp) model = keras.Model(inp, out) model.compile(gradient_descent.SGD(1), "mse") logdir = os.path.join(self.get_temp_dir(), "tb1") model.fit( np.zeros((64, 1)), np.zeros((64, 1)), batch_size=32, callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=1)], ) # Verifies trace exists in the first logdir. self.assertEqual(1, self._count_trace_file(logdir=logdir)) logdir = os.path.join(self.get_temp_dir(), "tb2") model.fit( np.zeros((64, 1)), np.zeros((64, 1)), batch_size=32, callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=2)], ) # Verifies trace exists in the second logdir. self.assertEqual(1, self._count_trace_file(logdir=logdir))
84afc5193d38057e2e2badf9c889ea87d80d8fbf
221
https://github.com/keras-team/keras.git
253
def test_TensorBoard_autoTrace_profileTwiceGraphMode(self): tf.compat.v1.disable_eager_execution() inp = keras.In
32
340
test_TensorBoard_autoTrace_profileTwiceGraphMode
25
0
1
7
dask/dataframe/io/tests/test_hdf.py
155,999
If hdf file is empty, don't fail on meta creation (#8809)
dask
11
Python
25
test_hdf.py
def test_hdf_empty_dataframe(): pytest.importorskip("tables") # https://github.com/dask/dask/issues/8707 from dask.dataframe.io.hdf import dont_use_fixed_error_message df = pd.DataFrame({"A": [], "B": []}, index=[]) df.to_hdf("data.h5", format="fixed", key="df", mode="w") with pytest.raises(TypeError, match=dont_use_fixed_error_message): dd.read_hdf("data.h5", "df")
e0d34a54ce4930528bbe3c8ded1d85c0c2be7fe6
81
https://github.com/dask/dask.git
49
def test_hdf_empty_dataframe(): pytest.importorskip("tables")
21
142
test_hdf_empty_dataframe
75
0
3
11
ivy/backends/numpy/core/random.py
213,538
renamed dev_str arg to dev for all methods.
ivy
13
Python
58
random.py
def multinomial(population_size, num_samples, batch_size, probs=None, replace=True, dev=None): if probs is None: probs = _np.ones((batch_size, population_size,)) / population_size orig_probs_shape = list(probs.shape) num_classes = orig_probs_shape[-1] probs_flat = _np.reshape(probs, (-1, orig_probs_shape[-1])) probs_flat = probs_flat / _np.sum(probs_flat, -1, keepdims=True) probs_stack = _np.split(probs_flat, probs_flat.shape[0]) samples_stack = [_np.random.choice(num_classes, num_samples, replace, p=prob[0]) for prob in probs_stack] samples_flat = _np.stack(samples_stack) return _np.asarray(_np.reshape(samples_flat, orig_probs_shape[:-1] + [num_samples])) randint = lambda low, high, shape, dev=None: _np.random.randint(low, high, shape) seed = lambda seed_value=0: _np.random.seed(seed_value) shuffle = _np.random.permutation
d743336b1f3654cd0315f380f43eed4116997c1d
165
https://github.com/unifyai/ivy.git
105
def multinomial(population_size, num_samples, batch_size, probs=None, replace=True, dev=None): if probs is None: probs = _np.ones((batch_size, population_size,)) / population_size orig_probs_shape = list(probs.shape) num_classes = orig_probs_shape[-1] probs_flat = _np.reshape(probs, (-1, orig_probs_shape[-1])) probs_flat = probs_flat / _np.sum(probs_flat, -1, keepdims=True) probs_stack = _np.split(probs_flat, probs_flat.shape[0]) samples_stack
34
311
multinomial
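The core of the multinomial helper above in plain NumPy, stripped of the ivy wrapper (the shapes and probabilities are illustrative only):

import numpy as np

probs = np.array([[0.2, 0.8],
                  [0.5, 0.5]])  # one probability row per batch element
num_samples = 4
num_classes = probs.shape[-1]

samples = np.stack(
    [np.random.choice(num_classes, num_samples, replace=True, p=row) for row in probs]
)
print(samples.shape)  # (2, 4): num_samples draws per batch row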
90
1
1
17
tests/integration/smpc/tensor/tensor_abstraction_test.py
2,882
Fix requested changes: Replace block_timeout() -> get(timeout)
PySyft
13
Python
66
tensor_abstraction_test.py
def test_tensor_abstraction_pointer(get_clients, op_str) -> None: clients = get_clients(3) op = getattr(operator, op_str) data_1 = Tensor(child=np.array([[15, 34], [32, 89]], dtype=DEFAULT_INT_NUMPY_TYPE)) data_2 = Tensor(child=np.array([[567, 98], [78, 25]], dtype=DEFAULT_INT_NUMPY_TYPE)) data_3 = Tensor( child=np.array([[125, 10], [124, 28]], dtype=DEFAULT_INT_NUMPY_TYPE) ) tensor_pointer_1 = data_1.send(clients[0]) tensor_pointer_2 = data_2.send(clients[1]) tensor_pointer_3 = data_3.send(clients[2]) # creates an MPCTensor between party 1 and party 2 mpc_1_2 = op(tensor_pointer_1, tensor_pointer_2) # creates and MPCTensor between party 1,2,3 mpc_1_2_3 = op(mpc_1_2, tensor_pointer_3) exp_res = op(data_1, data_2) assert (mpc_1_2.reconstruct(timeout_secs=40) == exp_res.child).all() exp_res = op(exp_res, data_3) assert (mpc_1_2_3.reconstruct(timeout_secs=40) == exp_res.child).all() @pytest.mark.smpc_abstract @pytest.mark.parametrize("op_str", ["add", "sub", "mul"])
a8c5abf1494356f854a81631b814e5928bc0eb8b
@pytest.mark.smpc_abstract @pytest.mark.parametrize("op_str", ["add", "sub", "mul"])
213
https://github.com/OpenMined/PySyft.git
145
def test_tensor_abstraction_pointer(get_clients, op_str) -> None: clients = get_clients(3) op = getattr(operator, op_str) data_1 = Tensor(child=np.array([[15, 34], [32, 89]], dtype=DEFAULT_INT_NUMPY_TYPE)) data_2 = Tensor(child=np.array([[567, 98], [78, 25]], dtype=DEFAULT_INT_NUM
30
361
test_tensor_abstraction_pointer
14
0
1
4
packages/python/plotly/plotly/graph_objs/_figure.py
241,438
upgrade Plotly.js to 2.13.2
plotly.py
8
Python
14
_figure.py
def select_selections(self, selector=None, row=None, col=None, secondary_y=None): return self._select_annotations_like( "selections", selector=selector, row=row, col=col, secondary_y=secondary_y )
a51932f920c5f2407827f10b89b5569c27c13b4b
45
https://github.com/plotly/plotly.py.git
46
def select_selections(self, selector=None, row=None, col=None, secondary_y=None): return self._select_annotations_like( "selections", selector=selector, r
7
66
select_selections
63
0
7
18
asv_bench/benchmarks/array.py
172,149
PERF: ArrowExtensionArray.to_numpy (#49973)
pandas
13
Python
38
array.py
def setup(self, dtype, hasna): N = 100_000 if dtype == "boolean[pyarrow]": data = np.random.choice([True, False], N, replace=True) elif dtype == "float64[pyarrow]": data = np.random.randn(N) elif dtype == "int64[pyarrow]": data = np.arange(N) elif dtype == "string[pyarrow]": data = tm.rands_array(10, N) elif dtype == "timestamp[ns][pyarrow]": data = pd.date_range("2000-01-01", freq="s", periods=N) else: raise NotImplementedError arr = pd.array(data, dtype=dtype) if hasna: arr[::2] = pd.NA self.arr = arr
026a83e06447b749385beddd3d03abe97d48e8f5
134
https://github.com/pandas-dev/pandas.git
209
def setup(self, dtype, hasna): N = 100_000 if dtype == "boolean[pyarrow]": data = np.random.choice([True, False], N, replace=True) elif dtype == "float64[pyarrow]": data = np.random.randn(N) elif dtype == "int64[pyarrow]": data =
22
220
setup
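A short usage sketch related to the pyarrow-backed benchmark above. It assumes the optional pyarrow dependency and a pandas version with ArrowDtype support, so treat it as an illustration rather than the benchmark itself:

import numpy as np
import pandas as pd

arr = pd.array(np.arange(5), dtype="int64[pyarrow]")
arr[::2] = pd.NA

# Convert to a plain NumPy array, mapping missing values to NaN.
print(arr.to_numpy(dtype="float64", na_value=np.nan))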
65
0
3
15
rllib/env/wrappers/tests/test_kaggle_wrapper.py
143,421
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
15
Python
53
test_kaggle_wrapper.py
def test_football_env_run_30_steps(self): from ray.rllib.env.wrappers.kaggle_wrapper import KaggleFootballMultiAgentEnv env = KaggleFootballMultiAgentEnv() # use the built-in agents in the kaggle environment run_right_agent = env.kaggle_env.agents["run_right"] do_nothing_agent = env.kaggle_env.agents["do_nothing"] obs = env.reset() self.assertEqual(list(obs.keys()), ["agent0", "agent1"]) done = {"__all__": False} num_steps_completed = 0 while not done["__all__"] and num_steps_completed <= 30: action0 = run_right_agent(structify(obs["agent0"]))[0] action1 = do_nothing_agent(structify(obs["agent1"]))[0] action_dict = {"agent0": action0, "agent1": action1} obs, _, done, _ = env.step(action_dict) num_steps_completed += 1
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
147
https://github.com/ray-project/ray.git
189
def test_football_env_run_30_steps(self): from ray.rllib.env.wrappers.kaggle_wrapper import KaggleFootballMultiAgentEnv env = KaggleFootballMultiAgentEnv() # use the built-in agents in the kaggle environment run_right_agent = env.kaggle_env.agents["run_right"] do_nothing_agent = env.kaggle_env.agents["do_nothing"] obs = env.reset() self.assertEqual(list(obs.keys()), ["agent0", "agent1"]) done = {"__all__": False} num_steps_completed = 0 while not done["__all__"] and num_steps_completed <= 30: action0 = run_right_agent(structify(obs["agent0"]))[0] action1 = do_nothing_agent(structify(obs["agent1"]))[0] action_dict = {"agent0": action0, "agent1": action1} obs, _, done
25
245
test_football_env_run_30_steps
14
1
1
4
lib/matplotlib/tests/test_ticker.py
108,365
Improve consistency in LogLocator and LogFormatter API
matplotlib
10
Python
14
test_ticker.py
def test_bad_locator_subs(sub):
    ll = mticker.LogLocator()
    with pytest.raises(ValueError):
        ll.set_params(subs=sub)


@pytest.mark.parametrize('numticks', [1, 2, 3, 9])
@mpl.style.context('default')
1bc33e99efc9e4be433f99c6a74c7e3b30147dac
@pytest.mark.parametrize('numticks', [1, 2, 3, 9]) @mpl.style.context('default')
28
https://github.com/matplotlib/matplotlib.git
24
def test_bad_locator_subs(sub): ll = mticker.LogLocator() with pytest.raises(ValueError): ll.set_params(subs=sub) @pytest.mark.parametrize('numticks', [1, 2, 3, 9]) @mpl
15
93
test_bad_locator_subs
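For context, a sketch of valid versus invalid subs values that the parametrized test above distinguishes; the concrete values are illustrative:

import matplotlib.ticker as mticker

ll = mticker.LogLocator()
ll.set_params(subs=[1.0, 2.0, 5.0])  # accepted: a sequence of floats
ll.set_params(subs="all")            # accepted: None, 'all' or 'auto'
# ll.set_params(subs="foo")          # rejected with ValueError, as the test expects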
95
0
8
31
python/ray/autoscaler/_private/_kubernetes/config.py
130,341
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
17
Python
57
config.py
def _configure_autoscaler_role_binding(namespace, provider_config):
    binding_field = "autoscaler_role_binding"
    if binding_field not in provider_config:
        logger.info(log_prefix + not_provided_msg(binding_field))
        return

    binding = provider_config[binding_field]
    if "namespace" not in binding["metadata"]:
        binding["metadata"]["namespace"] = namespace
    elif binding["metadata"]["namespace"] != namespace:
        raise InvalidNamespaceError(binding_field, namespace)
    for subject in binding["subjects"]:
        if "namespace" not in subject:
            subject["namespace"] = namespace
        elif subject["namespace"] != namespace:
            raise InvalidNamespaceError(
                binding_field + " subject '{}'".format(subject["name"]), namespace
            )

    name = binding["metadata"]["name"]
    field_selector = "metadata.name={}".format(name)
    accounts = (
        auth_api()
        .list_namespaced_role_binding(namespace, field_selector=field_selector)
        .items
    )
    if len(accounts) > 0:
        assert len(accounts) == 1
        logger.info(log_prefix + using_existing_msg(binding_field, name))
        return

    logger.info(log_prefix + not_found_msg(binding_field, name))
    auth_api().create_namespaced_role_binding(namespace, binding)
    logger.info(log_prefix + created_msg(binding_field, name))
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
215
https://github.com/ray-project/ray.git
268
def _configure_autoscaler_role_binding(namespace, provider_config): binding_field = "autoscaler_role_binding" if binding_field not in provider_config: logger.info(log_prefix + not_provided_msg(binding_field)) return binding = provider_config[binding_field] if "namespace" not in binding["metadata"]: binding["metadata"]["namespace"] = namespace elif binding["metadata"]["namespace"] != namespace: raise InvalidNamespaceError(binding_field, namespace) for subject in binding["subjects"]: if "namespace" not in subject: subject["namespace"] = namespace elif subject["namespace"] != namespace: raise InvalidNamespaceError( binding_field + " subject '{}'".format(subject["name"]), namespace ) name = binding["metadata"]["name"] field_selector = "metadata.name={}".format(name) accounts = ( auth_api() .list_namespaced_role_binding(namespace, field_selector=field_selector) .items )
23
360
_configure_autoscaler_role_binding
87
0
1
17
sympy/polys/numberfields/tests/test_primes.py
197,842
Improve `PrimeIdeal` reduction methods.
sympy
14
Python
48
test_primes.py
def test_PrimeIdeal_reduce():
    k = QQ.alg_field_from_poly(Poly(x ** 3 + x ** 2 - 2 * x + 8))
    Zk = k.maximal_order()
    P = k.primes_above(2)
    frp = P[2]

    # reduce_element
    a = Zk.parent(to_col([23, 20, 11]), denom=6)
    a_bar_expected = Zk.parent(to_col([11, 5, 2]), denom=6)
    a_bar = frp.reduce_element(a)
    assert a_bar == a_bar_expected

    # reduce_ANP
    a = k([QQ(11, 6), QQ(20, 6), QQ(23, 6)])
    a_bar_expected = k([QQ(2, 6), QQ(5, 6), QQ(11, 6)])
    a_bar = frp.reduce_ANP(a)
    assert a_bar == a_bar_expected

    # reduce_alg_num
    a = k.to_alg_num(a)
    a_bar_expected = k.to_alg_num(a_bar_expected)
    a_bar = frp.reduce_alg_num(a)
    assert a_bar == a_bar_expected
af44b30d68265acb25340374b648e198fb5570e7
196
https://github.com/sympy/sympy.git
143
def test_PrimeIdeal_reduce(): k = QQ.alg_field_from_poly(Poly(x ** 3 + x ** 2 - 2 * x + 8)) Zk = k.maximal_order() P = k.primes_above(2) frp = P[2] # reduce_element a = Zk.parent(to_col([23, 20, 11]), denom=6) a_bar_expected = Zk.parent(to_col([11, 5, 2]), denom=6) a_bar = frp.reduce_element(a) assert a_bar == a_bar_expected # reduce_ANP a = k([QQ(11, 6), QQ(20, 6), QQ(23, 6)]) a_bar_expected = k([QQ(2, 6), QQ(5, 6), QQ(11, 6)]) a_bar = frp.reduce_ANP(a) assert a_bar == a_bar_expected # reduce_alg_num a = k.to_alg_num(a) a_bar_expected = k.to_alg_num(a_bar_expected) a_
21
300
test_PrimeIdeal_reduce
43
0
1
13
python3.10.4/Lib/distutils/tests/test_register.py
223,281
add python 3.10.4 for windows
XX-Net
9
Python
33
test_register.py
def test_register_invalid_long_description(self):
    description = ':funkie:`str`'  # mimic Sphinx-specific markup
    metadata = {'url': 'xxx', 'author': 'xxx',
                'author_email': 'xxx',
                'name': 'xxx', 'version': 'xxx',
                'long_description': description}
    cmd = self._get_cmd(metadata)
    cmd.ensure_finalized()
    cmd.strict = True
    inputs = Inputs('2', 'tarek', 'tarek@ziade.org')
    register_module.input = inputs
    self.addCleanup(delattr, register_module, 'input')

    self.assertRaises(DistutilsSetupError, cmd.run)
8198943edd73a363c266633e1aa5b2a9e9c9f526
88
https://github.com/XX-net/XX-Net.git
163
def test_register_invalid_long_description(self): description = ':funkie:`str`' # mimic Sphinx-specific markup metadata = {'url': 'xxx', 'author': 'xxx', 'author_email': 'xxx', 'name': 'xxx', 'version': 'xxx', 'long_description': description} cmd = self._get_cmd(metadata) cmd.ensure_finalized() cmd.strict = True inputs
17
164
test_register_invalid_long_description
10
0
1
3
tests/test_markup.py
161,832
fix invalid escapes
rich
11
Python
9
test_markup.py
def test_markup_escape():
    result = str(render("[dim white][url=[/]"))
    assert result == "[url="
90a7224ee672ca7f58399f3c8bec9d38341b1423
17
https://github.com/Textualize/rich.git
15
def test_markup_escape(): result = str(rende
4
33
test_markup_escape
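For context, a sketch of the public escape helper that covers the same markup-escaping concern as the render test above; the printed string is illustrative:

from rich.console import Console
from rich.markup import escape

console = Console()
# escape() keeps user-supplied brackets from being parsed as markup tags
console.print(escape("[url=https://example.com]"))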
102
0
2
49
tests/sentry/lang/javascript/test_processor.py
86,506
feat(empty-stacktraces): Tag events with stack traces from JS console errors (#39335) Add a new tag, `empty_stacktrace.js_console`, to tag JavaScript console errors.
sentry
19
Python
68
test_processor.py
def test_no_suspected_console_error(self):
    project = self.create_project()
    release = self.create_release(project=project, version="12.31.12")

    data = {
        "is_exception": True,
        "platform": "javascript",
        "project": project.id,
        "exception": {
            "values": [
                {
                    "type": "SyntaxError",
                    "mechanism": {
                        "type": "onerror",
                    },
                    "value": ("value"),
                    "stacktrace": {
                        "frames": [
                            {
                                "abs_path": "http://example.com/foo.js",
                                "filename": "<anonymous>",
                                "function": "name",
                                "lineno": 4,
                                "colno": 0,
                            },
                            {
                                "abs_path": "http://example.com/foo.js",
                                "filename": "<anonymous>",
                                "function": "new name",
                                "lineno": 4,
                                "colno": 0,
                            },
                        ]
                    },
                }
            ]
        },
    }

    stacktrace_infos = [
        stacktrace for stacktrace in find_stacktraces_in_data(data, with_exceptions=True)
    ]

    processor = JavaScriptStacktraceProcessor(
        data={"release": release.version, "dist": "foo", "timestamp": 123.4},
        project=project,
        stacktrace_infos=stacktrace_infos,
    )

    frames = processor.get_valid_frames()
    assert processor.suspected_console_errors(frames) is False

    processor.tag_suspected_console_errors(frames)
    assert get_tag(processor.data, "empty_stacktrace.js_console") is False
e0dddfaa4b466e7eccff4ed075cc319fcc922688
211
https://github.com/getsentry/sentry.git
1,045
def test_no_suspected_console_error(self): project = self.create_project() release = self.create_release(project=project, version="12.31.12") data = { "is_exception": True, "platform": "javascript", "project": project.id, "exception": { "values": [ { "type": "SyntaxError", "mechanism": { "type": "onerror", }, "value": ("value"), "stacktrace": { "frames": [ { "abs_path": "http://example.com/foo.js", "filename": "<anonymous>", "function": "name", "lineno": 4, "colno": 0, }, { "abs_path": "http://example.com/foo.js", "filename": "<anonymous>", "function": "new name", "lineno": 4, "colno": 0, }, ] }, } ] }, } stacktrace_infos = [ stacktrace for stacktrace in find_stacktraces_in_data(data, with_exceptions=True)
20
371
test_no_suspected_console_error
54
0
4
15
nuitka/importing/Importing.py
178,301
Plugins: Massive cleanup and API improvements and Kivy support * Added a method to locate a DLL and to create a DLL entry point as expected, removing the need for imports and making it clearer as an API. * The location of modules already had an API, but it wasn't used where it could be. * Moved implicit imports and DLL usage for Gi to its plugin, solving a TODO for it. * Make sure to only yield, and not return, as that is just more error prone. * Also allow generators for implicit dependencies, so that generators work in a yield-from fashion. * With this, Kivy apps work on at least Linux.
Nuitka
11
Python
39
Importing.py
def locateModule(module_name, parent_package, level):
    module_package, module_filename, finding = findModule(
        module_name=module_name,
        parent_package=parent_package,
        level=level,
    )

    assert module_package is None or (
        type(module_package) is ModuleName and module_package != ""
    ), repr(module_package)

    if module_filename is not None:
        module_filename = os.path.normpath(module_filename)

        module_name, module_kind = getModuleNameAndKindFromFilename(module_filename)

        assert module_kind is not None, module_filename

        module_name = ModuleName.makeModuleNameInPackage(module_name, module_package)

    return module_name, module_filename, finding
56eb59d93f13815e66d0dea07e7669dfe275fa10
100
https://github.com/Nuitka/Nuitka.git
131
def locateModule(module_name, parent_package, level): module_package, module_filename, finding = findModule( module_name=module_name, parent_package=parent_package, level=level, ) assert module_package is None or ( type(module_package) is ModuleName and module_package != "" ), repr(module_package) if module_filename is not None: module_filename = os.path.normpath(module_filename) module_name, module_kind = getModuleNameAndKindFromFilename(module_filename) assert
17
151
locateModule
45
1
1
6
python/ray/ml/tests/test_torch_predictor.py
140,389
[AIR] Directly convert `TorchPredictor` `ndarray` inputs to tensors (#25190) If you pass a multidimensional input to `TorchPredictor.predict`, AIR errors. For more information about the error, see #25194. Co-authored-by: Amog Kamsetty <amogkamsetty@yahoo.com>
ray
12
Python
37
test_torch_predictor.py
def test_predict_dataframe():
    predictor = TorchPredictor(model=torch.nn.Linear(2, 1, bias=False))

    data_batch = pd.DataFrame({"X0": [0.0, 0.0, 0.0], "X1": [0.0, 0.0, 0.0]})
    predictions = predictor.predict(data_batch, dtype=torch.float)

    assert len(predictions) == 3
    assert predictions.to_numpy().flatten().tolist() == [0.0, 0.0, 0.0]


@pytest.mark.parametrize(
    ("input_dtype", "expected_output_dtype"),
    (
        (torch.float16, np.float16),
        (torch.float64, np.float64),
        (torch.int32, np.int32),
        (torch.int64, np.int64),
    ),
)
692335440b10b487641641d71413d4c03c85a362
@pytest.mark.parametrize( ("input_dtype", "expected_output_dtype"), ( (torch.float16, np.float16), (torch.float64, np.float64), (torch.int32, np.int32), (torch.int64, np.int64), ), )
114
https://github.com/ray-project/ray.git
94
def test_predict_dataframe(): predictor = TorchPredictor(model=torch.nn.Linear(2, 1, bias=False)) data_batch = pd.DataFrame({"X0": [0.0, 0.0, 0.0], "X1": [0.0, 0.0, 0.0]}) predictions = predictor.predict(data_batch, dtype=torch.float) assert len(predictions) == 3 assert predictions.to_numpy().flatten().tolist() == [0.0, 0.0, 0.0] @pytest.mark.parametrize( ("input_dtype",
27
228
test_predict_dataframe
109
0
18
43
src/textual/message_pump.py
183,689
combine updates, cache arrangements
textual
22
Python
60
message_pump.py
async def _process_messages(self) -> None:
    _rich_traceback_guard = True
    while not self._closed:
        try:
            message = await self.get_message()
        except MessagePumpClosed:
            break
        except CancelledError:
            raise
        except Exception as error:
            raise error from None

        # Combine any pending messages that may supersede this one
        while not (self._closed or self._closing):
            try:
                pending = self.peek_message()
            except MessagePumpClosed:
                break
            if pending is None or not message.can_replace(pending):
                break
            try:
                message = await self.get_message()
            except MessagePumpClosed:
                break

        try:
            await self.dispatch_message(message)
        except CancelledError:
            raise
        except Exception as error:
            self.app.on_exception(error)
            break
        finally:
            if self._message_queue.empty():
                if not self._closed:
                    event = events.Idle(self)
                    for _cls, method in self._get_dispatch_methods(
                        "on_idle", event
                    ):
                        try:
                            await invoke(method, event)
                        except Exception as error:
                            self.app.on_exception(error)
                            break
    log("CLOSED", self)
55543479ad3049c6f9d1507d034c7c5bedf3981a
192
https://github.com/Textualize/textual.git
814
async def _process_messages(self) -> None: _rich_traceback_guard = True while not self._closed: try: message = await self.get_message() except MessagePumpClosed
27
330
_process_messages
21
0
1
5
lib/matplotlib/tests/test_contour.py
110,787
Support only positional args for data in contour
matplotlib
11
Python
20
test_contour.py
def test_contour_no_args():
    fig, ax = plt.subplots()
    data = [[0, 1], [1, 0]]
    with pytest.raises(TypeError, match=r"contour\(\) takes from 1 to 4"):
        ax.contour(Z=data)
756eb1e539aff1aa7c9a73c42b527c6b6f204419
49
https://github.com/matplotlib/matplotlib.git
36
def test_contour_no_args(): fig, ax = plt.subplots() data = [[0, 1], [1, 0]] with pytest.raises(TypeError, match=r"contour\(\) takes from 1 to 4"): ax.contour
12
78
test_contour_no_args
32
0
1
13
onnx/backend/test/case/node/stringnormalizer.py
255,088
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fixes Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * remove extra blank line Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotations Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix Operators.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix TestCoverage.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
onnx
12
Python
27
stringnormalizer.py
def export_monday_casesensintive_lower() -> None:
    input = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(object)
    output = np.array([u'tuesday', u'wednesday', u'thursday']).astype(object)
    stopwords = [u'monday']

    node = onnx.helper.make_node(
        'StringNormalizer',
        inputs=['x'],
        outputs=['y'],
        case_change_action='LOWER',
        is_case_sensitive=1,
        stopwords=stopwords
    )
    expect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_casesensintive_lower')
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
113
https://github.com/onnx/onnx.git
139
def export_monday_casesensintive_lower() -> None: input = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(object) output = np.array([u'tuesday', u'wednesday', u'thursday']
18
178
export_monday_casesensintive_lower
29
0
1
13
tests/www/views/test_views_tasks.py
44,086
Return to the same place when triggering a DAG (#20955)
airflow
13
Python
26
test_views_tasks.py
def test_dag_details_trigger_origin_dag_details_view(app, admin_client):
    app.dag_bag.get_dag('test_graph_view').create_dagrun(
        run_type=DagRunType.SCHEDULED,
        execution_date=DEFAULT_DATE,
        data_interval=(DEFAULT_DATE, DEFAULT_DATE),
        start_date=timezone.utcnow(),
        state=State.RUNNING,
    )

    url = 'dag_details?dag_id=test_graph_view'
    resp = admin_client.get(url, follow_redirects=True)
    params = {'dag_id': 'test_graph_view', 'origin': '/dag_details?dag_id=test_graph_view'}
    href = f"/trigger?{html.escape(urllib.parse.urlencode(params))}"
    check_content_in_response(href, resp)
928dafe6c495bbf3e03d14473753fce915134a46
87
https://github.com/apache/airflow.git
84
def test_dag_details_trigger_origin_dag_details_view(app, admin_client): app.dag_bag.get_dag('test_graph_view').create_da
30
164
test_dag_details_trigger_origin_dag_details_view
105
0
3
18
jax/_src/random.py
122,115
[typing] use jax.Array annotations in random.py
jax
13
Python
82
random.py
def _truncated_normal(key, lower, upper, shape, dtype) -> Array:
  if shape is None:
    shape = lax.broadcast_shapes(np.shape(lower), np.shape(upper))
  else:
    _check_shape("truncated_normal", shape, np.shape(lower), np.shape(upper))

  sqrt2 = np.array(np.sqrt(2), dtype)
  lower = lax.convert_element_type(lower, dtype)
  upper = lax.convert_element_type(upper, dtype)
  a = lax.erf(lower / sqrt2)
  b = lax.erf(upper / sqrt2)
  if not jnp.issubdtype(dtype, np.floating):
    raise TypeError("truncated_normal only accepts floating point dtypes.")
  u = uniform(key, shape, dtype, minval=a, maxval=b)
  out = sqrt2 * lax.erf_inv(u)
  # Clamp the value to the open interval (lower, upper) to make sure that
  # rounding (or if we chose `a` for `u`) doesn't push us outside of the range.
  return jnp.clip(
      out,
      lax.nextafter(lax.stop_gradient(lower), np.array(np.inf, dtype=dtype)),
      lax.nextafter(lax.stop_gradient(upper), np.array(-np.inf, dtype=dtype)))
aed46f3312c970de257afbeb6cd775e79dd8e04e
221
https://github.com/google/jax.git
141
def _truncated_normal(key, lower, upper, shape, dtype) -> Array: if shape is None: shape = lax.broadcast_shapes(np.shape(lower), np.shape(upper)) else: _check_shape("truncated_normal", shape, np.shape(lower), np.shape(upper)) sqrt2 = np.array(np.sqrt(2), dtype) lower = lax.convert_element_type(lower, dtype) upper = lax.convert_element_type(upper, dtype) a = lax.erf(lower / sqrt2) b = lax.erf(upper / sqrt2) if not jnp.issubdtype(dtype, np.floating): raise TypeError("truncated_normal only accepts floating point dtypes.") u = uniform(key, shape, dtype, minval=a, maxval=b) out = sqrt2 * lax.erf_inv(u) # Clamp the value to the open interval (lower, upper) to make sure that # rounding (or if we chose `a` for `u`) doesn't push us outside of the range. return jnp.clip( out, lax.nextafter(lax.stop_gradient(lower),
32
336
_truncated_normal
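The private helper above backs the public sampler; a minimal usage sketch, with the key, bounds, and shape chosen only for illustration:

import jax

key = jax.random.PRNGKey(0)
# samples stay inside the open interval (lower, upper) = (-1, 1)
samples = jax.random.truncated_normal(key, lower=-1.0, upper=1.0, shape=(4,))
print(samples)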
103
0
7
20
.venv/lib/python3.8/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
62,572
upd; format
transferlearning
14
Python
56
__init__.py
def getTreeBuilder(treeType, implementation=None, **kwargs):
    treeType = treeType.lower()
    if treeType not in treeBuilderCache:
        if treeType == "dom":
            from . import dom
            # Come up with a sane default (pref. from the stdlib)
            if implementation is None:
                from xml.dom import minidom
                implementation = minidom
            # NEVER cache here, caching is done in the dom submodule
            return dom.getDomModule(implementation, **kwargs).TreeBuilder
        elif treeType == "lxml":
            from . import etree_lxml
            treeBuilderCache[treeType] = etree_lxml.TreeBuilder
        elif treeType == "etree":
            from . import etree
            if implementation is None:
                implementation = default_etree
            # NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeBuilder
        else:
            raise ValueError( % treeType)
    return treeBuilderCache.get(treeType)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
123
https://github.com/jindongwang/transferlearning.git
320
def getTreeBuilder(treeType, implementation=None, **kwargs): treeType = treeType.lower() if treeType not in treeBuilderCache: if treeType == "dom": from . import dom # Come up with a sane default (pref. from the stdlib) if implementation is None: from xml.dom import minidom implementation = minidom # NEVER cache here, caching is done in the dom submodule return dom.getDomModule(implementation, **kwargs).TreeBuilder elif treeType == "lxml": from . import etree_lxml treeBuilderCache[treeType] = etree_lxml.TreeBuilder elif treeT
17
211
getTreeBuilder
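A usage sketch of the factory above through html5lib's public entry points; the markup fed to the parser is illustrative:

import html5lib

# pick a tree builder explicitly and parse into an xml.etree document
tb = html5lib.getTreeBuilder("etree")
parser = html5lib.HTMLParser(tree=tb)
document = parser.parse("<p>Hello")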
631
0
18
111
sklearn/linear_model/_quantile.py
260,449
MAINT Param validation for QuantileRegressor (#23808) Co-authored-by: jeremie du boisberranger <jeremiedbb@yahoo.fr>
scikit-learn
17
Python
332
_quantile.py
def fit(self, X, y, sample_weight=None):
    self._validate_params()
    X, y = self._validate_data(
        X,
        y,
        accept_sparse=["csc", "csr", "coo"],
        y_numeric=True,
        multi_output=False,
    )
    sample_weight = _check_sample_weight(sample_weight, X)

    n_features = X.shape[1]
    n_params = n_features

    if self.fit_intercept:
        n_params += 1
        # Note that centering y and X with _preprocess_data does not work
        # for quantile regression.

    # The objective is defined as 1/n * sum(pinball loss) + alpha * L1.
    # So we rescale the penalty term, which is equivalent.
    alpha = np.sum(sample_weight) * self.alpha

    if self.solver == "warn":
        warnings.warn(
            "The default solver will change from 'interior-point' to 'highs' in "
            "version 1.4. Set `solver='highs'` or to the desired solver to silence "
            "this warning.",
            FutureWarning,
        )
        solver = "interior-point"
    elif self.solver in (
        "highs-ds",
        "highs-ipm",
        "highs",
    ) and sp_version < parse_version("1.6.0"):
        raise ValueError(
            f"Solver {self.solver} is only available "
            f"with scipy>=1.6.0, got {sp_version}"
        )
    else:
        solver = self.solver

    if solver == "interior-point" and sp_version >= parse_version("1.11.0"):
        raise ValueError(
            f"Solver {solver} is not anymore available in SciPy >= 1.11.0."
        )

    if sparse.issparse(X) and solver not in ["highs", "highs-ds", "highs-ipm"]:
        raise ValueError(
            f"Solver {self.solver} does not support sparse X. "
            "Use solver 'highs' for example."
        )
    # make default solver more stable
    if self.solver_options is None and solver == "interior-point":
        solver_options = {"lstsq": True}
    else:
        solver_options = self.solver_options

    # After rescaling alpha, the minimization problem is
    #     min sum(pinball loss) + alpha * L1
    # Use linear programming formulation of quantile regression
    #     min_x c x
    #           A_eq x = b_eq
    #                0 <= x
    # x = (s0, s, t0, t, u, v) = slack variables >= 0
    # intercept = s0 - t0
    # coef = s - t
    # c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n)
    # residual = y - X@coef - intercept = u - v
    # A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n))
    # b_eq = y
    # p = n_features
    # n = n_samples
    # 1_n = vector of length n with entries equal one
    # see https://stats.stackexchange.com/questions/384909/
    #
    # Filtering out zero sample weights from the beginning makes life
    # easier for the linprog solver.
    indices = np.nonzero(sample_weight)[0]
    n_indices = len(indices)  # use n_mask instead of n_samples
    if n_indices < len(sample_weight):
        sample_weight = sample_weight[indices]
        X = _safe_indexing(X, indices)
        y = _safe_indexing(y, indices)
    c = np.concatenate(
        [
            np.full(2 * n_params, fill_value=alpha),
            sample_weight * self.quantile,
            sample_weight * (1 - self.quantile),
        ]
    )
    if self.fit_intercept:
        # do not penalize the intercept
        c[0] = 0
        c[n_params] = 0

    if solver in ["highs", "highs-ds", "highs-ipm"]:
        # Note that highs methods always use a sparse CSC memory layout internally,
        # even for optimization problems parametrized using dense numpy arrays.
        # Therefore, we work with CSC matrices as early as possible to limit
        # unnecessary repeated memory copies.
        eye = sparse.eye(n_indices, dtype=X.dtype, format="csc")
        if self.fit_intercept:
            ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype))
            A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc")
        else:
            A_eq = sparse.hstack([X, -X, eye, -eye], format="csc")
    else:
        eye = np.eye(n_indices)
        if self.fit_intercept:
            ones = np.ones((n_indices, 1))
            A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1)
        else:
            A_eq = np.concatenate([X, -X, eye, -eye], axis=1)

    b_eq = y

    result = linprog(
        c=c,
        A_eq=A_eq,
        b_eq=b_eq,
        method=solver,
        options=solver_options,
    )
    solution = result.x
    if not result.success:
        failure = {
            1: "Iteration limit reached.",
            2: "Problem appears to be infeasible.",
            3: "Problem appears to be unbounded.",
            4: "Numerical difficulties encountered.",
        }
        warnings.warn(
            "Linear programming for QuantileRegressor did not succeed.\n"
            f"Status is {result.status}: "
            + failure.setdefault(result.status, "unknown reason")
            + "\n"
            + "Result message of linprog:\n"
            + result.message,
            ConvergenceWarning,
        )

    # positive slack - negative slack
    # solution is an array with (params_pos, params_neg, u, v)
    params = solution[:n_params] - solution[n_params : 2 * n_params]

    self.n_iter_ = result.nit

    if self.fit_intercept:
        self.coef_ = params[1:]
        self.intercept_ = params[0]
    else:
        self.coef_ = params
        self.intercept_ = 0.0
    return self
a0623cec4a253ce3b5c5e4cf3b080651c84a53a9
655
https://github.com/scikit-learn/scikit-learn.git
2,114
def fit(self, X, y, sample_weight=None): self._validate_params() X, y = self._validate_data( X, y, accept_sparse=["csc", "csr", "coo"], y_numeric=True, multi_output=False, ) sample_weight = _check_sample_weight(sample_weight, X) n_features = X.shape[1] n_params = n_features if self.fit_intercept: n_params += 1 # Note that centering y and X with _preprocess_data does not work # for quantile regression. # The objective is defined as 1/n * sum(pinball loss) + alpha * L1. # So we rescale the penalty term, which is equivalent. alpha = np.sum(sample_weight) * self.alpha if self.solver == "warn": warnings.warn( "The default solver will change from 'interior-point' to 'highs' in " "version 1.4. Set `solver='highs'` or to the desired solver to silence " "this warning.", FutureWarning, ) solver = "interior-point" elif self.solver in ( "highs-ds", "highs-ipm", "highs", ) and sp_version < parse_version("1.6.0"): raise ValueError( f"Solver {self.solver} is only available " f"with scipy>=1.6.0, got {sp_version}" ) else: solver = self.solver if solver == "interior-point" and sp_version >= parse_version("1.11.0"): raise ValueError( f"Solver {solver} is not anymore available in SciPy >= 1.11.0." ) if sparse.issparse(X) and solver not in ["highs", "highs-ds", "highs-ipm"]: raise ValueError( f"Solver {self.solver} does not support sparse X. " "Use solver 'highs' for example." ) # make default solver more stable if self.solver_options is None and solver == "interior-point": solver_options = {"lstsq": True} else: solver_options = self.solver_options # After rescaling alpha, the minimization problem is # min sum(pinb
64
1,110
fit
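A minimal end-user sketch of the estimator whose fit method appears above; the synthetic data is illustrative, and SciPy >= 1.6 is assumed for the 'highs' solver:

import numpy as np
from sklearn.linear_model import QuantileRegressor

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 2))
y = X @ np.array([1.0, -2.0]) + rng.normal(size=100)

# fit the conditional median with no L1 penalty
reg = QuantileRegressor(quantile=0.5, alpha=0.0, solver="highs").fit(X, y)
print(reg.coef_, reg.intercept_)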
208
0
9
45
sklearn/datasets/tests/test_lfw.py
261,542
MAINT bump up CI dependencies (#24803) [scipy-dev] [pypy]
scikit-learn
16
Python
131
test_lfw.py
def setup_module():
    Image = pytest.importorskip("PIL.Image")

    global SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA, LFW_HOME
    SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
    LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, "lfw_home")

    SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")

    if not os.path.exists(LFW_HOME):
        os.makedirs(LFW_HOME)

    random_state = random.Random(42)
    np_rng = np.random.RandomState(42)

    # generate some random jpeg files for each person
    counts = {}
    for name in FAKE_NAMES:
        folder_name = os.path.join(LFW_HOME, "lfw_funneled", name)
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)

        n_faces = np_rng.randint(1, 5)
        counts[name] = n_faces
        for i in range(n_faces):
            file_path = os.path.join(folder_name, name + "_%04d.jpg" % i)
            uniface = np_rng.randint(0, 255, size=(250, 250, 3))
            img = Image.fromarray(uniface.astype(np.uint8))
            img.save(file_path)

    # add some random file pollution to test robustness
    with open(os.path.join(LFW_HOME, "lfw_funneled", ".test.swp"), "wb") as f:
        f.write(b"Text file to be ignored by the dataset loader.")

    # generate some pairing metadata files using the same format as LFW
    with open(os.path.join(LFW_HOME, "pairsDevTrain.txt"), "wb") as f:
        f.write(b"10\n")
        more_than_two = [name for name, count in counts.items() if count >= 2]
        for i in range(5):
            name = random_state.choice(more_than_two)
            first, second = random_state.sample(range(counts[name]), 2)
            f.write(("%s\t%d\t%d\n" % (name, first, second)).encode())

        for i in range(5):
            first_name, second_name = random_state.sample(FAKE_NAMES, 2)
            first_index = np_rng.choice(np.arange(counts[first_name]))
            second_index = np_rng.choice(np.arange(counts[second_name]))
            f.write(
                (
                    "%s\t%d\t%s\t%d\n"
                    % (first_name, first_index, second_name, second_index)
                ).encode()
            )

    with open(os.path.join(LFW_HOME, "pairsDevTest.txt"), "wb") as f:
        f.write(b"Fake place holder that won't be tested")

    with open(os.path.join(LFW_HOME, "pairs.txt"), "wb") as f:
        f.write(b"Fake place holder that won't be tested")
63f92d4adb61aed58d656544cc6caa9d68cb6065
460
https://github.com/scikit-learn/scikit-learn.git
564
def setup_module(): Image = pytest.importorskip("PIL.Image") global SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA, LFW_HOME SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_") LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, "lfw_home") SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_") if not os.path.exists(LFW_HOME): os.makedirs(LFW_HOME) random_state = random.Random(42) np_rng = np.random.RandomState(42) # generate some random jpeg files for each person counts = {} for name in FAKE_NAMES: folder_name = os.path.join(LFW_HOME, "lfw_funneled", name) if not os.path.exists(folder_name): os.makedirs(folder_name) n_faces = np_rng.randint(1, 5) counts[name] = n_faces for i in range(n_faces): file_path = os.path.join(folder_name, name + "_%04d.jpg" % i) uniface = np_rng.randint(0, 255, size=(250, 250, 3)) img = Image.fromarray(uniface.astype(np.uint8)) img.save(file_path) # add some random file pollution to test robustness with open(os.path.join(LFW_HOME, "lfw_funneled", ".test.swp"), "wb") as f: f.write(b"Text file to be ignored by the dataset loader.") # generate some pairing metadata files using the same format as LFW with open(os.path.join(LFW_HOME, "pairsDevTrain.txt"), "wb") as f: f.write(b"10\n") more_than_two = [name for name, count in counts.items() if count >= 2] for i in range(5): name = random_state.choice(more_than_two) first, second = random_state.sample(range(counts[name]), 2) f.write(("%s\t%d\t%d\n" % (name, first, second)).encode()) for i in range(5): first_name, second_name = random_state.sample(FAKE_NAMES, 2) first_index = np_rng.choice(np.arange(counts[first_name])) second_index = np_rng.choice(np.arange(counts[second_name])) f.write( ( "%s\t%d\t%s\t%d\n" % (first_name, first_index, second_name, second_index) ).encode() ) with open(os.path.join(LFW_HOME, "pairsDevTest.txt"), "wb") as f: f.write(b"Fake place holder that won't b
53
764
setup_module
47
1
1
16
tests/components/plugwise/conftest.py
313,041
Cleanup existing Plugwise tests and test fixtures (#66282) * Cleanup existing Plugwise tests and test fixtures * More cleanup
core
11
Python
37
conftest.py
def mock_stretch() -> Generator[None, MagicMock, None]:
    chosen_env = "stretch_v31"
    with patch(
        "homeassistant.components.plugwise.gateway.Smile", autospec=True
    ) as smile_mock:
        smile = smile_mock.return_value
        smile.gateway_id = "259882df3c05415b99c2d962534ce820"
        smile.heater_id = None
        smile.smile_version = "3.1.11"
        smile.smile_type = "stretch"
        smile.smile_hostname = "stretch98765"
        smile.smile_name = "Stretch"
        smile.connect.return_value = True
        smile.async_update.return_value = _read_json(chosen_env, "all_data")
        yield smile


@pytest.fixture
bd920aa43de584f6a4db934902d64b39aabbd6d6
@pytest.fixture
85
https://github.com/home-assistant/core.git
135
def mock_stretch() -> Generator[None, MagicMock, None]: chosen_env = "stretch_v31" with patch( "homeassistant.components.plugwise.gateway.Smile", autospec=True ) as smile_mock: smile = smile_mock.return_value smile.gateway_id = "259882df3c
20
161
mock_stretch
27
1
4
9
nuitka/utils/FileOperations.py
178,587
macOS: Proper adhoc signing of created distribution * With this homebrew works on M1 and macOS 12
Nuitka
11
Python
21
FileOperations.py
def withPreserveFileMode(filenames):
    if type(filenames) is str:
        filenames = [filenames]

    old_modes = {}
    for filename in filenames:
        old_modes[filename] = os.stat(filename).st_mode

    yield

    for filename in filenames:
        os.chmod(filename, old_modes[filename])


@contextmanager
e188ede8767cda1750cd41c08bed82c00888aebe
@contextmanager
57
https://github.com/Nuitka/Nuitka.git
61
def withPreserveFileMode(filenames): if type(filenames) is str: filenames = [filenames] old_modes = {} for filename in filenames: old_modes[filename] = os.stat(filename).st_mode yield
11
94
withPreserveFileMode
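A usage sketch for the context manager defined above; the file path and the chmod call are illustrative only:

import os

# permissions recorded on entry are restored after the block, even if the file was modified
with withPreserveFileMode("dist/myprogram.bin"):
    os.chmod("dist/myprogram.bin", 0o755)
    # ... rewrite or re-sign the file in place ...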
20
0
2
22
gamestonk_terminal/economy/fred/prediction/pred_controller.py
281,475
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: james <jmaslek11@gmail.com> Co-authored-by: jose-donato <zmcdonato@gmail.com>
OpenBBTerminal
12
Python
18
pred_controller.py
def print_help(self):
    id_string = ""
    for s_id, sub_dict in self.current_series.items():
        id_string += f" [cyan]{s_id.upper()}[/cyan] : {sub_dict['title']}"
    help_string = f
    console.print(help_string)
82747072c511beb1b2672846ae2ee4aec53eb562
36
https://github.com/OpenBB-finance/OpenBBTerminal.git
69
def print_help(self): id_string = "" for s_id, sub_dict in self.current_series.items(): id_string += f" [cyan]{s_id.upper()}[/cyan]
12
96
print_help
78
0
2
18
modin/test/interchange/dataframe_protocol/hdk/test_protocol.py
155,297
REFACTOR-#5303: Fix code scanning alert - Unused local variable (#5304) Signed-off-by: Myachev <anatoly.myachev@intel.com> Co-authored-by: Mahesh Vashishtha <mvashishtha@users.noreply.github.com>
modin
16
Python
62
test_protocol.py
def test_zero_copy_export_for_primitives(data_has_nulls):
    data = get_data_of_all_types(
        has_nulls=data_has_nulls, include_dtypes=["int", "uint", "float"]
    )
    at = pa.Table.from_pydict(data)

    md_df = from_arrow(at)
    protocol_df = md_df.__dataframe__(allow_copy=False)

    for i, col in enumerate(protocol_df.get_columns()):
        col_arr, _ = primitive_column_to_ndarray(col)

        exported_ptr = col_arr.__array_interface__["data"][0]
        producer_ptr = at.column(i).chunks[0].buffers()[-1].address
        # Verify that the pointers of produce and exported objects point to the same data
        assert producer_ptr == exported_ptr

    # Can't export `md_df` zero-copy no more as it has delayed 'fillna' operation
    md_df = md_df.fillna({"float32": 32.0})
    non_zero_copy_protocol_df = md_df.__dataframe__(allow_copy=False)

    with pytest.raises(RuntimeError):
        primitive_column_to_ndarray(
            non_zero_copy_protocol_df.get_column_by_name("float32")
        )
eb99c500a40c5565012e3fe83c5e6ef333d1b487
151
https://github.com/modin-project/modin.git
178
def test_zero_copy_export_for_primitives(data_has_nulls): data = get_data_of_all_types( has_nulls=data_has_nulls, include_dtypes=["int", "uint", "float"] ) at = pa.Table.from_pydict(data) md_df = from_arrow(at) protocol_df = md_df.__dataframe__(allow_copy=False) for i, col in enumerate(protocol_df.get_columns()): col_arr, _ = pri
35
252
test_zero_copy_export_for_primitives
20
0
3
7
apps/DeepFaceLive/backend/CameraSource.py
179,102
CameraSource now shows names of video input devices in Windows
DeepFaceLive
11
Python
17
CameraSource.py
def on_cs_device_idx_selected(self, device_idx, device_name):
    cs, state = self.get_control_sheet(), self.get_state()
    if state.device_idx != device_idx:
        state.device_idx = device_idx
        self.save_state()
        if self.is_started():
            self.restart()
fa7fddca2869dec8fb1c7c9691fb77f1cc8805b6
53
https://github.com/iperov/DeepFaceLive.git
81
def on_cs_device_idx_selected(self, device_idx, device_name): cs, state = self.get_control_sheet(), self.get_state() if state.device_idx != device_idx: state.device_idx = devic
11
87
on_cs_device_idx_selected
38
0
1
12
wagtail/users/tests/test_admin_views.py
76,211
Reformat with black
wagtail
14
Python
31
test_admin_views.py
def test_user_can_delete_other_superuser(self):
    response = self.client.get(
        reverse("wagtailusers_users:delete", args=(self.superuser.pk,))
    )
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "wagtailusers/users/confirm_delete.html")

    response = self.client.post(
        reverse("wagtailusers_users:delete", args=(self.superuser.pk,))
    )

    # Should redirect back to index
    self.assertRedirects(response, reverse("wagtailusers_users:index"))

    # Check that the user was deleted
    users = get_user_model().objects.filter(email="testsuperuser@email.com")
    self.assertEqual(users.count(), 0)
d10f15e55806c6944827d801cd9c2d53f5da4186
108
https://github.com/wagtail/wagtail.git
136
def test_user_can_delete_other_superuser(self): response = self.client.get( reverse(
20
179
test_user_can_delete_other_superuser
85
0
5
17
cps/gdriveutils.py
172,506
Refactor rename author/title on gdrive
calibre-web
16
Python
69
gdriveutils.py
def moveGdriveFolderRemote(origin_file, target_folder):
    drive = getDrive(Gdrive.Instance().drive)
    previous_parents = ",".join([parent["id"] for parent in origin_file.get('parents')])
    children = drive.auth.service.children().list(folderId=previous_parents).execute()
    gFileTargetDir = getFileFromEbooksFolder(None, target_folder)
    if not gFileTargetDir or gFileTargetDir['title'] != target_folder:
        # Folder is not existing, create, and move folder
        drive.auth.service.files().patch(fileId=origin_file['id'],
                                         body={'title': target_folder},
                                         fields='title').execute()
        #gFileTargetDir = drive.CreateFile(
        #    {'title': target_folder, 'parents': [{"kind": "drive#fileLink", 'id': getEbooksFolderId()}],
        #     "mimeType": "application/vnd.google-apps.folder"})
        #gFileTargetDir.Upload()
    else:
        # Move the file to the new folder
        drive.auth.service.files().update(fileId=origin_file['id'],
                                          addParents=gFileTargetDir['id'],
                                          removeParents=previous_parents,
                                          fields='id, parents').execute()
    # if previous_parents has no children anymore, delete original fileparent
    if len(children['items']) == 1:
        deleteDatabaseEntry(previous_parents)
        drive.auth.service.files().delete(fileId=previous_parents).execute()
d8f5bdea6df3a0217f49062d4209cedc80caad0e
192
https://github.com/janeczku/calibre-web.git
388
def moveGdriveFolderRemote(origin_file, target_folder): drive = getDrive(Gdrive.Instance().drive) previous_parents = ",".join([parent["id"] for parent in origin_file.get('parents')]) children = drive.auth.service.children().list(folderId=previous_parents).execute() gFileTargetDir = getFileFromEbooksFolder(None, target_folder) if not gFileTargetDir or gFileTargetDir['title'] != target_folder: # Folder is not existing, create, and move folder drive.auth.service.files().patch(fileId=origin_file['id'], body={'title': target_folder}, fields='title').execute() #gFileTargetDir = drive.CreateFile( # {'title': target_folder, 'parents': [{"kind": "drive#fileLink", 'id': getEbooksFolderId()}], # "mimeType": "application/vnd.google-apps.folder"}) #gFi
30
328
moveGdriveFolderRemote
15
1
1
6
tests/components/hassio/test_addon_manager.py
290,489
Move zwave_js addon manager to hassio integration (#81354)
core
11
Python
15
test_addon_manager.py
def stop_addon_fixture() -> Generator[AsyncMock, None, None]:
    with patch(
        "homeassistant.components.hassio.addon_manager.async_stop_addon"
    ) as stop_addon:
        yield stop_addon


@pytest.fixture(name="create_backup")
9ded2325223de3918e3f69aab8732487323b2214
@pytest.fixture(name="create_backup")
24
https://github.com/home-assistant/core.git
37
def stop_addon_fixture() -> Generator[AsyncMock, None, None]:
8
62
stop_addon_fixture
26
0
1
14
ludwig/features/audio_feature.py
6,750
Torchaudio fixes (#2007) * hotfix for shape broadcast issue * Reverted [1] index on padded audio feature, set up test for feature creation observation * Changed default audio type since raw takes too long * Removed debug code
ludwig
10
Python
25
audio_feature.py
def preprocessing_defaults():
    return {
        "audio_file_length_limit_in_s": 7.5,
        "missing_value_strategy": BACKFILL,
        "in_memory": True,
        "padding_value": 0,
        "norm": None,
        "audio_feature": {
            TYPE: "fbank",
            "window_length_in_s": 0.04,
            "window_shift_in_s": 0.02,
            "num_filter_bands": 80,
        },
    }
5209b1aed23a98c092a0e2682ed13b7f61623e20
54
https://github.com/ludwig-ai/ludwig.git
176
def preprocessing_defaults(): return { "audio_file_length_limit_in_s": 7.5, "missing_value_strategy": BACKFILL, "in_memory": True, "padding_value": 0, "norm": None, "audio_feature": { TYPE: "fbank", "window_length_in_s": 0.04, "window_shift_in_s": 0.02, "num_filter_bands": 80,
3
85
preprocessing_defaults