| column | dtype | range (values / string lengths) |
|---|---|---|
| complexity | int64 | 1–139 |
| fun_name | string | 1–80 |
| code | string | 101–62.2k |
| commit_id | string | 40–40 |
| ast_errors | string | 0–3.11k |
| ast_levels | int64 | 6–36 |
| file_name | string | 5–79 |
| n_ast_nodes | int64 | 17–19.2k |
| commit_message | string | 3–15.3k |
| d_id | int64 | 12–121k |
| n_ast_errors | int64 | 0–9 |
| n_whitespaces | int64 | 4–10.8k |
| token_counts | int64 | 5–3.06k |
| vocab_size | int64 | 4–1.11k |
| id | int64 | 20–338k |
| n_words | int64 | 4–4.82k |
| repo | string | 3–22 |
| n_identifiers | int64 | 2–176 |
| path | string | 7–134 |
| language | string | 1 value |
| nloc | int64 | 1–413 |
| documentation | dict | – |
| url | string | 31–59 |
12
_expire_url_cache_data
async def _expire_url_cache_data(self) -> None:
    assert self._worker_run_media_background_jobs

    now = self.clock.time_msec()
    logger.debug("Running url preview cache expiry")

    if not (await self.store.db_pool.updates.has_completed_background_updates()):
        logger.debug("Still running DB updates; skipping url preview cache expiry")
        return
57f6c496d0e26b1b455de936bd950e1899a5ae25
13
preview_url_resource.py
92
URL preview cache expiry logs: INFO -> DEBUG, text clarifications (#12720)
72,189
0
86
329
25
248,275
29
synapse
12
synapse/rest/media/v1/preview_url_resource.py
Python
68
{ "docstring": "Clean up expired url cache content, media and thumbnails.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/matrix-org/synapse.git
2
reset_format
def reset_format(self):
    self._check_values_type()
    for dataset in self.values():
        dataset.set_format()
e35be138148333078284b942ccc9ed7b1d826f97
9
dataset_dict.py
45
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com> Co-authored-by: Mishig Davaadorj <dmishig@gmail.com> Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr> Co-authored-by: Mishig Davaadorj <dmishig@gmail.com> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com> Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr>
21,824
0
40
25
8
104,387
8
datasets
6
src/datasets/dataset_dict.py
Python
4
{ "docstring": "Reset ``__getitem__`` return format to python objects and all columns.\n The transformation is applied to all the datasets of the dataset dictionary.\n\n Same as ``self.set_format()``\n ", "language": "en", "n_whitespaces": 46, "n_words": 25, "vocab_size": 22 }
https://github.com/huggingface/datasets.git
1
_assert_text_deltas
def _assert_text_deltas(self, scriptrunner, text_deltas):
    self.assertEqual(text_deltas, scriptrunner.text_deltas())
704eab3478cf69847825b23dabf15813a8ac9fa2
9
script_runner_test.py
36
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
26,337
0
20
22
6
118,633
6
streamlit
5
lib/tests/streamlit/scriptrunner/script_runner_test.py
Python
2
{ "docstring": "Asserts that the scriptrunner's ForwardMsgQueue contains text deltas\n with the given contents.\n\n Parameters\n ----------\n scriptrunner : TestScriptRunner\n text_deltas : List[str]\n\n ", "language": "en", "n_whitespaces": 62, "n_words": 20, "vocab_size": 18 }
https://github.com/streamlit/streamlit.git
6
typename
def typename(typ, short=False) -> str:
    if not isinstance(typ, type):
        return typename(type(typ))
    try:
        if not typ.__module__ or typ.__module__ == "builtins":
            return typ.__name__
        else:
            if short:
                module, *_ = typ.__module__.split(".")
            else:
                module = typ.__module__
            return module + "." + typ.__name__
    except AttributeError:
        return str(typ)
261bf174931580230717abca93fe172e166cc1e8
16
utils.py
150
Add mild typing to common utils functions (#8848)
36,605
0
156
88
29
156,222
42
dask
12
dask/utils.py
Python
28
{ "docstring": "\n Return the name of a type\n\n Examples\n --------\n >>> typename(int)\n 'int'\n\n >>> from dask.core import literal\n >>> typename(literal)\n 'dask.core.literal'\n >>> typename(literal, short=True)\n 'dask.literal'\n ", "language": "en", "n_whitespaces": 57, "n_words": 23, "vocab_size": 20 }
https://github.com/dask/dask.git
1
test_batch_mapper_pandas_data_format
def test_batch_mapper_pandas_data_format(ds_with_expected_pandas_numpy_df):
    ds, expected_df, expected_numpy_df = ds_with_expected_pandas_numpy_df
9c39a28ba2f6221ffd8327fa21cb8294f0390fee
7
test_batch_mapper.py
23
[AIR][Numpy] Add numpy narrow waist to `Preprocessor` and `BatchMapper` (#28418) Co-authored-by: Eric Liang <ekhliang@gmail.com> Co-authored-by: Clark Zinzow <clarkzinzow@gmail.com> Co-authored-by: Amog Kamsetty <amogkamsetty@yahoo.com>
28,613
0
13
145
7
128,150
7
ray
5
python/ray/data/tests/test_batch_mapper.py
Python
20
{ "docstring": "Tests batch mapper functionality for pandas data format.\n\n Note:\n For single column pandas dataframes, we automatically convert it to\n single column tensor with column name as `__value__`.\n ", "language": "en", "n_whitespaces": 47, "n_words": 27, "vocab_size": 23 }
https://github.com/ray-project/ray.git
3
lexer
def lexer(self) -> Optional[Lexer]:
    if isinstance(self._lexer, Lexer):
        return self._lexer
    try:
        return get_lexer_by_name(
            self._lexer,
            stripnl=False,
            ensurenl=True,
            tabsize=self.tab_size,
        )
    except ClassNotFound:
        return None
f3166e673fe8d40277b804d35d77dcdb760fc3b3
11
syntax.py
83
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,587
0
153
54
19
20,845
21
pipenv
12
pipenv/patched/notpip/_vendor/rich/syntax.py
Python
16
{ "docstring": "The lexer for this syntax, or None if no lexer was found.\n\n Tries to find the lexer by name if a string was passed to the constructor.\n ", "language": "en", "n_whitespaces": 41, "n_words": 27, "vocab_size": 21 }
https://github.com/pypa/pipenv.git
2
get_placement_group_id
def get_placement_group_id(self) -> Optional[str]:
    pg_id = self.worker.placement_group_id
    return pg_id.hex() if not pg_id.is_nil() else None
90cea203befa8f2e86e9c1c18bb3972296358e7b
9
runtime_context.py
55
Ray 2.0 API deprecation (#26116) Ray 2.0 API deprecation for: ray.remote(): placement_group ray.remote(): placement_group_bundle_index ray.remote(): placement_group_capture_child_tasks ray.get_dashboard_url() ray.get_resource_ids() ray.disconnect() ray.connect() ray.util.ActorGroup ray.util.ActorPool Add get_xx_id() to return hex (rather than object), and then deprecate the xx_id() (which returns Cython object): the xx here can be node, task etc. ray start: --plasma-store-socket-name ray start: --raylet-socket-name
27,934
0
35
33
14
125,637
14
ray
9
python/ray/runtime_context.py
Python
8
{ "docstring": "Get the current Placement group ID of this worker.\n\n Returns:\n The current placement group id in hex format of this worker.\n ", "language": "en", "n_whitespaces": 46, "n_words": 21, "vocab_size": 16 }
https://github.com/ray-project/ray.git
1
test_array_vs_scalar_is_equal
def test_array_vs_scalar_is_equal(self):
    a = np.array([1., 1., 1.])
    b = 1.
    self._test_equal(a, b)
cafec60a5e28af98fb8798049edd7942720d2d74
9
test_utils.py
50
ENH: Add strict parameter to assert_array_equal. (#21595) Fixes #9542 Co-authored-by: Bas van Beek <43369155+BvB93@users.noreply.github.com>
38,756
0
40
35
11
160,837
12
numpy
7
numpy/testing/tests/test_utils.py
Python
4
{ "docstring": "Test comparing an array with a scalar when all values are equal.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/numpy/numpy.git
5
generate_tex_file
def generate_tex_file(expression, environment=None, tex_template=None):
    if tex_template is None:
        tex_template = config["tex_template"]
    if environment is not None:
        output = tex_template.get_texcode_for_expression_in_env(expression, environment)
    else:
        output = tex_template.get_texcode_for_expression(expression)

    tex_dir = config.get_dir("tex_dir")
    if not os.path.exists(tex_dir):
        os.makedirs(tex_dir)

    result = os.path.join(tex_dir, tex_hash(output)) + ".tex"
    if not os.path.exists(result):
        logger.info(f"Writing {expression} to %(path)s", {"path": f"{result}"})
        with open(result, "w", encoding="utf-8") as outfile:
            outfile.write(output)
    return result
5b11a0e48b5564cdf02c11dd177f7f5c9f0b9f7a
14
tex_file_writing.py
245
Improved Error in :mod:`.utils.tex_file_writing` (#2574) * Better Error and insight * Do not use keywords as identifiers * add_tests * Nasty comma * Windows does its own thing * Use os.path.join for windows * Do not log path * Include Insights * Full stop. Co-authored-by: Darylgolden <darylgolden@gmail.com> * Full stop to test data. Co-authored-by: Darylgolden <darylgolden@gmail.com>
46,189
0
135
140
41
189,736
55
manim
23
manim/utils/tex_file_writing.py
Python
16
{ "docstring": "Takes a tex expression (and an optional tex environment),\n and returns a fully formed tex file ready for compilation.\n\n Parameters\n ----------\n expression : :class:`str`\n String containing the TeX expression to be rendered, e.g. ``\\\\sqrt{2}`` or ``foo``\n environment : Optional[:class:`str`], optional\n The string containing the environment in which the expression should be typeset, e.g. ``align*``\n tex_template : Optional[:class:`~.TexTemplate`], optional\n Template class used to typesetting. If not set, use default template set via `config[\"tex_template\"]`\n\n Returns\n -------\n :class:`str`\n Path to generated TeX file\n ", "language": "en", "n_whitespaces": 138, "n_words": 80, "vocab_size": 59 }
https://github.com/ManimCommunity/manim.git
5
has_leading_dir
def has_leading_dir(paths):
    # type: (Iterable[str]) -> bool
    common_prefix = None
    for path in paths:
        prefix, rest = split_leading_dir(path)
        if not prefix:
            return False
        elif common_prefix is None:
            common_prefix = prefix
        elif prefix != common_prefix:
            return False
    return True
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
unpacking.py
76
upd; format
12,506
0
114
45
29
61,320
38
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_internal/utils/unpacking.py
Python
11
{ "docstring": "Returns true if all the paths have the same leading path name\n (i.e., everything is in one subdirectory in an archive)", "language": "en", "n_whitespaces": 23, "n_words": 21, "vocab_size": 19 }
https://github.com/jindongwang/transferlearning.git
4
test_stream_with_splitting_properties_with_new_record
def test_stream_with_splitting_properties_with_new_record(self, requests_mock, common_params, api, fake_properties_list):
    parsed_properties = list(split_properties(fake_properties_list))
    self.set_mock_properties(requests_mock, "/properties/v2/deal/properties", fake_properties_list)

    test_stream = Deals(**common_params)

    deal_stage_history_response = {
        "deals": [
            {
                "portalId": 123,
                "dealId": 111,
                "isDeleted": False,
                "associations": None,
                "properties": {
                    "dealstage": {
                        "value": "appointmentscheduled",
                        "timestamp": 1610533842221,
                        "source": "API",
                        "sourceId": None,
                        "updatedByUserId": None,
                        "versions": [
                            {
                                "name": "dealstage",
                                "value": "appointmentscheduled",
                                "timestamp": 1610533842221,
                                "source": "API",
                                "sourceVid": [],
                                "requestId": "19f07c43-b187-4ab6-9fab-4a0f261f0a8c",
                            }
                        ],
                    }
                },
                "stateChanges": [],
            },
            {
                "portalId": 123,
                "dealId": 112,
                "isDeleted": False,
                "associations": None,
                "properties": {
                    "dealstage": {
                        "value": "appointmentscheduled",
                        "timestamp": 1610533911154,
                        "source": "API",
                        "sourceId": None,
                        "updatedByUserId": None,
                        "versions": [
                            {
                                "name": "dealstage",
                                "value": "appointmentscheduled",
                                "timestamp": 1610533911154,
                                "source": "API",
                                "sourceVid": [],
                                "requestId": "41a1eeff-569b-4193-ba80-238d3bd13f56",
                            }
                        ],
                    }
                },
                "stateChanges": [],
            },
        ]
    }
    requests_mock.register_uri(
        "GET",
        test_stream._stage_history.path(),
        [
            {
                "json": deal_stage_history_response,
                "status_code": 200,
            }
        ],
    )

    ids_list = ["6043593519", "1092593519", "1092593518", "1092593517", "1092593516"]
    for property_slice in parsed_properties:
        record_responses = [
            {
                "json": {
                    "results": [
                        {**self.BASE_OBJECT_BODY, **{"id": id, "properties": {p: "fake_data" for p in property_slice}}}
                        for id in ids_list
                    ],
                    "paging": {},
                },
                "status_code": 200,
            }
        ]
        requests_mock.register_uri("GET", f"{test_stream.url}?properties={','.join(property_slice)}", record_responses)
        ids_list.append("1092593513")

    stream_records = list(test_stream.read_records(sync_mode=SyncMode.incremental))

    assert len(stream_records) == 6


@pytest.fixture(name="configured_catalog")
2282a4ae0221b1fb88e16eca8bc14a166998d2d2
@pytest.fixture(name="configured_catalog")
21
test_source.py
673
🎉 Source Hubspot: Migrate to CDK (#10177) * migrate SourceHubspot to cdk * refactor discover method * change method name * deleted Client class * remove comment * added get_updated_state * fix setting initial state * fix stream_state dict key * fix cursor_field * change check test case status * refactor streams method * remove comment * remove TODOs * remove comments * fix get_updated_state * refactor chunk_read * override _read_incremental * fix unit tests * remove comments * fix test_check_connection_backoff_on_server_error * fix test_check_connection_backoff_on_server_error 2 * fix test_check_connection_backoff_on_limit_reached * fix unit tests * clear comments * override read method on Source * added comments to overriding methods * some improvements * reafactor overridden _read_incremental * format code * refactor discovery * remove discover * format code 2 * added return types * refactor template stream classes * remove comments * remove _name field * rename api.py to streams.py * move to HttpStream * refactor FormSubmissions * refactor Campaings * refactor ContactsListMemberships * CRMSearchStream refactor * CRMSearchStream refactor 2 * CRMObjectStream refactor * DealStageHistoryStream refactor * Deals refactor * Engagements refactor * path method refactor * refactor authentication * fix check_connection * fix call parse_response * fix Engagements stream * fix CRMSearchStream * fix CRMObjectIncremental stream * override _read_incremental * remove commented codes * format code * update cdk version * fix cursor field * fix unit tests * removed client * clear comments * clear comments 2 * clear comments 3 * clear comments 4 * override backoff_time * remove comment * format code * backoff_time modified * refactor backoff_time * format code * added return typing * format code * removed cursor_paths * bump version * updated spec and def yaml Co-authored-by: auganbay <auganenu@gmail.com>
582
1
1,988
357
92
3,876
177
airbyte
34
airbyte-integrations/connectors/source-hubspot/unit_tests/test_source.py
Python
88
{ "docstring": "\n Check working stream `workflows` with large list of properties using new functionality with splitting properties\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 13 }
https://github.com/airbytehq/airbyte.git
1
test_cartesian_mix_types
def test_cartesian_mix_types(arrays, output_dtype):
    output = cartesian(arrays)
    assert output.dtype == output_dtype
86080bbd5fe9513cd42cf34148ea5907a1a9fc6c
8
test_extmath.py
34
ENH cartesian accepts mixed dtypes arrays (#25067) Co-authored-by: Christian Lorentzen <lorentzen.ch@gmail.com>
76,936
0
19
20
10
261,691
10
scikit-learn
6
sklearn/utils/tests/test_extmath.py
Python
3
{ "docstring": "Check that the cartesian product works with mixed types.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/scikit-learn/scikit-learn.git
1
get_optimizer
def get_optimizer(self) -> List:
    optimizer1 = get_optimizer(
        self.config.optimizer, self.config.optimizer_params, self.config.lr_gen, self.model_g
    )
    optimizer2 = get_optimizer(
        self.config.optimizer, self.config.optimizer_params, self.config.lr_disc, self.model_d
    )
    return [optimizer2, optimizer1]
a0a9279e4b8c306875b6437f853bdcc31ee5f1cf
10
gan.py
98
Fix GAN optimizer order commit 212d330929c22d0cd970be2023770dc1e39449ab Author: Edresson Casanova <edresson1@gmail.com> Date: Fri Apr 29 16:29:44 2022 -0300 Fix unit test commit 44456b0483bf42b1337a8e408ac17af38b26b1fa Author: Edresson Casanova <edresson1@gmail.com> Date: Fri Apr 29 07:28:39 2022 -0300 Fix style commit d545beadb932758eb7d1c632778fe317d467a6a4 Author: Edresson Casanova <edresson1@gmail.com> Date: Thu Apr 28 17:08:04 2022 -0300 Change order of HIFI-GAN optimizers to be equal than the original repository commit 657c5442e5339581e5c09168f5212112a342d97a Author: Edresson Casanova <edresson1@gmail.com> Date: Thu Apr 28 15:40:16 2022 -0300 Remove audio padding before mel spec extraction commit 76b274e6901495ffe62ec745fd8ca9fd010f4857 Merge: 379ccd7b 6233f4fc Author: Edresson Casanova <edresson1@gmail.com> Date: Wed Apr 27 07:28:48 2022 -0300 Merge pull request #1541 from coqui-ai/comp_emb_fix Bug fix in compute embedding without eval partition commit 379ccd7ba6b7e7b550e7d6acf55760c6d0623ba8 Author: WeberJulian <julian.weber@hotmail.fr> Date: Wed Apr 27 10:42:26 2022 +0200 returns y_mask in VITS inference (#1540) * returns y_mask * make style
77,223
0
87
66
18
262,448
23
TTS
12
TTS/vocoder/models/gan.py
Python
15
{ "docstring": "Initiate and return the GAN optimizers based on the config parameters.\n\n It returnes 2 optimizers in a list. First one is for the generator and the second one is for the discriminator.\n\n Returns:\n List: optimizers.\n ", "language": "en", "n_whitespaces": 67, "n_words": 35, "vocab_size": 26 }
https://github.com/coqui-ai/TTS.git
3
_update_slice
def _update_slice(operand, update, start_indices, update_dims):
    operand_shape = operand.shape
    operand = lax.pad(operand, jnp.array(0, operand.dtype),
                      [(0, d, 0) for d in update.shape])
    start_indices = tuple(jnp.int32(i) for i in start_indices)
    t = lax.dynamic_slice(operand, start_indices, update.shape)
    t = _mask(update, update_dims, t)
    operand = lax.dynamic_update_slice(operand, t, start_indices)
    return lax.slice(operand, [0] * operand.ndim, operand_shape)
b64e36b60fca9661ca2c8ae51a56fae07bf5efe6
11
eigh.py
173
Make QDWH-eig implementation jit-table. Move QDWH-eig from jax._src.scipy.eigh to jax._src.lax.eigh, in preparation for using it to back `lax.eigh` in a future change. PiperOrigin-RevId: 449362382
26,903
0
94
120
37
120,628
48
jax
22
jax/_src/lax/eigh.py
Python
10
{ "docstring": "\n Similar to lax.dynamic_update_slice, but handles padded updates where padding\n values should not overwrite existing values in the array.\n\n Args:\n operand: the array to update\n update: the padded array to write\n start_indices: the offset at which to write `update`.\n update_dims: the true dimensions of the padded update `update`. Only values\n inside the rectangle given by `update_dims` will be overwritten.", "language": "en", "n_whitespaces": 68, "n_words": 58, "vocab_size": 41 }
https://github.com/google/jax.git
4
async_step_link
async def async_step_link(self, user_input=None):
    errors = {}
    if user_input is not None:
        # Do not authenticate if the host is already configured
        self._async_abort_entries_match({CONF_HOST: self._host})

        try:
            info = await authenticate(
                self.hass, self._host, self._port, self._servers
            )
        except InvalidAuth:
            errors["base"] = "invalid_auth"
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception")
            errors["base"] = "unknown"
        else:
            return self.async_create_entry(title=DEFAULT_NAME, data=info)

    return self.async_show_form(step_id="link", errors=errors)
23264c8fd4a3f8bcff5961ed11cab6388d3c67a4
14
config_flow.py
182
Improve roon integraton (#66000) * Update to new library, revise discovery to work with new library, specify port to work with new library. * Move user gui to fallback. * Revise tests. * Handle old config. * Improve debugging, refresh faster on load. * Remove duplicate. * Bump library version. * Fix docstring per review. * Review suggestion Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Review suggestion Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Add check for duplicate host. * Add error message to strings. * Tidy. * Review changes. * Remove default. Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
95,433
0
260
107
46
296,453
56
core
22
homeassistant/components/roon/config_flow.py
Python
16
{ "docstring": "Handle linking and authenticting with the roon server.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
2
_test_migrate_stream_data
def _test_migrate_stream_data(self):
    self.apply_migration()

    instances = self.model.objects.all().annotate(
        raw_content=Cast(F("content"), JSONField())
    )

    for instance in instances:
        prev_content = self.original_raw_data[instance.id]
        self.assertBlocksRenamed(
            old_content=prev_content, new_content=instance.raw_content
        )

    # TODO test multiple operations applied in one migration
ad65741b94f36fbe793cf15f0ab002482070cdb6
14
test_migrations.py
113
Add tests for streamfield migration helpers Currently failing due to wagtail-factories being broken on Wagtail 4.1: https://github.com/wagtail/wagtail-factories/issues/65
17,011
0
126
68
26
80,125
29
wagtail
19
wagtail/tests/streamfield_migrations/test_migrations.py
Python
10
{ "docstring": "Test whether the stream data of the model instances have been updated properly\n\n Apply the migration and then query the raw data of the updated instances. Compare with\n original raw data and check whether all relevant `char1` blocks have been renamed and\n whether ids and other block types are intact.\n ", "language": "en", "n_whitespaces": 78, "n_words": 50, "vocab_size": 34 }
https://github.com/wagtail/wagtail.git
2
synchronized_output_end_sequence
def synchronized_output_end_sequence(self) -> str:
    if self.synchronised_output:
        return TERMINAL_MODES_ANSI_SEQUENCES[Mode.SynchronizedOutput]["end_sync"]
    return ""
7f27e70440c177b2a047b7f74a78ed5cd5b4b596
10
_terminal_features.py
45
[terminal buffering] Address PR feedback
44,257
0
42
25
9
183,574
10
textual
7
src/textual/_terminal_features.py
Python
13
{ "docstring": "\n Returns the ANSI sequence that we should send to the terminal to tell it that\n it should stop buffering the content we're about to send.\n If the terminal doesn't seem to support synchronised updates the string will be empty.\n\n Returns:\n str: the \"synchronised output stop\" ANSI sequence. It will be ab empty string\n if the terminal emulator doesn't seem to support the \"synchronised updates\" mode.\n ", "language": "en", "n_whitespaces": 127, "n_words": 65, "vocab_size": 41 }
https://github.com/Textualize/textual.git
2
num_rows
def num_rows(self) -> int:
    if self._indices is not None:
        return self._indices.num_rows
    return self._data.num_rows
445107bae3fcd6ac9eeae503232960fa4ba8ccfd
9
arrow_dataset.py
46
Add code examples to API docs (#4168) * add code examples for functions related to the base dataset class * ✨ make style * 🖍 make each code example fully reproducible where applicable * 🖍 show parameter usage for some functions * 🖍 add examples for DatasetInfo functions
21,960
0
45
28
12
104,765
13
datasets
5
src/datasets/arrow_dataset.py
Python
15
{ "docstring": "Number of rows in the dataset (same as :meth:`Dataset.__len__`).\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\", split=\"validation\")\n >>> ds.num_rows\n 1066\n ```\n ", "language": "en", "n_whitespaces": 81, "n_words": 25, "vocab_size": 23 }
https://github.com/huggingface/datasets.git
2
current_cover_position
def current_cover_position(self) -> int | None:
    position = None
    if self.roller.type != 7:
        position = 100 - self.roller.closed_percent
    return position
10dc38e0ec27f7bef990ee431459342f9c3c52b4
11
cover.py
55
Adjust CoverEntity property type hints in components (#73943) * Adjust CoverEntity property type hints in components * Revert changes to rflink * Revert changes to wilight
113,004
0
59
33
17
314,397
20
core
7
homeassistant/components/acmeda/cover.py
Python
9
{ "docstring": "Return the current position of the roller blind.\n\n None is unknown, 0 is closed, 100 is fully open.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 15 }
https://github.com/home-assistant/core.git
1
TextParser
def TextParser(*args, **kwds) -> TextFileReader:
    kwds["engine"] = "python"
    return TextFileReader(*args, **kwds)
e48c9c3973286e257f6da1966c91806d86b917e0
8
readers.py
49
TYP: more return annotations for io/* (#47524) * TYP: more return annotations for io/* * import future
40,021
0
20
27
10
167,449
11
pandas
4
pandas/io/parsers/readers.py
Python
58
{ "docstring": "\n Converts lists of lists/tuples into DataFrames with proper type inference\n and optional (e.g. string to datetime) conversion. Also enables iterating\n lazily over chunks of large files\n\n Parameters\n ----------\n data : file-like object or list\n delimiter : separator character to use\n dialect : str or csv.Dialect instance, optional\n Ignored if delimiter is longer than 1 character\n names : sequence, default\n header : int, default 0\n Row to use to parse column labels. Defaults to the first row. Prior\n rows will be discarded\n index_col : int or list, optional\n Column or columns to use as the (possibly hierarchical) index\n has_index_names: bool, default False\n True if the cols defined in index_col have an index name and are\n not in the header.\n na_values : scalar, str, list-like, or dict, optional\n Additional strings to recognize as NA/NaN.\n keep_default_na : bool, default True\n thousands : str, optional\n Thousands separator\n comment : str, optional\n Comment out remainder of line\n parse_dates : bool, default False\n keep_date_col : bool, default False\n date_parser : function, optional\n skiprows : list of integers\n Row numbers to skip\n skipfooter : int\n Number of line at bottom of file to skip\n converters : dict, optional\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the cell (not column) content, and return the\n transformed content.\n encoding : str, optional\n Encoding to use for UTF when reading/writing (ex. 'utf-8')\n squeeze : bool, default False\n returns Series if only one column.\n infer_datetime_format: bool, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. If the format\n can be inferred, there often will be a large parsing speed-up.\n float_precision : str, optional\n Specifies which converter the C engine should use for floating-point\n values. The options are `None` or `high` for the ordinary converter,\n `legacy` for the original lower precision pandas converter, and\n `round_trip` for the round-trip converter.\n\n .. versionchanged:: 1.2\n ", "language": "en", "n_whitespaces": 588, "n_words": 331, "vocab_size": 197 }
https://github.com/pandas-dev/pandas.git
2
_nodb_cursor
def _nodb_cursor(self):
    conn = self.__class__({**self.settings_dict, "NAME": None}, alias=NO_DB_ALIAS)
    try:
        with conn.cursor() as cursor:
            yield cursor
    finally:
        conn.close()
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
base.py
85
Refs #33476 -- Reformatted code with Black.
50,892
0
82
47
17
204,805
17
django
9
django/db/backends/base/base.py
Python
7
{ "docstring": "\n Return a cursor from an alternative connection to be used when there is\n no need to access the main database, specifically for test db\n creation/deletion. This also prevents the production database from\n being exposed to potential child threads while (or after) the test\n database is destroyed. Refs #10868, #17786, #16969.\n ", "language": "en", "n_whitespaces": 93, "n_words": 50, "vocab_size": 42 }
https://github.com/django/django.git
6
test_command_runner_interface_abstraction_violation
def test_command_runner_interface_abstraction_violation():
    cmd_runner_interface_public_functions = dir(CommandRunnerInterface)
    allowed_public_interface_functions = {
        func
        for func in cmd_runner_interface_public_functions
        if not func.startswith("_")
    }
    for subcls in [SSHCommandRunner, DockerCommandRunner, KubernetesCommandRunner]:
        subclass_available_functions = dir(subcls)
        subclass_public_functions = {
            func for func in subclass_available_functions if not func.startswith("_")
        }
        assert allowed_public_interface_functions == subclass_public_functions
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
14
test_command_runner.py
108
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,533
0
117
66
23
131,451
42
ray
13
python/ray/tests/test_command_runner.py
Python
13
{ "docstring": "Enforces the CommandRunnerInterface functions on the subclasses.\n\n This is important to make sure the subclasses do not violate the\n function abstractions. If you need to add a new function to one of\n the CommandRunnerInterface subclasses, you have to add it to\n CommandRunnerInterface and all of its subclasses.\n ", "language": "en", "n_whitespaces": 62, "n_words": 47, "vocab_size": 32 }
https://github.com/ray-project/ray.git
5
resolve_granularity
def resolve_granularity(self) -> Granularity:
    start = cast(datetime, self.params["start"])
    end = cast(datetime, self.params["end"])
    duration = (end - start).seconds

    # TODO: could probably allow some leeway on the start & end (a few minutes) and use a bigger granularity
    # eg. yesterday at 11:59pm to tomorrow at 12:01am could still use the day bucket

    # Query is at least an hour
    if start.minute == end.minute == 0 and duration % 3600 == 0:
        # we're going from midnight -> midnight which aligns with our daily buckets
        if start.hour == end.hour == 0 and duration % 86400 == 0:
            granularity = 86400
        # we're roughly going from start of hour -> next which aligns with our hourly buckets
        else:
            granularity = 3600
    # We're going from one random minute to another, we could use the 10s bucket, but no reason for that precision
    # here
    else:
        granularity = 60
    return Granularity(granularity)
cf30c11a194aa5e61d8d7c7fc506764f846fcf82
11
builder.py
160
feat(MEP): Add initial framework for metric queries (#31649) - This adds a MetricsQueryBuilder, which works very similarily to our QueryBuilder, but with specific handlers for how metrics construct queries - This MetricsQueryBuilder does not yet construct snql queries, and will not because table queries will require multiple queries to construct similar table data - that is, if we want [transaction, p95, count_unique(user)], we need a query against distributions with [transaction, p95] followed by a second query for [transaction, count_unique(user)] against the sets table - This is so we can maintain a sortby
19,289
0
317
95
91
96,190
148
sentry
13
src/sentry/search/events/builder.py
Python
20
{ "docstring": "Granularity impacts metric queries even when they aren't timeseries because the data needs to be\n pre-aggregated\n\n Granularity is determined by checking the alignment of our start & end timestamps with the timestamps in\n snuba. eg. we can only use the daily granularity if the query starts and ends at midnight\n Seconds are ignored under the assumption that there currently isn't a valid use case to have\n to-the-second accurate information\n ", "language": "en", "n_whitespaces": 111, "n_words": 69, "vocab_size": 60 }
https://github.com/getsentry/sentry.git
2
test_vr_connector_respects_training_or_inference_vr_flags
def test_vr_connector_respects_training_or_inference_vr_flags(self):
    view_rq_dict = {
        "both": ViewRequirement(
            "obs", used_for_training=True, used_for_compute_actions=True
        ),
        "only_inference": ViewRequirement(
            "obs", used_for_training=False, used_for_compute_actions=True
        ),
        "none": ViewRequirement(
            "obs", used_for_training=False, used_for_compute_actions=False
        ),
        "only_training": ViewRequirement(
            "obs", used_for_training=True, used_for_compute_actions=False
        ),
    }

    obs_arr = np.array([0, 1, 2, 3])
    agent_data = dict(obs=obs_arr)
    data = AgentConnectorDataType(0, 1, agent_data)

    ctx = ConnectorContext(view_requirements=view_rq_dict)

    # TODO @jun What is the expected behavior of this test?
    for_action_expected_list = [
        # is_training = False
        SampleBatch({"both": obs_arr, "only_inference": obs_arr}),
        # is_training = True
        SampleBatch({"both": obs_arr, "only_inference": obs_arr}),
    ]
    for_training_expected_list = [
        # is_training = False
        None,
        # is_training = True
        agent_data,
    ]

    for is_training in [True, False]:
        c = ViewRequirementAgentConnector(ctx)
        c.is_training(is_training)
        processed = c([data])

        for_training = processed[0].data.for_training
        for_training_expected = for_training_expected_list[is_training]

        for_action = processed[0].data.for_action
        for_action_expected = for_action_expected_list[is_training]

        print("-" * 30)
        print(f"is_training = {is_training}")
        print("for action:")
        print(for_action)
        print("for training:")
        print(for_training)

        # TODO @jun is for_training expected to always be equal to data?
        check(for_training, for_training_expected)
        check(for_action, for_action_expected)
5030a4c1d384e4bb1a25169384d7465e718e99a5
12
test_agent.py
414
[RLlib] Simplify agent collector (#26803)
27,968
0
647
250
88
125,751
144
ray
30
rllib/connectors/tests/test_agent.py
Python
43
{ "docstring": "Tests that the connector respects the flags within view_requirements (i.e.\n used_for_training, used_for_compute_actions) under different is_training modes.\n For inference,\n the returned data should be state -> obs\n For training,\n the returned data should be the data itself. The higher level policy\n collector in env_runner will construct the proper data structure.\n ", "language": "en", "n_whitespaces": 110, "n_words": 49, "vocab_size": 37 }
https://github.com/ray-project/ray.git
8
fixture_dirs
def fixture_dirs(self):
    dirs = []
    fixture_dirs = settings.FIXTURE_DIRS
    if len(fixture_dirs) != len(set(fixture_dirs)):
        raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
    for app_config in apps.get_app_configs():
        app_label = app_config.label
        app_dir = os.path.join(app_config.path, "fixtures")
        if app_dir in fixture_dirs:
            raise ImproperlyConfigured(
                "'%s' is a default fixture directory for the '%s' app "
                "and cannot be listed in settings.FIXTURE_DIRS."
                % (app_dir, app_label)
            )
        if self.app_label and app_label != self.app_label:
            continue
        if os.path.isdir(app_dir):
            dirs.append(app_dir)
    dirs.extend(fixture_dirs)
    dirs.append("")
    return [os.path.realpath(d) for d in dirs]
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
loaddata.py
225
Refs #33476 -- Reformatted code with Black.
50,825
0
311
134
57
204,647
72
django
22
django/core/management/commands/loaddata.py
Python
21
{ "docstring": "\n Return a list of fixture directories.\n\n The list contains the 'fixtures' subdirectory of each installed\n application, if it exists, the directories in FIXTURE_DIRS, and the\n current directory.\n ", "language": "en", "n_whitespaces": 63, "n_words": 27, "vocab_size": 23 }
https://github.com/django/django.git
1
get_config_file_for_auto_config
def get_config_file_for_auto_config(self) -> Optional[Text]:
    return self.config_file
6339856514897056716bb531acb8489c9cf05d26
6
importer.py
26
Add support for different recipes (#10641) * Add support for different recipes Fixes https://github.com/RasaHQ/rasa/issues/10473 * Update docs/docs/graph-recipe.mdx Co-authored-by: Joe Juzl <joejuzl@gmail.com>
38,195
0
20
15
6
159,323
6
rasa
5
rasa/shared/importers/importer.py
Python
3
{ "docstring": "Returns config file path for auto-config only if there is a single one.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/RasaHQ/rasa.git
1
receive_file
def receive_file(filename="example.txt"):
    with open(filename, "wb") as out_file:
        ftp.retrbinary("RETR " + filename, out_file.write, 1024)
    ftp.quit()
f0af0c43340763724f139fa68aa1e5a9ffe458b4
11
ftp_send_receive.py
69
refactor: clean code Signed-off-by: slowy07 <slowy.arfy@gmail.com>
4,380
0
30
36
14
22,626
14
Python
8
ftp_send_receive.py
Python
4
{ "docstring": "\n\tThe file which will be sent via the FTP server\n\tThe file send will be send to the current working directory\n", "language": "en", "n_whitespaces": 19, "n_words": 21, "vocab_size": 15 }
https://github.com/geekcomputers/Python.git
2
get_queryset
def get_queryset(self):
    qs = self.model_admin.get_queryset(self.request)
    qs = qs.complex_filter(self.source_field.get_limit_choices_to())
    qs, search_use_distinct = self.model_admin.get_search_results(
        self.request, qs, self.term
    )
    if search_use_distinct:
        qs = qs.distinct()
    return qs
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
autocomplete.py
104
Refs #33476 -- Reformatted code with Black.
50,441
0
94
64
16
203,545
23
django
12
django/contrib/admin/views/autocomplete.py
Python
9
{ "docstring": "Return queryset based on ModelAdmin.get_search_results().", "language": "en", "n_whitespaces": 4, "n_words": 5, "vocab_size": 5 }
https://github.com/django/django.git
1
test_hub_not_support_wireless
async def test_hub_not_support_wireless(hass, mock_device_registry_devices):
    await setup_mikrotik_entry(hass, support_wireless=False)

    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1
    assert device_1.state == "home"
    # device_2 is added from DHCP
    device_2 = hass.states.get("device_tracker.device_2")
    assert device_2
    assert device_2.state == "home"
b09aaba421d6d6178d582bef9ea363017e55639d
9
test_device_tracker.py
95
Add type hints and code cleanup for mikrotik (#74296) * Add type hints and code cleanup for mikrotik * update test and increase coverage * move setup_mikrotik_entry to __init__.py
114,071
0
58
53
22
315,483
31
core
10
tests/components/mikrotik/test_device_tracker.py
Python
8
{ "docstring": "Test device_trackers created when hub doesn't support wireless.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
1
__dask_postcompute__
def __dask_postcompute__(self) -> tuple[PostComputeCallable, tuple]:
    raise NotImplementedError("Inheriting class must implement this method.")
1e783d9a714160e968936cb22d54d085959ab09e
8
typing.py
32
Collection Protocol (#8674) [PEP 544](https://www.python.org/dev/peps/pep-0544/) introduces the `Protocol` class to the `typing` module in Python 3.8 (the soon be the minimum supported version, https://github.com/dask/community/issues/213). Writing new Dask collections for [dask-awkward](https://github.com/ContinuumIO/dask-awkward/) has had me thinking about working on a `DaskCollection` protocol. I imagine the benefits to be: - usage with static type checkers - other activity in this area at - #8295 - #8706 - #8854 - Python supporting IDEs take advantage of typing - self-documenting; some improvements to [the custom collections page](https://docs.dask.org/en/latest/custom-collections.html) of the docs. The protocol docs can be autogenerated and added to that page. - purely opt-in feature The `typing.runtime_checkable` decorator allows use of `isinstance(x, DaskCollection)` in any code base that uses Dask collections; for example: ```python >>> from dask.typing import DaskCollection >>> import dask.array as da >>> x = da.zeros((10, 3)) >>> isinstance(x, DaskCollection) True ``` (though this is an order of magnitude slower than `dask.base.is_dask_collection` which only checks for `x.__dask_graph__() is not None`; static typing checking & built-in interface documentation are the core benefits IMO) Something else that came up in the brief discussion on a call last week was having `{Scheduler,Worker,Nanny}Plugin` protocols in `distributed`; and perhaps those are better places to start introducing protocols to Dask since on the user side typically more folks would write plugins than new collections.
36,663
0
26
18
12
156,510
12
dask
5
dask/typing.py
Python
23
{ "docstring": "Finalizer function and optional arguments to construct final result.\n\n Upon computation each key in the collection will have an in\n memory result, the postcompute function combines each key's\n result into a final in memory representation. For example,\n dask.array.Array concatenates the arrays at each chunk into a\n final in-memory array.\n\n Returns\n -------\n PostComputeCallable\n Callable that recieves the sequence of the results of each\n final key along with optional arguments. An example signature\n would be ``finalize(results: Sequence[Any], *args)``.\n tuple[Any, ...]\n Optional arguments passed to the function following the\n key results (the `*args` part of the\n ``PostComputeCallable``. If no additional arguments are to\n be passed then this must be an empty tuple.\n\n ", "language": "en", "n_whitespaces": 256, "n_words": 109, "vocab_size": 75 }
https://github.com/dask/dask.git
1
require_intel_extension_for_pytorch
def require_intel_extension_for_pytorch(test_case):
    return unittest.skipUnless(is_ipex_available(), "test requires Intel Extension for PyTorch")(test_case)
34097b3304d79ace845316d4929220623279c8bc
10
testing_utils.py
37
Extend Transformers Trainer Class to Enable CPU AMP and Integrate Intel Extension for PyTorch (#17138) * init PR * fix import ipex * minor fix on bf16 * refine optimizer * refine args notes * refine code * refine ipex optimize args * refine half_precision_backend * black format * isort format * isort format files * flake8 format * doc builder format * refine codes * remove jit and optim bits * black preview format * Update src/transformers/trainer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * refine code * refine notes * Update src/transformers/trainer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/trainer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * code refine * add ipex ut * add performance cpu doc * link to the cpu doc from main perf doc * install ipex into CI's docker * Update perf_train_cpu.mdx * Update docs/source/en/perf_train_cpu.mdx Co-authored-by: Stas Bekman <stas00@users.noreply.github.com> * Update perf_train_cpu.mdx * Update perf_train_cpu.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Stas Bekman <stas@stason.org> Co-authored-by: Stas Bekman <stas00@users.noreply.github.com>
5,681
0
16
20
10
31,114
10
transformers
5
src/transformers/testing_utils.py
Python
2
{ "docstring": "\n Decorator marking a test that requires Intel Extension for PyTorch.\n\n These tests are skipped when Intel Extension for PyTorch isn't installed.\n\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 18 }
https://github.com/huggingface/transformers.git
2
options_spec
def options_spec() -> str:
    if not hasattr(options_spec, 'ans'):
        OPTIONS = setattr(options_spec, 'ans', OPTIONS.format(
            appname=appname,
            conf_name=appname,
            config_help=CONFIG_HELP.format(appname=appname, conf_name=appname),
        ))
    ans: str = getattr(options_spec, 'ans')
    return ans
45bbe17559a541289699643ef0b541a2138a09d6
15
cli.py
105
Docs: Minor improvements to the kitty cli help documentation Add some text roles. Use `kitty --hold`. Use `appname` and `conf_name`. `appname` is also applied to the system-wide configuration path.
21,638
0
77
65
24
103,383
25
kitty
12
kitty/cli.py
Python
166
{ "docstring": "\n--class\ndest=cls\ndefault={appname}\ncondition=not is_macos\nSet the class part of the :italic:`WM_CLASS` window property. On Wayland, it\nsets the app id.\n\n\n--name\ncondition=not is_macos\nSet the name part of the :italic:`WM_CLASS` property. Defaults to using the\nvalue from :option:`{appname} --class`.\n\n\n--title -T\nSet the OS window title. This will override any title set by the program running\ninside kitty, permanently fixing the OS window's title. So only use this if you\nare running a program that does not set titles.\n\n\n--config -c\ntype=list\n{config_help}\n\n\n--override -o\ntype=list\nOverride individual configuration options, can be specified multiple times.\nSyntax: :italic:`name=value`. For example: :option:`{appname} -o` font_size=20\n\n\n--directory --working-directory -d\ndefault=.\nChange to the specified directory when launching.\n\n\n--detach\ntype=bool-set\ncondition=not is_macos\nDetach from the controlling terminal, if any.\n\n\n--session\nPath to a file containing the startup :italic:`session` (tabs, windows, layout,\nprograms). Use - to read from STDIN. See the :file:`README` file for details and\nan example.\n\n\n--hold\ntype=bool-set\nRemain open after child process exits. Note that this only affects the first\nwindow. You can quit by either using the close window shortcut or pressing any\nkey.\n\n\n--single-instance -1\ntype=bool-set\nIf specified only a single instance of :italic:`{appname}` will run. New\ninvocations will instead create a new top-level window in the existing\n:italic:`{appname}` instance. This allows :italic:`{appname}` to share a single\nsprite cache on the GPU and also reduces startup time. You can also have\nseparate groups of :italic:`{appname}` instances by using the :option:`{appname}\n--instance-group` option.\n\n\n--instance-group\nUsed in combination with the :option:`{appname} --single-instance` option. All\n:italic:`{appname}` invocations with the same :option:`{appname}\n--instance-group` will result in new windows being created in the first\n:italic:`{appname}` instance within that group.\n\n\n--wait-for-single-instance-window-close\ntype=bool-set\nNormally, when using :option:`{appname} --single-instance`, :italic:`{appname}`\nwill open a new window in an existing instance and quit immediately. With this\noption, it will not quit till the newly opened window is closed. Note that if no\nprevious instance is found, then :italic:`{appname}` will wait anyway,\nregardless of this option.\n\n\n--listen-on\nListen on the specified socket address for control messages. For example,\n:option:`{appname} --listen-on`=unix:/tmp/mykitty or\n:option:`{appname} --listen-on`=tcp:localhost:12345. On Linux systems, you can\nalso use abstract UNIX sockets, not associated with a file, like this:\n:option:`{appname} --listen-on`=unix:@mykitty. Environment variables are\nexpanded and relative paths are resolved with respect to the temporary\ndirectory. To control kitty, you can send commands to it with\n:italic:`{appname} @` using the :option:`{appname} @ --to` option to specify\nthis address. Unless you enabled :opt:`allow_remote_control` in\n:file:`{conf_name}.conf`, this option will be ignored. Note that if you run\n:italic:`{appname} @` within a kitty window, there is\nno need to specify the :option:`{appname} @ --to` option as it will\nautomatically read from the environment. 
For UNIX sockets, this can also be\nspecified in :file:`{conf_name}.conf`.\n\n\n--start-as\ntype=choices\ndefault=normal\nchoices=normal,fullscreen,maximized,minimized\nControl how the initial kitty window is created.\n\n\n# Debugging options\n\n--version -v\ntype=bool-set\nThe current {appname} version.\n\n\n--dump-commands\ntype=bool-set\nOutput commands received from child process to STDOUT.\n\n\n--replay-commands\nReplay previously dumped commands. Specify the path to a dump file previously\ncreated by :option:`{appname} --dump-commands`. You\ncan open a new kitty window to replay the commands with::\n\n {appname} --hold {appname} --replay-commands /path/to/dump/file\n\n\n--dump-bytes\nPath to file in which to store the raw bytes received from the child process.\n\n\n--debug-rendering --debug-gl\ntype=bool-set\nDebug rendering commands. This will cause all OpenGL calls to check for errors\ninstead of ignoring them. Also prints out miscellaneous debug information.\nUseful when debugging rendering problems.\n\n\n--debug-input --debug-keyboard\ndest=debug_keyboard\ntype=bool-set\nPrint out key and mouse events as they are received.\n\n\n--debug-font-fallback\ntype=bool-set\nPrint out information about the selection of fallback fonts for characters not\npresent in the main font.\n\n\n--watcher\nThis option is deprecated in favor of the :opt:`watcher` option in\n:file:`{conf_name}.conf` and should not be used.\n\n\n--execute -e\ntype=bool-set\n!\n", "language": "en", "n_whitespaces": 511, "n_words": 617, "vocab_size": 336 }
https://github.com/kovidgoyal/kitty.git
16
execute
def execute():
    warehouse_perm = frappe.get_all(
        "User Permission",
        fields=["count(*) as p_count", "is_default", "user"],
        filters={"allow": "Warehouse"},
        group_by="user",
    )

    if not warehouse_perm:
        return

    execute_patch = False
    for perm_data in warehouse_perm:
        if perm_data.p_count == 1 or (
            perm_data.p_count > 1
            and frappe.get_all(
                "User Permission",
                filters={"user": perm_data.user, "allow": "warehouse", "is_default": 1},
                limit=1,
            )
        ):
            execute_patch = True
            break

    if not execute_patch:
        return

    for doctype in ["Sales Invoice", "Delivery Note"]:
        if not frappe.get_meta(doctype + " Item").get_field("target_warehouse").hidden:
            continue

        cond = ""
        if doctype == "Sales Invoice":
            cond = " AND parent_doc.update_stock = 1"

        data = frappe.db.sql(
            .format(doctype=doctype, cond=cond),
            as_dict=1,
        )

        if data:
            names = [d.child_name for d in data]

            frappe.db.sql(
                .format(doctype, ",".join(["%s"] * len(names))),
                tuple(names),
            )

            frappe.db.sql(
                .format(doctype, ",".join(["%s"] * len(names))),
                tuple(names),
            )

            parent_names = list(set([d.name for d in data]))

            for d in parent_names:
                doc = frappe.get_doc(doctype, d)

                if doc.docstatus != 1:
                    continue

                doc.docstatus = 2
                doc.update_stock_ledger()
                doc.make_gl_entries_on_cancel(repost_future_gle=False)

                # update stock & gl entries for submit state of PR
                doc.docstatus = 1
                doc.update_stock_ledger()
                doc.make_gl_entries()

    if frappe.get_meta("Sales Order Item").get_field("target_warehouse").hidden:
        frappe.db.sql(
        )

        frappe.db.sql(
        )
494bd9ef78313436f0424b918f200dab8fc7c20b
18
repost_stock_ledger_entries_for_target_warehouse.py
599
style: format code with black
14,274
0
105
351
103
66,656
171
erpnext
39
erpnext/patches/v12_0/repost_stock_ledger_entries_for_target_warehouse.py
Python
79
{ "docstring": " SELECT parent_doc.name as name, child_doc.name as child_name\n\t\t\tFROM\n\t\t\t\t`tab{doctype}` parent_doc, `tab{doctype} Item` child_doc\n\t\t\tWHERE\n\t\t\t\tparent_doc.name = child_doc.parent AND parent_doc.docstatus < 2\n\t\t\t\tAND child_doc.target_warehouse is not null AND child_doc.target_warehouse != ''\n\t\t\t\tAND child_doc.creation > '2020-04-16' {cond}\n\t\t UPDATE `tab{0} Item` set target_warehouse = null\n\t\t\t\tWHERE name in ({1}) UPDATE `tabPacked Item` set target_warehouse = null\n\t\t\t\tWHERE parenttype = '{0}' and parent_detail_docname in ({1})\n\t\t\t UPDATE `tabSales Order Item` set target_warehouse = null\n\t\t\tWHERE creation > '2020-04-16' and docstatus < 2 UPDATE `tabPacked Item` set target_warehouse = null\n\t\t\tWHERE creation > '2020-04-16' and docstatus < 2 and parenttype = 'Sales Order' ", "language": "en", "n_whitespaces": 90, "n_words": 97, "vocab_size": 47 }
https://github.com/frappe/erpnext.git
6
build_wheel
def build_wheel(self):
    # type: () -> S
    need_delete = False
    if not self.pyproject.exists():
        if not self.build_requires:
            build_requires = '"setuptools", "wheel"'
        else:
            build_requires = ", ".join(
                ['"{0}"'.format(r) for r in self.build_requires]
            )
        self.pyproject.write_text(
            str(
                .format(build_requires, self.build_backend).strip()
            )
        )
        need_delete = True

    parsed = urlparse(str(self.ireq.link))
    subdir = parse_qs(parsed.fragment).get('subdirectory', [])
    if subdir:
        directory = f"{self.base_dir}/{subdir[0]}"
    else:
        directory = self.base_dir

    result = build_pep517(
        directory,
        self.extra_kwargs["build_dir"],
        config_settings=self.pep517_config,
        dist_type="wheel",
    )
    if need_delete:
        self.pyproject.unlink()
    return result


# noinspection PyPackageRequirements
2669b4ce0696de02610cbea1b7547d53cead85bb
17
setup_info.py
279
patch newly occuring test failure where the base_dir does not contain the subdirectory.
2,994
0
440
156
53
19,478
74
pipenv
30
pipenv/vendor/requirementslib/models/setup_info.py
Python
36
{ "docstring": "\n[build-system]\nrequires = [{0}]\nbuild-backend = \"{1}\"\n ", "language": "en", "n_whitespaces": 20, "n_words": 7, "vocab_size": 6 }
https://github.com/pypa/pipenv.git
1
make_rev_options
def make_rev_options(cls, rev=None, extra_args=None):
    # type: (Optional[str], Optional[CommandArgs]) -> RevOptions
    return RevOptions(cls, rev, extra_args=extra_args)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
8
versioncontrol.py
39
upd; format
12,556
0
35
25
14
61,411
14
transferlearning
5
.venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py
Python
2
{ "docstring": "\n Return a RevOptions object.\n\n Args:\n rev: the name of a revision to install.\n extra_args: a list of extra options.\n ", "language": "en", "n_whitespaces": 59, "n_words": 19, "vocab_size": 16 }
https://github.com/jindongwang/transferlearning.git
14
generate_module_groups
def generate_module_groups(self) -> Dict[int, List[str]]:
    assert self.bound_model is not None, 'No model bounded in this compressor, please use Compressor.reset(model, config_list) to set it.'
    assert self.config_list is not None, 'No config_list set in this compressor, please use Compressor.reset(model, config_list) to set it.'

    self._unwrap_model()

    module_groups = {}
    for name, module in self.bound_model.named_modules():
        if module == self.bound_model:
            continue
        layer = LayerInfo(name, module)
        ret = None
        for idx, config in enumerate(self.config_list):
            config = config.copy()
            # expand config if key `default` is in config['op_types']
            if 'op_types' in config and 'default' in config['op_types']:
                expanded_op_types = []
                for op_type in config['op_types']:
                    if op_type == 'default':
                        expanded_op_types.extend(weighted_modules)
                    else:
                        expanded_op_types.append(op_type)
                config['op_types'] = expanded_op_types
            # check if condition is satisified
            if 'op_types' in config and layer.type not in config['op_types']:
                continue
            if 'op_names' in config and layer.name not in config['op_names']:
                continue
            ret = (idx, config)
        if ret is not None and 'exclude' not in ret[1]:
            module_groups.setdefault(ret[0], [])
            module_groups[ret[0]].append(name)

    self._wrap_model()
    return module_groups
cbac2c5c0f7606aca8ccf08fbd418ffe3adfe427
18
compressor.py
377
[Compression] fix typehints (#4800)
24,731
0
606
227
78
112,723
150
nni
28
nni/algorithms/compression/v2/pytorch/base/compressor.py
Python
38
{ "docstring": "\n Get all module names in each config in config_list.\n\n Returns\n -------\n Dict[int, List[str]]\n A dict. The key is the config idx in config_list, the value is the module name list. i.e., {1: ['layer.0', 'layer.2']}.\n ", "language": "en", "n_whitespaces": 81, "n_words": 34, "vocab_size": 27 }
https://github.com/microsoft/nni.git
1
send_welcome
def send_welcome(message):
    text = markdown = telebot.types.InlineKeyboardMarkup()
    markdown.add(
        telebot.types.InlineKeyboardButton(
            "Star us on GitHub",
            url="https://github.com/GamestonkTerminal/GamestonkTerminal",
        )
    )
    markdown.add(
        telebot.types.InlineKeyboardButton(
            "Join us on Discord", url="https://discord.gg/XHsYvvjjWg"
        )
    )

    bot.send_message(
        chat_id=message.chat.id, text=text, reply_markup=markdown, parse_mode="MARKDOWN"
    )
    # bot.reply_to(markdown, text, parse_mode="MARKDOWN")
    # bot.reply_to(message, text, parse_mode="MARKDOWN")


@bot.message_handler(commands=["cmds", "commands"])
635851cbf633a6ea9423ba31fe8f68ab34423a8c
@bot.message_handler(commands=["cmds", "commands"])
11
run_telegram.py
154
Telegram Bot (#1458) * added initial code for telegram * improving suggestions and messages * improving messages * typo Co-authored-by: teh_coderer <me@tehcoderer.com>
84,305
1
138
76
29
282,800
41
OpenBBTerminal
19
bots/telegram/run_telegram.py
Python
21
{ "docstring": "\nWelcome to *Gamestonk Terminal Bot* 🦋\nInvestment Research for Everyone\nCheck the available commands with /cmds\n ", "language": "en", "n_whitespaces": 17, "n_words": 16, "vocab_size": 16 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
6
filter
def filter(names, pat): result = [] pat = os.path.normcase(pat) match = _compile_pattern(pat) if os.path is posixpath: # normcase on posix is NOP. Optimize it away from the loop. for name in names: if match(name): result.append(name) else: for name in names: if match(os.path.normcase(name)): result.append(name) return result
8198943edd73a363c266633e1aa5b2a9e9c9f526
15
fnmatch.py
132
add python 3.10.4 for windows
54,738
0
139
80
34
217,382
45
XX-Net
12
python3.10.4/Lib/fnmatch.py
Python
13
{ "docstring": "Construct a list from those elements of the iterable NAMES that match PAT.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/XX-net/XX-Net.git
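A minimal usage sketch of the stdlib helper above; the file names are made up.

import fnmatch

names = ["data.csv", "notes.txt", "report.CSV", "image.png"]
# Keep only names matching a shell-style pattern. Matching goes through
# os.path.normcase, so it is case-insensitive on Windows but not on POSIX.
print(fnmatch.filter(names, "*.csv"))  # ['data.csv'] on POSIX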
1
test_stream_unsupported_by_bulk
def test_stream_unsupported_by_bulk(stream_config, stream_api, caplog): stream_name = "AcceptedEventRelation" stream = _generate_stream(stream_name, stream_config, stream_api) assert not isinstance(stream, BulkSalesforceStream)
0a3713a5a52995dc0dc205d8edfd097bf625899f
8
unit_test.py
50
Source Salesforce: Deprecate API Type parameter (#9302) * use BULK for the first sync, REST for incremental sync * if stream contains compound data or/and base64 use always REST * fix get stream state from connector state * fix integration test * refactor catalog name * format code * refactor unit tests * refactor unit tests 2 * format code 2 * Set additionalProperties to true not to break test temporarily * fix unit test and remove unnecessary filtering fields * bump version * updated spec and def yaml Co-authored-by: auganbay <auganenu@gmail.com>
469
0
28
31
15
3,401
16
airbyte
9
airbyte-integrations/connectors/source-salesforce/unit_tests/unit_test.py
Python
4
{ "docstring": "\n    Stream `AcceptedEventRelation` is not supported by the BULK API, so the REST API stream will be used for it.\n    ", "language": "en", "n_whitespaces": 25, "n_words": 18, "vocab_size": 18 }
https://github.com/airbytehq/airbyte.git
8
call_exception_handler
def call_exception_handler(self, context): if self._exception_handler is None: try: self.default_exception_handler(context) except (SystemExit, KeyboardInterrupt): raise except BaseException: # Second protection layer for unexpected errors # in the default implementation, as well as for subclassed # event loops with overloaded "default_exception_handler". logger.error('Exception in default exception handler', exc_info=True) else: try: self._exception_handler(self, context) except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: # Exception in the user set custom exception handler. try: # Let's try default handler. self.default_exception_handler({ 'message': 'Unhandled error in exception handler', 'exception': exc, 'context': context, }) except (SystemExit, KeyboardInterrupt): raise except BaseException: # Guard 'default_exception_handler' in case it is # overloaded. logger.error('Exception in default exception handler ' 'while handling an unexpected error ' 'in custom exception handler', exc_info=True)
8198943edd73a363c266633e1aa5b2a9e9c9f526
18
base_events.py
202
add python 3.10.4 for windows
55,979
0
708
113
69
220,367
115
XX-Net
12
python3.10.4/Lib/asyncio/base_events.py
Python
28
{ "docstring": "Call the current event loop's exception handler.\n\n The context argument is a dict containing the following keys:\n\n - 'message': Error message;\n - 'exception' (optional): Exception object;\n - 'future' (optional): Future instance;\n - 'task' (optional): Task instance;\n - 'handle' (optional): Handle instance;\n - 'protocol' (optional): Protocol instance;\n - 'transport' (optional): Transport instance;\n - 'socket' (optional): Socket instance;\n - 'asyncgen' (optional): Asynchronous generator that caused\n the exception.\n\n New keys maybe introduced in the future.\n\n Note: do not overload this method in an event loop subclass.\n For custom exception handling, use the\n `set_exception_handler()` method.\n ", "language": "en", "n_whitespaces": 228, "n_words": 91, "vocab_size": 64 }
https://github.com/XX-net/XX-Net.git
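A small sketch of how the method above is reached in practice: install a custom handler via set_exception_handler() and pass a context dict shaped like the docstring describes. The handler name and message are illustrative.

import asyncio

def my_handler(loop, context):
    # context uses the keys documented above ("message", "exception", ...)
    print("caught:", context.get("message"), context.get("exception"))

async def main():
    loop = asyncio.get_running_loop()
    loop.set_exception_handler(my_handler)
    loop.call_exception_handler({"message": "demo error", "exception": RuntimeError("boom")})

asyncio.run(main())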
1
test_sept_child
def test_sept_child() -> None: rows = 10_000 cols = 7 # these times and sizes are based on the above constants and Madhavas MacBook Pro 2019 expected_sept_mem_size = 0.8035125732421875 expected_sept_ser_size = 1.4993972778320312 macbook_pro_2019_ser_time = 0.03371272199999975 macbook_pro_2019_de_time = 0.02922678500000009 sept = make_sept(rows=rows, cols=cols) start = timeit.default_timer() ser = sy.serialize(sept, to_bytes=True) end = timeit.default_timer() time_ser = end - start start = timeit.default_timer() de = sy.deserialize(ser, from_bytes=True) end = timeit.default_timer() time_de = end - start assert sept == de current_sept_mem_size = size(sept) mem_diff = (current_sept_mem_size / expected_sept_mem_size * 100) - 100 current_sept_bytes_size = size(ser) bytes_diff = (current_sept_bytes_size / expected_sept_ser_size * 100) - 100 ser_time_diff = (time_ser / macbook_pro_2019_ser_time * 100) - 100 de_time_diff = (time_de / macbook_pro_2019_de_time * 100) - 100 print("SEPT Stats") print("==========") print("In-memory size of SEPT", size(sept)) print("Serialized size of SEPT", size(ser)) print(f"Serializing {rows}x{cols} took {time_ser} secs") print(f"Deserializing {rows}x{cols} took {time_de} secs") print("Current Results") print("===============") print(f"In-memory size delta: {mem_diff}%") print(f"Serialized size delta: {bytes_diff}%") print(f"Serializing time delta: {ser_time_diff}%") print(f"Deserializing time delta: {de_time_diff}%") # we want to assert that our calculated values are smaller than the old values with # some tolerance assert (current_sept_mem_size - expected_sept_mem_size) < 1e-3 assert (current_sept_bytes_size - expected_sept_ser_size) < 2e-3 # TODO: make time benchmarks stable (probably can't run in parallel) # assert (time_ser - macbook_pro_2019_ser_time) < 2e-1 # assert (time_de - macbook_pro_2019_de_time) < 2e-1
10ae1d589044a6ae4722ead7aedc63fcdc4923b5
10
tensor_serde_test.py
449
Started DPTensor resource optimization - Added initial REPT and SEPT benchmarking tests - Deleted unused old Tensor classes - Added pympler for memory size tests Co-authored-by: @IshanMi Co-authored-by: @rasswanth-s
45
0
345
251
123
71
216
PySyft
30
packages/syft/tests/syft/core/tensor/tensor_serde_test.py
Python
38
{ "docstring": "We need to benchmark both the size and time to serialize and deserialize SEPTs", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 12 }
https://github.com/OpenMined/PySyft.git
1
test_xcom_pull_after_deferral
def test_xcom_pull_after_deferral(self, create_task_instance, session): key = 'xcom_key' value = 'xcom_value' ti = create_task_instance( dag_id='test_xcom', schedule_interval='@monthly', task_id='test_xcom', pool='test_xcom', ) ti.run(mark_success=True) ti.xcom_push(key=key, value=value) ti.next_method = "execute" session.merge(ti) session.commit() ti.run(ignore_all_deps=True) assert ti.xcom_pull(task_ids='test_xcom', key=key) == value
8b687ec82a7047fc35410f5c5bb0726de434e749
10
test_taskinstance.py
165
Do not clear XCom when resuming from deferral (#22932)
9,082
0
160
96
28
47,362
32
airflow
20
tests/models/test_taskinstance.py
Python
16
{ "docstring": "\n tests xcom will not clear before a task runs its next method after deferral.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
https://github.com/apache/airflow.git
2
test_pisa_ssd_head_loss
def test_pisa_ssd_head_loss(self): s = 300 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, }] cfg = Config( dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), sampler=dict(type='PseudoSampler'), smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False)) pisa_ssd_head = PISASSDHead( num_classes=4, in_channels=(1, 1, 1, 1, 1, 1), anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=s, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32, 64, 100, 300], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), train_cfg=cfg) # PISA SSD head expects a multiple levels of features per image feats = ( torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0])) for stride in pisa_ssd_head.prior_generator.strides) cls_scores, bbox_preds = pisa_ssd_head.forward(feats) # test without isr and carl # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = pisa_ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) # When there is no truth, cls_loss and box_loss should all be zero. empty_cls_loss = sum(empty_gt_losses['loss_cls']) empty_box_loss = sum(empty_gt_losses['loss_bbox']) self.assertEqual( empty_cls_loss.item(), 0, 'there should be no cls loss when there are no true boxes') self.assertEqual( empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes') # When truth is non-empty then both cls and box loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) one_gt_losses = pisa_ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) onegt_cls_loss = sum(one_gt_losses['loss_cls']) onegt_box_loss = sum(one_gt_losses['loss_bbox']) self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero') self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero') pisa_ssd_head.train_cfg.update( dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) # test with isr and carl # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = pisa_ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) # When there is no truth, cls_loss and box_loss should all be zero. empty_cls_loss = sum(empty_gt_losses['loss_cls']) empty_box_loss = sum(empty_gt_losses['loss_bbox']) self.assertEqual( empty_cls_loss.item(), 0, 'there should be no cls loss when there are no true boxes') self.assertEqual( empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes') # When truth is non-empty then both cls and box loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) one_gt_losses = pisa_ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) onegt_cls_loss = sum(one_gt_losses['loss_cls']) onegt_box_loss = sum(one_gt_losses['loss_bbox']) self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero') self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero')
9d7511d8c35df1f9c13b17eb770136859bf370be
15
test_pisa_ssd_head.py
1,025
Update SSD and PISA-SSD model config
70,500
0
1,565
698
162
244,730
382
mmdetection
63
tests/test_models/test_dense_heads/test_pisa_ssd_head.py
Python
88
{ "docstring": "Tests pisa ssd head loss when truth is empty and non-empty.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/open-mmlab/mmdetection.git
2
get_installation_order
def get_installation_order(self, req_set): # type: (RequirementSet) -> List[InstallRequirement] # The current implementation, which we may change at any point # installs the user specified things in the order given, except when # dependencies must come earlier to achieve topological order. order = [] ordered_reqs = set() # type: Set[InstallRequirement]
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
8
resolver.py
36
upd; format
12,403
0
99
36
41
61,060
49
transferlearning
6
.venv/lib/python3.8/site-packages/pip/_internal/resolution/legacy/resolver.py
Python
7
{ "docstring": "Create the installation order.\n\n The installation order is topological - requirements are installed\n before the requiring thing. We break cycles at an arbitrary point,\n and make no other guarantees.\n ", "language": "en", "n_whitespaces": 57, "n_words": 29, "vocab_size": 27 }
https://github.com/jindongwang/transferlearning.git
3
bump_client_version
def bump_client_version(self): path = os.path.join(".", "client", "package.json") input_file = io.open(path, "r") try: package = json.loads(input_file.read().decode("utf-8")) except (ValueError) as e: print("Unable to read " + path + " " + e) # noqa raise SystemExit(1) package["version"] = __semver__ try: with io.open(path, "w", encoding="utf-8") as f: f.write(str(json.dumps(package, indent=2, ensure_ascii=False))) except (IOError) as e: print("Error setting the version for front-end assets: " + str(e)) # noqa raise SystemExit(1)
d10f15e55806c6944827d801cd9c2d53f5da4186
16
setup.py
247
Reformat with black
16,486
0
204
138
45
76,287
65
wagtail
26
wagtail/utils/setup.py
Python
15
{ "docstring": "\n Writes the current Wagtail version number into package.json\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/wagtail/wagtail.git
2
native_min_value
def native_min_value(self) -> float: return (self.max_value * -1) if self._is_bidirectional else 6
5b32eea3d04d223b01efddb5c13a88e540df8741
9
number.py
38
Add support for bidirectional chargers to Wallbox integration (#74313) * Add support for the Quasar bidirectional charger to the Wallbox integration, including ability to control charger while discharging, set a negative charge rate and monitor discharged amount * Make code more generic in order to support other bidirectional models in the future * Updates to files to comply with HA formatting rules * Change const file to fix black check failure * Remove unnecessary loop in number entity
115,406
0
26
23
12
316,830
12
core
5
homeassistant/components/wallbox/number.py
Python
3
{ "docstring": "Return the minimum available current based on charger type - some chargers can discharge.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
https://github.com/home-assistant/core.git
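A tiny sketch of the branch logic in the property above; the charger values are assumed, not taken from the integration.

max_value = 32            # assumed maximum current reported by the charger, in amps
is_bidirectional = True
native_min_value = (max_value * -1) if is_bidirectional else 6
print(native_min_value)   # -32 for a bidirectional charger, otherwise the fixed floor of 6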
1
test_cache_limit_ttl_greater_than_default_cache_ttl
def test_cache_limit_ttl_greater_than_default_cache_ttl(self): page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True) page1_url = page1.get_absolute_url() limit_page_cache_ttl_function = ".".join([PlaceholderCacheTestCase.__module__, limit_page_cache_ttl_test_500.__name__]) with self.settings(CMS_LIMIT_TTL_CACHE_FUNCTION=limit_page_cache_ttl_function): page1.publish('en') request = self.get_request(page1_url) request.current_page = Page.objects.get(pk=page1.pk) response = self.client.get(page1_url) self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control']) # noqa
c412e97acba65a2a68e70ca15ea950bd31f90d3e
12
test_cache.py
190
feat: add cache ttl extension point (#7299) Adds the setting `CMS_CACHE_LIMIT_TTL_CLASS` that should have a `limit_page_cache_ttl` method that would be called to limit the cache ttl of a page using business logic. Closes #7296
17,357
0
153
109
30
82,360
35
django-cms
26
cms/tests/test_cache.py
Python
11
{ "docstring": "\n        Test the `CMS_LIMIT_TTL_CACHE_FUNCTION` setting with a class that returns a value much\n        greater than the default value of 40 seconds.\n        ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 17 }
https://github.com/django-cms/django-cms.git
3
subordinate_to_user
def subordinate_to_user(self, user, site): # get user level from cms.utils.page_permissions import get_change_permissions_id_list from cms.utils.permissions import get_user_permission_level try: user_level = get_user_permission_level(user, site) except NoPermissionsException: return self.none() if user_level == ROOT_USER_LEVEL: return self.all() # get all permissions page_id_allow_list = get_change_permissions_id_list(user, site, check_global=False) # get permission set, but without objects targeting user, or any group # in which he can be qs = self.filter( page__id__in=page_id_allow_list, page__node__depth__gte=user_level, ) qs = qs.exclude(user=user).exclude(group__user=user) return qs
a3110e1ff24085373898c7d2a85f628abeb8518d
11
managers.py
168
Enabled isort workflow (#7200) * Ran isort * Enabled isort workflow Co-authored-by: Vinit Kumar <mail@vinitkumar.me>
17,339
0
229
102
53
82,288
69
django-cms
23
cms/models/managers.py
Python
16
{ "docstring": "Get all page permission objects on which user/group is lower in\n hierarchy than the given user and on which the given user can change permissions.\n\n !IMPORTANT, but exclude objects with the given user, or any group containing\n this user - he shouldn't be able to change his own permissions, because if\n he does, and removes some permissions from himself, he will not be able\n to add them anymore.\n\n Example:\n A\n / \\\n user B,E\n / \\\n C,X D,Y\n\n Gives permission nodes C,X,D,Y under user, so he can edit\n permissions if he has can_change_permission.\n\n Example:\n A,Y\n / \\\n user B,E,X\n / \\\n C,X D,Y\n\n Gives permission nodes C,D under user, so he can edit, but not\n X,Y anymore, because these users are on the same level or higher\n in the page hierarchy. (but only if the user has can_change_permission)\n\n Example:\n A\n / \\\n user B,E\n / | \\\n C,X D,Y user\n / \\\n I J,A\n\n User permissions can be assigned to multiple page nodes, so a merge of\n all of them is required. In this case the user can see permissions for\n users C,X,D,Y,I,J but not A, because user A is higher in the hierarchy.\n\n If a permission object holds a group, this permission object can be visible\n to the user only if all of the group members are lower in the hierarchy. If any\n of the members is higher than the given user, this entry must stay invisible.\n\n If the user is a superuser, or has global can_change_permission permissions,\n show him everything.\n\n The result of this is used in the admin for the page permissions inline.\n ", "language": "en", "n_whitespaces": 1085, "n_words": 248, "vocab_size": 119 }
https://github.com/django-cms/django-cms.git
2
test_finditer
def test_finditer(): matches = list(finditer(re.compile(rb"\d+"), b"0123 4567 890 12 3 4")) aligned = [i.group() for i in matches] assert aligned == [b"0123", b"567", b"890", b"12"]
dc12cb59559f99110917bcbd21c9960ab57d994f
13
test_bytecode.py
84
tests: fix test_finditer Have the test use bytestrings instead of strings. Also assert that the bytecode string passed to bytecode.finditer() is in fact a bytestring.
77,525
0
37
52
23
263,956
25
pyinstaller
9
tests/unit/test_bytecode.py
Python
4
{ "docstring": "\n Test that bytecode.finditer() yields matches only that start on an even byte (``match.start() % 2 == 0``).\n\n There are 3 permutations here when considering a match:\n - A match starts on an even byte:\n That's good! Include that sequence.\n - A single character match starts on an odd byte:\n Ignore it. It's a false positive.\n - A multi-character match starts on an odd byte:\n This match will be a false positive but there may be a genuine match shortly afterwards (in the case of the\n # test below - it'll be the next character) which overlaps with this one so we must override regex's\n behaviour of ignoring overlapping matches to prevent these from getting lost.\n ", "language": "en", "n_whitespaces": 169, "n_words": 115, "vocab_size": 82 }
https://github.com/pyinstaller/pyinstaller.git
9
cartesian_product
def cartesian_product(X) -> list[np.ndarray]: msg = "Input must be a list-like of list-likes" if not is_list_like(X): raise TypeError(msg) for x in X: if not is_list_like(x): raise TypeError(msg) if len(X) == 0: return [] lenX = np.fromiter((len(x) for x in X), dtype=np.intp) cumprodX = np.cumproduct(lenX) if np.any(cumprodX < 0): raise ValueError("Product space too large to allocate arrays!") a = np.roll(cumprodX, 1) a[0] = 1 if cumprodX[-1] != 0: b = cumprodX[-1] / cumprodX else: # if any factor is empty, the cartesian product is empty b = np.zeros_like(cumprodX) return [tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)]
f65417656ba8c59438d832b6e2a431f78d40c21c
11
util.py
290
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
40,127
0
195
182
68
167,802
96
pandas
27
pandas/core/reshape/util.py
Python
42
{ "docstring": "\n Numpy version of itertools.product.\n Sometimes faster (for large inputs)...\n\n Parameters\n ----------\n X : list-like of list-likes\n\n Returns\n -------\n product : list of ndarrays\n\n Examples\n --------\n >>> cartesian_product([list('ABC'), [1, 2]])\n [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='<U1'), array([1, 2, 1, 2, 1, 2])]\n\n See Also\n --------\n itertools.product : Cartesian product of input iterables. Equivalent to\n nested for-loops.\n ", "language": "en", "n_whitespaces": 113, "n_words": 56, "vocab_size": 46 }
https://github.com/pandas-dev/pandas.git
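The helper above is a private pandas utility, so here is an equivalent-output sketch built only on itertools and numpy, reproducing the docstring example rather than calling the internal function.

import numpy as np
from itertools import product

X = [list("ABC"), [1, 2]]
# Build the full cross product, then split it back into per-input columns.
cols = list(zip(*product(*X)))
result = [np.array(c) for c in cols]
print(result)  # [array(['A', 'A', 'B', 'B', 'C', 'C'], ...), array([1, 2, 1, 2, 1, 2])]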
1
is_monotonic_decreasing
def is_monotonic_decreasing(self) -> bool: return self._engine.is_monotonic_decreasing
62a69beddbedde349891378992c902c0b9341a9f
7
base.py
25
DOC: Add numpydoc SS06 validation (#47885)
40,199
0
20
14
6
168,092
6
pandas
4
pandas/core/indexes/base.py
Python
14
{ "docstring": "\n Return a boolean if the values are equal or decreasing.\n\n Examples\n --------\n >>> Index([3, 2, 1]).is_monotonic_decreasing\n True\n >>> Index([3, 2, 2]).is_monotonic_decreasing\n True\n >>> Index([3, 1, 2]).is_monotonic_decreasing\n False\n ", "language": "en", "n_whitespaces": 98, "n_words": 27, "vocab_size": 20 }
https://github.com/pandas-dev/pandas.git
13
get_traceback_frame_variables
def get_traceback_frame_variables(self, request, tb_frame): # Loop through the frame's callers to see if the sensitive_variables # decorator was used. current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: if ( current_frame.f_code.co_name == "sensitive_variables_wrapper" and "sensitive_variables_wrapper" in current_frame.f_locals ): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. wrapper = current_frame.f_locals["sensitive_variables_wrapper"] sensitive_variables = getattr(wrapper, "sensitive_variables", None) break current_frame = current_frame.f_back cleansed = {} if self.is_active(request) and sensitive_variables: if sensitive_variables == "__ALL__": # Cleanse all variables for name in tb_frame.f_locals: cleansed[name] = self.cleansed_substitute else: # Cleanse specified variables for name, value in tb_frame.f_locals.items(): if name in sensitive_variables: value = self.cleansed_substitute else: value = self.cleanse_special_types(request, value) cleansed[name] = value else: # Potentially cleanse the request and any MultiValueDicts if they # are one of the frame variables. for name, value in tb_frame.f_locals.items(): cleansed[name] = self.cleanse_special_types(request, value) if ( tb_frame.f_code.co_name == "sensitive_variables_wrapper" and "sensitive_variables_wrapper" in tb_frame.f_locals ): # For good measure, obfuscate the decorated function's arguments in # the sensitive_variables decorator's frame, in case the variables # associated with those arguments were meant to be obfuscated from # the decorated function's frame. cleansed["func_args"] = self.cleansed_substitute cleansed["func_kwargs"] = self.cleansed_substitute return cleansed.items()
9c19aff7c7561e3a82978a272ecdaad40dda5c00
18
debug.py
355
Refs #33476 -- Reformatted code with Black.
51,719
0
757
209
105
206,808
195
django
19
django/views/debug.py
Python
34
{ "docstring": "\n Replace the values of variables marked as sensitive with\n stars (*********).\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
https://github.com/django/django.git
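For context, a hypothetical view showing the decorator this filter looks for on the stack; the view and variable names are invented, only the decorator import is the real Django API.

from django.views.decorators.debug import sensitive_variables

@sensitive_variables("card_number", "cvv")
def process_payment(request):
    card_number = request.POST["card_number"]
    cvv = request.POST["cvv"]
    # If this raises, the technical 500 page shows ********* for the two locals above.
    raise RuntimeError("payment backend unreachable")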
1
image2hmap
def image2hmap(self, image_tensor): return self.model.forward(image_tensor, training=False)
803b90729d25fda253011c505d0189e8e63cc039
8
DBNet.py
34
add dbnet
27,289
0
28
21
6
123,112
6
EasyOCR
6
easyocr/DBNet/DBNet.py
Python
2
{ "docstring": "\n        Run the model to obtain a heatmap tensor from an image tensor. The heatmap\n        tensor indicates the probability of each pixel being part of a text area.\n\n        Parameters\n        ----------\n        image_tensor : torch.tensor\n            Image tensor.\n\n        Returns\n        -------\n        torch.tensor\n            Probability heatmap tensor.\n        ", "language": "en", "n_whitespaces": 126, "n_words": 40, "vocab_size": 30 }
https://github.com/JaidedAI/EasyOCR.git
7
get_extended_attribute
def get_extended_attribute(value): # XXX: should we have an ExtendedAttribute TokenList? attribute = Attribute() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) if value and value[0] in EXTENDED_ATTRIBUTE_ENDS: raise errors.HeaderParseError( "expected token but found '{}'".format(value)) token, value = get_extended_attrtext(value) attribute.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) return attribute, value
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
_header_value_parser.py
164
add python 3.10.4 for windows
56,956
0
129
99
33
223,533
56
XX-Net
13
python3.10.4/Lib/email/_header_value_parser.py
Python
14
{ "docstring": " [CFWS] 1*extended_attrtext [CFWS]\n\n This is like the non-extended version except we allow % characters, so that\n we can pick up an encoded value as a single string.\n\n ", "language": "en", "n_whitespaces": 37, "n_words": 27, "vocab_size": 25 }
https://github.com/XX-net/XX-Net.git
1
test_table_options_language
def test_table_options_language(self): # default must always contain a language value block = TableBlock() self.assertIn("language", block.table_options) # French translation.activate("fr-fr") block_fr = TableBlock() self.assertEqual("fr-fr", block_fr.table_options["language"]) translation.activate("it") # Italian block_it = TableBlock() self.assertEqual("it", block_it.table_options["language"]) # table_options with language provided, different to environment block_with_lang = TableBlock(table_options={"language": "ja"}) self.assertNotEqual("it", block_with_lang.table_options["language"]) self.assertEqual("ja", block_with_lang.table_options["language"]) translation.activate("en")
d10f15e55806c6944827d801cd9c2d53f5da4186
12
tests.py
212
Reformat with black
16,063
0
167
113
38
73,597
48
wagtail
13
wagtail/contrib/table_block/tests.py
Python
13
{ "docstring": "\n        Test that the environment's language is used if no language is provided.\n        ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
https://github.com/wagtail/wagtail.git
2
get_static_batch_size
def get_static_batch_size(layer): batch_input_shape, _ = get_input_shape_and_dtype(layer) if batch_input_shape is not None: return tf.compat.v1.Dimension(batch_input_shape[0]).value return None
84afc5193d38057e2e2badf9c889ea87d80d8fbf
12
training_utils.py
62
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,855
0
34
38
14
271,831
15
keras
10
keras/engine/training_utils.py
Python
5
{ "docstring": "Gets the static batch size of a Layer.\n\n Args:\n layer: a `Layer` instance.\n\n Returns:\n The static batch size of a Layer.\n ", "language": "en", "n_whitespaces": 40, "n_words": 21, "vocab_size": 14 }
https://github.com/keras-team/keras.git
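A short sketch of the kind of layer this helper inspects, assuming the usual tf.keras API; the shapes are arbitrary.

import tensorflow as tf

# An InputLayer built with an explicit batch size carries a static
# batch_input_shape of (8, 16); get_static_batch_size() would return 8 for it.
layer = tf.keras.layers.InputLayer(input_shape=(16,), batch_size=8)
print(layer.get_config()["batch_input_shape"])  # (8, 16)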
4
modularity
def modularity(G, communities, weight="weight", resolution=1): r if not isinstance(communities, list): communities = list(communities) if not is_partition(G, communities): raise NotAPartition(G, communities) directed = G.is_directed() if directed: out_degree = dict(G.out_degree(weight=weight)) in_degree = dict(G.in_degree(weight=weight)) m = sum(out_degree.values()) norm = 1 / m**2 else: out_degree = in_degree = dict(G.degree(weight=weight)) deg_sum = sum(out_degree.values()) m = deg_sum / 2 norm = 1 / deg_sum**2
f6755ffa00211b523c6c0bec5398bc6c3c43c8b1
15
quality.py
226
Update black (#5438) * CI: sync up black dev requirements version with precommit * Run black Co-authored-by: Jarrod Millman <jarrod.millman@gmail.com>
41,926
0
145
152
36
176,475
58
networkx
20
networkx/algorithms/community/quality.py
Python
99
{ "docstring": "Returns the modularity of the given partition of the graph.\n\n Modularity is defined in [1]_ as\n\n .. math::\n Q = \\frac{1}{2m} \\sum_{ij} \\left( A_{ij} - \\gamma\\frac{k_ik_j}{2m}\\right)\n \\delta(c_i,c_j)\n\n where $m$ is the number of edges, $A$ is the adjacency matrix of `G`,\n $k_i$ is the degree of $i$, $\\gamma$ is the resolution parameter,\n and $\\delta(c_i, c_j)$ is 1 if $i$ and $j$ are in the same community else 0.\n\n According to [2]_ (and verified by some algebra) this can be reduced to\n\n .. math::\n Q = \\sum_{c=1}^{n}\n \\left[ \\frac{L_c}{m} - \\gamma\\left( \\frac{k_c}{2m} \\right) ^2 \\right]\n\n where the sum iterates over all communities $c$, $m$ is the number of edges,\n $L_c$ is the number of intra-community links for community $c$,\n $k_c$ is the sum of degrees of the nodes in community $c$,\n and $\\gamma$ is the resolution parameter.\n\n The resolution parameter sets an arbitrary tradeoff between intra-group\n edges and inter-group edges. More complex grouping patterns can be\n discovered by analyzing the same network with multiple values of gamma\n and then combining the results [3]_. That said, it is very common to\n simply use gamma=1. More on the choice of gamma is in [4]_.\n\n The second formula is the one actually used in calculation of the modularity.\n For directed graphs the second formula replaces $k_c$ with $k^{in}_c k^{out}_c$.\n\n Parameters\n ----------\n G : NetworkX Graph\n\n communities : list or iterable of set of nodes\n These node sets must represent a partition of G's nodes.\n\n weight : string or None, optional (default=\"weight\")\n The edge attribute that holds the numerical value used\n as a weight. If None or an edge does not have that attribute,\n then that edge has weight 1.\n\n resolution : float (default=1)\n If resolution is less than 1, modularity favors larger communities.\n Greater than 1 favors smaller communities.\n\n Returns\n -------\n Q : float\n The modularity of the paritition.\n\n Raises\n ------\n NotAPartition\n If `communities` is not a partition of the nodes of `G`.\n\n Examples\n --------\n >>> import networkx.algorithms.community as nx_comm\n >>> G = nx.barbell_graph(3, 0)\n >>> nx_comm.modularity(G, [{0, 1, 2}, {3, 4, 5}])\n 0.35714285714285715\n >>> nx_comm.modularity(G, nx_comm.label_propagation_communities(G))\n 0.35714285714285715\n\n References\n ----------\n .. [1] M. E. J. Newman \"Networks: An Introduction\", page 224.\n Oxford University Press, 2011.\n .. [2] Clauset, Aaron, Mark EJ Newman, and Cristopher Moore.\n \"Finding community structure in very large networks.\"\n Phys. Rev. E 70.6 (2004). <https://arxiv.org/abs/cond-mat/0408187>\n .. [3] Reichardt and Bornholdt \"Statistical Mechanics of Community Detection\"\n Phys. Rev. E 74, 016110, 2006. https://doi.org/10.1103/PhysRevE.74.016110\n .. [4] M. E. J. Newman, \"Equivalence between modularity optimization and\n maximum likelihood methods for community detection\"\n Phys. Rev. E 94, 052315, 2016. https://doi.org/10.1103/PhysRevE.94.052315\n\n ", "language": "en", "n_whitespaces": 682, "n_words": 425, "vocab_size": 259 }
https://github.com/networkx/networkx.git
2
handle
def handle(self, query, args, env, session): if isinstance(self.result, dict): return self.result else: return self.result(query, args, env, session)
3023d6e0223e50c2d7cbe850f1c5355e5d505ceb
10
responder.py
63
renaming
25,106
0
60
43
14
114,143
17
mindsdb
9
mindsdb/api/mongo/classes/responder.py
Python
5
{ "docstring": " Makes the answer based on params:\n\n        query (dict): document(s) from the request\n        args (dict): all other significant information from the request: flags, collection name, rows to return, etc.\n        env (dict): config, model_interface instance, and other mindsdb related stuff\n        session (object): current session\n\n        Returns the documents as a dict or a list of dicts.\n    ", "language": "en", "n_whitespaces": 90, "n_words": 47, "vocab_size": 42 }
https://github.com/mindsdb/mindsdb.git
3
GetContainingXLAContext
def GetContainingXLAContext(ctxt): while ctxt: if ctxt.IsXLAContext(): return ctxt ctxt = ctxt.outer_context return None
84afc5193d38057e2e2badf9c889ea87d80d8fbf
9
control_flow_util.py
44
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,705
0
47
25
11
276,712
13
keras
4
keras/utils/control_flow_util.py
Python
6
{ "docstring": "Returns the first ancestor XLAContext of `ctxt`.\n\n    Returns `ctxt` if `ctxt` is an XLAContext, or None if `ctxt` is not in a\n    while loop.\n\n    Args:\n      ctxt: ControlFlowContext\n\n    Returns:\n      `ctxt` if `ctxt` is an XLAContext, the most nested XLAContext containing\n      `ctxt`, or None if `ctxt` is not in a while loop.\n    ", "language": "en", "n_whitespaces": 80, "n_words": 50, "vocab_size": 26 }
https://github.com/keras-team/keras.git
1
get_model
def get_model(self, git_model_id, model_filename, backend): model = GetModel(model_filename, git_model_id).model_path model = cv2.dnn.readNetFromCaffe(model[1], model[0]) model.setPreferableTarget(self.get_backend(backend)) return model
f2e6f24651f62b28ccfb412180baca0aa7baf96a
9
vgg_face.py
81
Centralize model storage
20,368
0
51
53
13
100,921
16
faceswap
13
lib/vgg_face.py
Python
5
{ "docstring": " Check if model is available, if not, download and unzip it ", "language": "en", "n_whitespaces": 12, "n_words": 11, "vocab_size": 10 }
https://github.com/deepfakes/faceswap.git
1
copy_sign
def copy_sign(self, a, b): a = _convert_other(a, raiseit=True) return a.copy_sign(b)
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
_pydecimal.py
43
add python 3.10.4 for windows
55,690
0
31
27
10
219,662
10
XX-Net
6
python3.10.4/Lib/_pydecimal.py
Python
3
{ "docstring": "Copies the second operand's sign to the first one.\n\n In detail, it returns a copy of the first operand with the sign\n equal to the sign of the second operand.\n\n >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))\n Decimal('1.50')\n >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))\n Decimal('1.50')\n >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))\n Decimal('-1.50')\n >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))\n Decimal('-1.50')\n >>> ExtendedContext.copy_sign(1, -2)\n Decimal('-1')\n >>> ExtendedContext.copy_sign(Decimal(1), -2)\n Decimal('-1')\n >>> ExtendedContext.copy_sign(1, Decimal(-2))\n Decimal('-1')\n ", "language": "en", "n_whitespaces": 179, "n_words": 60, "vocab_size": 32 }
https://github.com/XX-net/XX-Net.git
2
forward
def forward(self, feats, img_metas): batch_size = len(img_metas) input_img_h, input_img_w = img_metas[0]['batch_input_shape'] padding_mask = feats[-1].new_ones( (batch_size, input_img_h, input_img_w), dtype=torch.float32) for i in range(batch_size): img_h, img_w, _ = img_metas[i]['img_shape'] padding_mask[i, :img_h, :img_w] = 0 padding_mask = F.interpolate( padding_mask.unsqueeze(1), size=feats[-1].shape[-2:], mode='nearest').to(torch.bool).squeeze(1) # when backbone is swin, memory is output of last stage of swin. # when backbone is r50, memory is output of tranformer encoder. mask_features, memory = self.pixel_decoder(feats, img_metas) pos_embed = self.decoder_pe(padding_mask) memory = self.decoder_input_proj(memory) # shape (batch_size, c, h, w) -> (h*w, batch_size, c) memory = memory.flatten(2).permute(2, 0, 1) pos_embed = pos_embed.flatten(2).permute(2, 0, 1) # shape (batch_size, h * w) padding_mask = padding_mask.flatten(1) # shape = (num_queries, embed_dims) query_embed = self.query_embed.weight # shape = (num_queries, batch_size, embed_dims) query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1) target = torch.zeros_like(query_embed) # shape (num_decoder, num_queries, batch_size, embed_dims) out_dec = self.transformer_decoder( query=target, key=memory, value=memory, key_pos=pos_embed, query_pos=query_embed, key_padding_mask=padding_mask) # shape (num_decoder, batch_size, num_queries, embed_dims) out_dec = out_dec.transpose(1, 2) # cls_scores all_cls_scores = self.cls_embed(out_dec) # mask_preds mask_embed = self.mask_embed(out_dec) all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed, mask_features) return all_cls_scores, all_mask_preds
cac356380d505bf15587f07c0529218cc36b9652
17
maskformer_head.py
475
[Feature] Add Maskformer to mmdet (#7212) * first commit * add README * move model description from config to readme add description for binary_input add description for dice loss add an independent panoptic gt processing function remove compatibility of pretrain in maskformer * update comments in maskformer_head * update docs format
70,209
0
553
302
97
244,039
167
mmdetection
54
mmdet/models/dense_heads/maskformer_head.py
Python
34
{ "docstring": "Forward function.\n\n Args:\n feats (list[Tensor]): Features from the upstream network, each\n is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n all_cls_scores (Tensor): Classification scores for each\\\n scale level. Each is a 4D-tensor with shape\\\n (num_decoder, batch_size, num_queries, cls_out_channels).\\\n Note `cls_out_channels` should includes background.\n all_mask_preds (Tensor): Mask scores for each decoder\\\n layer. Each with shape (num_decoder, batch_size,\\\n num_queries, h, w).\n ", "language": "en", "n_whitespaces": 215, "n_words": 60, "vocab_size": 50 }
https://github.com/open-mmlab/mmdetection.git
3
test_text
def test_text(self, qt_key, upper): modifiers = Qt.KeyboardModifier.ShiftModifier if upper else Qt.KeyboardModifiers() info = keyutils.KeyInfo(qt_key.member, modifiers=modifiers) expected = qt_key.uppertext if upper else qt_key.text assert info.text() == expected
0877fb0d78635692e481c8bde224fac5ad0dd430
9
test_keyutils.py
90
Run scripts/dev/rewrite_enums.py
117,685
0
61
58
20
321,379
26
qutebrowser
16
tests/unit/keyinput/test_keyutils.py
Python
5
{ "docstring": "Test KeyInfo.text() with all possible keys.\n\n See key_data.py for inputs and expected values.\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 13 }
https://github.com/qutebrowser/qutebrowser.git
28
present
def present(name, acl_type, acl_name="", perms="", recurse=False, force=False): ret = {"name": name, "result": True, "changes": {}, "comment": ""} _octal = {"r": 4, "w": 2, "x": 1, "-": 0} _octal_lookup = {0: "-", 1: "r", 2: "w", 4: "x"} if not os.path.exists(name): ret["comment"] = "{} does not exist".format(name) ret["result"] = False return ret __current_perms = __salt__["acl.getfacl"](name, recursive=recurse) if acl_type.startswith(("d:", "default:")): _acl_type = ":".join(acl_type.split(":")[1:]) _current_perms = __current_perms[name].get("defaults", {}) _default = True else: _acl_type = acl_type _current_perms = __current_perms[name] _default = False # The getfacl execution module lists default with empty names as being # applied to the user/group that owns the file, e.g., # default:group::rwx would be listed as default:group:root:rwx # In this case, if acl_name is empty, we really want to search for root # but still uses '' for other # We search through the dictionary getfacl returns for the owner of the # file if acl_name is empty. if acl_name == "": _search_name = __current_perms[name].get("comment").get(_acl_type, "") else: _search_name = acl_name if _current_perms.get(_acl_type, None) or _default: try: user = [ i for i in _current_perms[_acl_type] if next(iter(i.keys())) == _search_name ].pop() except (AttributeError, IndexError, StopIteration, KeyError): user = None if user: octal_sum = sum(_octal.get(i, i) for i in perms) need_refresh = False # If recursive check all paths retrieved via acl.getfacl if recurse: for path in __current_perms: acl_found = False if _default: # Recusive default acls only apply to directories if not os.path.isdir(path): continue _current_perms_path = __current_perms[path].get("defaults", {}) else: _current_perms_path = __current_perms[path] for user_acl in _current_perms_path.get(_acl_type, []): if ( _search_name in user_acl and user_acl[_search_name]["octal"] == octal_sum ): acl_found = True if not acl_found: need_refresh = True break # Check the permissions from the already located file elif user[_search_name]["octal"] == sum(_octal.get(i, i) for i in perms): need_refresh = False # If they don't match then refresh else: need_refresh = True if not need_refresh: ret["comment"] = "Permissions are in the desired state" else: _num = user[_search_name]["octal"] new_perms = "{}{}{}".format( _octal_lookup[_num & 1], _octal_lookup[_num & 2], _octal_lookup[_num & 4], ) changes = { "new": {"acl_name": acl_name, "acl_type": acl_type, "perms": perms}, "old": { "acl_name": acl_name, "acl_type": acl_type, "perms": new_perms, }, } if __opts__["test"]: ret.update( { "comment": ( "Updated permissions will be applied for " "{}: {} -> {}".format(acl_name, new_perms, perms) ), "result": None, "changes": changes, } ) return ret try: if force: __salt__["acl.wipefacls"]( name, recursive=recurse, raise_err=True ) __salt__["acl.modfacl"]( acl_type, acl_name, perms, name, recursive=recurse, raise_err=True, ) ret.update( { "comment": "Updated permissions for {}".format(acl_name), "result": True, "changes": changes, } ) except CommandExecutionError as exc: ret.update( { "comment": "Error updating permissions for {}: {}".format( acl_name, exc.strerror ), "result": False, } ) else: changes = { "new": {"acl_name": acl_name, "acl_type": acl_type, "perms": perms} } if __opts__["test"]: ret.update( { "comment": "New permissions will be applied for {}: {}".format( acl_name, perms ), "result": None, "changes": changes, } ) ret["result"] = None return ret try: if force: 
__salt__["acl.wipefacls"](name, recursive=recurse, raise_err=True) __salt__["acl.modfacl"]( acl_type, acl_name, perms, name, recursive=recurse, raise_err=True ) ret.update( { "comment": "Applied new permissions for {}".format(acl_name), "result": True, "changes": changes, } ) except CommandExecutionError as exc: ret.update( { "comment": "Error updating permissions for {}: {}".format( acl_name, exc.strerror ), "result": False, } ) else: ret["comment"] = "ACL Type does not exist" ret["result"] = False return ret
f2a783643de61cac1ff3288b40241e5ce6e1ddc8
23
linux_acl.py
1,365
Update to latest ``pyupgrade`` hook. Stop skipping it on CI. Signed-off-by: Pedro Algarvio <palgarvio@vmware.com>
54,297
0
3,212
807
248
215,977
522
salt
51
salt/states/linux_acl.py
Python
155
{ "docstring": "\n    Ensure a Linux ACL is present\n\n    name\n        The acl path\n\n    acl_type\n        The type of ACL to use; it can be 'user' or 'group'\n\n    acl_name\n        The user or group\n\n    perms\n        Set the permissions, e.g. rwx\n\n    recurse\n        Set the permissions recursively in the path\n\n    force\n        Wipe out old permissions and ensure only the new permissions are set\n    ", "language": "en", "n_whitespaces": 125, "n_words": 57, "vocab_size": 43 }
https://github.com/saltstack/salt.git
7
test_rate_limited
def test_rate_limited(self): org_strings = {1: {"a", "b", "c"}, 2: {"e", "f"}, 3: {"g"}} with override_options( { "sentry-metrics.writes-limiter.limits.releasehealth.per-org": [ {"window_seconds": 10, "granularity_seconds": 10, "limit": 1} ], } ): results = self.indexer.bulk_record( use_case_id=self.use_case_id, org_strings=org_strings ) assert len(results[1]) == 3 assert len(results[2]) == 2 assert len(results[3]) == 1 assert results[3]["g"] is not None rate_limited_strings = set() for org_id in 1, 2, 3: for k, v in results[org_id].items(): if v is None: rate_limited_strings.add(k) assert len(rate_limited_strings) == 3 assert "g" not in rate_limited_strings for string in rate_limited_strings: assert results.get_fetch_metadata()[string] == ( None, FetchType.RATE_LIMITED, FetchTypeExt(is_global=False), ) org_strings = {1: rate_limited_strings} with override_options( { "sentry-metrics.writes-limiter.limits.releasehealth.global": [ {"window_seconds": 10, "granularity_seconds": 10, "limit": 2} ], } ): results = self.indexer.bulk_record( use_case_id=self.use_case_id, org_strings=org_strings ) rate_limited_strings2 = set() for k, v in results[1].items(): if v is None: rate_limited_strings2.add(k) assert len(rate_limited_strings2) == 1 assert len(rate_limited_strings - rate_limited_strings2) == 2
c4cc0467974bcfb2b3c95120bd19c337aa977183
15
test_postgres_indexer.py
476
feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380] (#36263) * feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380] The postgres string indexer now is able to rate limit writes using four sentry options. If that happens, `None` is returned in place of an integer, and the FetchType is RATE_LIMITED. The kafka consumer/message processor explicitly checks for those `None` values and throws away every message that references a rate-limited string. It logs a Sentry error for every dropped message just because that's already what we do for other kinds of dropped messages. Rate limiting and quota management currently creates a ton of dataclasses and that probably wastes time. There are a ton of low-hanging fruits: * the return value of _construct_quotas could be globally cached, as long as the cache is wiped when the sentry options change. * the same Quota object (for global limits) is referenced from multiple RequestedQuota instances (one for each org). `sentry.ratelimits.sliding_windows` could check the `id()` of the quota (if there is no prefix override) to avoid computing and checking the same quota multiple times. An even lower hanging fruit is that we're fetching the same keys from Redis multiple times, because multiple organizations (and therefore multiple RequestedQuota instances) adhere to the global quota. So that's been fixed, but as for the rest let's wait for timings from prod. * fix typo * fix typing * apply review feedback * fix typing, add test * fix tests * apply review feedback about logging too many msgs * fix leaking option in test * sike, more test failures
18,908
0
631
294
75
92,391
137
sentry
23
tests/sentry/sentry_metrics/test_postgres_indexer.py
Python
46
{ "docstring": "\n Assert that rate limits per-org and globally are applied at all.\n\n Since we don't have control over ordering in sets/dicts, we have no\n control over which string gets rate-limited. That makes assertions\n quite awkward and imprecise.\n ", "language": "en", "n_whitespaces": 72, "n_words": 36, "vocab_size": 31 }
https://github.com/getsentry/sentry.git
3
register_for_auto_class
def register_for_auto_class(cls, auto_class="FlaxAutoModel"): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class # To update the docstring, we need to copy the method, otherwise we change the original docstring. FlaxPreTrainedModel.push_to_hub = copy_func(FlaxPreTrainedModel.push_to_hub) FlaxPreTrainedModel.push_to_hub.__doc__ = FlaxPreTrainedModel.push_to_hub.__doc__.format( object="model", object_class="FlaxAutoModel", object_files="model checkpoint" )
44b21f117bcf71e3d88a11c3523c94b27949fdbf
11
modeling_flax_utils.py
150
Save code of registered custom models (#15379) * Allow dynamic modules to use relative imports * Work for configs * Fix last merge conflict * Save code of registered custom objects * Map strings to strings * Fix test * Add tokenizer * Rework tests * Tests * Ignore fixtures py files for tests * Tokenizer test + fix collection * With full path * Rework integration * Fix typo * Remove changes in conftest * Test for tokenizers * Add documentation * Update docs/source/custom_models.mdx Co-authored-by: Lysandre Debut <lysandre@huggingface.co> * Add file structure and file content * Add more doc * Style * Update docs/source/custom_models.mdx Co-authored-by: Suraj Patil <surajp815@gmail.com> * Address review comments Co-authored-by: Lysandre Debut <lysandre@huggingface.co> Co-authored-by: Suraj Patil <surajp815@gmail.com>
6,342
0
113
52
47
34,808
57
transformers
21
src/transformers/modeling_flax_utils.py
Python
7
{ "docstring": "\n Register this class with a given auto class. This should only be used for custom models as the ones in the\n library are already mapped with an auto class.\n\n Args:\n auto_class (`str` or `type`, *optional*, defaults to `\"FlaxAutoModel\"`):\n The auto class to register this new model with.\n ", "language": "en", "n_whitespaces": 102, "n_words": 47, "vocab_size": 39 }
https://github.com/huggingface/transformers.git
5
SearchBackend
def SearchBackend(params): if connection.vendor == 'postgresql': from .postgres.postgres import PostgresSearchBackend return PostgresSearchBackend(params) elif connection.vendor == 'mysql': from .mysql.mysql import MySQLSearchBackend return MySQLSearchBackend(params) elif connection.vendor == 'sqlite': from .sqlite.utils import fts5_available if fts5_available(): from .sqlite.sqlite import SQLiteSearchBackend return SQLiteSearchBackend(params) else: from .fallback import DatabaseSearchBackend return DatabaseSearchBackend(params) else: from .fallback import DatabaseSearchBackend return DatabaseSearchBackend(params)
4248d406c011d6ba6207bb0e0e9b885813d961be
13
__init__.py
177
Test for presence of fts5 extension in sqlite backend initialisation and migration
15,513
0
174
99
28
70,498
52
wagtail
14
wagtail/search/backends/database/__init__.py
Python
18
{ "docstring": "\n Returns the appropriate search backend for the current 'default' database system\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 10 }
https://github.com/wagtail/wagtail.git
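For reference, a minimal (assumed) settings sketch showing how this factory is normally reached; the database backend path is the documented Wagtail default, the rest is illustrative.

# settings.py (sketch) - the "database" backend delegates to the
# vendor-specific class chosen by SearchBackend() above.
WAGTAILSEARCH_BACKENDS = {
    "default": {
        "BACKEND": "wagtail.search.backends.database",
    },
}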
2
test_nca_feature_names_out
def test_nca_feature_names_out(): X = iris_data y = iris_target est = NeighborhoodComponentsAnalysis().fit(X, y) names_out = est.get_feature_names_out() class_name_lower = est.__class__.__name__.lower() expected_names_out = np.array( [f"{class_name_lower}{i}" for i in range(est.components_.shape[1])], dtype=object, ) assert_array_equal(names_out, expected_names_out)
330881a21ca48c543cc8a67aa0d4e4c1dc1001ab
15
test_nca.py
132
ENH Adds get_feature_names_out to neighbors module (#22212) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
75,280
0
71
77
25
258,535
30
scikit-learn
24
sklearn/neighbors/tests/test_nca.py
Python
11
{ "docstring": "Check `get_feature_names_out` for `NeighborhoodComponentsAnalysis`.", "language": "en", "n_whitespaces": 3, "n_words": 4, "vocab_size": 4 }
https://github.com/scikit-learn/scikit-learn.git
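A runnable sketch mirroring the test above; the expected names follow the lower-cased class name pattern the test asserts.

from sklearn.datasets import load_iris
from sklearn.neighbors import NeighborhoodComponentsAnalysis

X, y = load_iris(return_X_y=True)
nca = NeighborhoodComponentsAnalysis(n_components=2, random_state=0).fit(X, y)
# Expected: ['neighborhoodcomponentsanalysis0' 'neighborhoodcomponentsanalysis1']
print(nca.get_feature_names_out())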
1
test_query_with_bool_in_params
def test_query_with_bool_in_params(client): with mock.patch("rest_api.controller.search.query_pipeline") as mocked_pipeline: # `run` must return a dictionary containing a `query` key mocked_pipeline.run.return_value = {"query": TEST_QUERY} request_body = { "query": TEST_QUERY, "params": {"debug": True, "Retriever": {"top_k": 5}, "Reader": {"top_k": 3}}, } response = client.post(url="/query", json=request_body) assert 200 == response.status_code response_json = response.json() assert response_json["documents"] == [] assert response_json["answers"] == []
632cd1c141a8b485c6ef8695685d2d8eef3ca50f
15
test_rest_api.py
185
Allow values that are not dictionaries in the request params in the `/search` endpoint (#2720) * let params contain something else than dictionaries * rewrite the test same style as the main branch
75,098
0
145
102
44
257,593
54
haystack
15
rest_api/test/test_rest_api.py
Python
12
{ "docstring": "\n Ensure items of params can be other types than dictionary, see\n https://github.com/deepset-ai/haystack/issues/2656\n ", "language": "en", "n_whitespaces": 22, "n_words": 12, "vocab_size": 12 }
https://github.com/deepset-ai/haystack.git
2
_setup_amd
def _setup_amd(cls, arguments): logger.debug("Setting up for AMD") try: import plaidml # noqa pylint:disable=unused-import,import-outside-toplevel except ImportError: logger.error("PlaidML not found. Run `pip install plaidml-keras` for AMD support") return False from lib.gpu_stats import setup_plaidml # pylint:disable=import-outside-toplevel setup_plaidml(arguments.loglevel, arguments.exclude_gpus) logger.debug("setup up for PlaidML") return True
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
11
launcher.py
96
Refactor lib.gpu_stats (#1218) * initial gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
19,995
0
132
53
35
100,531
41
faceswap
13
lib/cli/launcher.py
Python
11
{ "docstring": " Test for plaidml and perform setup for AMD.\n\n Parameters\n ----------\n arguments: :class:`argparse.Namespace`\n The command line arguments passed to Faceswap.\n ", "language": "en", "n_whitespaces": 59, "n_words": 19, "vocab_size": 18 }
https://github.com/deepfakes/faceswap.git
3
is_arborescence
def is_arborescence(G): return is_tree(G) and max(d for n, d in G.in_degree()) <= 1 @nx.utils.not_implemented_for("undirected")
5a7985fc41bc0c686c035de43c66cf4fb5fcc94f
@nx.utils.not_implemented_for("undirected")
12
recognition.py
64
Added examples in tournament and tree functions (#5536) * examples * examples * examples * Example changed * improved styling * revised * edge labels * improved styling * spacing * error testing * examples * styling * add_nodes removed * spacing * spacing * spacing * added examples * removed random_tournament example * added examples in branching and arborescence * error removed
41,959
1
19
28
14
176,550
14
networkx
10
networkx/algorithms/tree/recognition.py
Python
2
{ "docstring": "\n Returns True if `G` is an arborescence.\n\n An arborescence is a directed tree with maximum in-degree equal to 1.\n\n Parameters\n ----------\n G : graph\n The graph to test.\n\n Returns\n -------\n b : bool\n A boolean that is True if `G` is an arborescence.\n\n Examples\n --------\n >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (3, 4)])\n >>> nx.is_arborescence(G)\n True\n >>> G.remove_edge(0, 1)\n >>> G.add_edge(1, 2) # maximum in-degree is 2\n >>> nx.is_arborescence(G)\n False\n\n Notes\n -----\n In another convention, an arborescence is known as a *tree*.\n\n See Also\n --------\n is_tree\n\n ", "language": "en", "n_whitespaces": 177, "n_words": 89, "vocab_size": 62 }
https://github.com/networkx/networkx.git
1
test_public_receipt_can_override_private
def test_public_receipt_can_override_private(self) -> None: # Send a message as the first user res = self.helper.send(self.room_id, body="hello", tok=self.tok) # Send a private read receipt channel = self.make_request( "POST", f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", {}, access_token=self.tok2, ) self.assertEqual(channel.code, 200) self.assertIsNone(self._get_read_receipt()) # Send a public read receipt channel = self.make_request( "POST", f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", {}, access_token=self.tok2, ) self.assertEqual(channel.code, 200) # Test that we did override the private read receipt self.assertNotEqual(self._get_read_receipt(), None)
116a4c8340b729ffde43be33df24d417384cb28b
12
test_sync.py
232
Implement changes to MSC2285 (hidden read receipts) (#12168) * Changes hidden read receipts to be a separate receipt type (instead of a field on `m.read`). * Updates the `/receipts` endpoint to accept `m.fully_read`.
72,135
0
248
114
39
248,167
62
synapse
20
tests/rest/client/test_sync.py
Python
22
{ "docstring": "\n Sending a public read receipt to the same event which has a private read\n receipt should cause that receipt to become public.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 17 }
https://github.com/matrix-org/synapse.git
2
_print_pgf_to_fh
def _print_pgf_to_fh(self, fh, *, bbox_inches_restore=None): header_text = # append the preamble used by the backend as a comment for debugging header_info_preamble = ["%% Matplotlib used the following preamble"] for line in _get_preamble().splitlines(): header_info_preamble.append("%% " + line) header_info_preamble.append("%%") header_info_preamble = "\n".join(header_info_preamble) # get figure size in inch w, h = self.figure.get_figwidth(), self.figure.get_figheight() dpi = self.figure.dpi # create pgfpicture environment and write the pgf code fh.write(header_text) fh.write(header_info_preamble) fh.write("\n") _writeln(fh, r"\begingroup") _writeln(fh, r"\makeatletter") _writeln(fh, r"\begin{pgfpicture}") _writeln(fh, r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}" % (w, h)) _writeln(fh, r"\pgfusepath{use as bounding box, clip}") renderer = MixedModeRenderer(self.figure, w, h, dpi, RendererPgf(self.figure, fh), bbox_inches_restore=bbox_inches_restore) self.figure.draw(renderer) # end the pgfpicture environment _writeln(fh, r"\end{pgfpicture}") _writeln(fh, r"\makeatother") _writeln(fh, r"\endgroup")
7bafb8be7c6e81180e9518a91d10da9422321a0c
11
backend_pgf.py
327
Deprecate internal use of get/set dpi
23,310
0
389
195
77
108,693
104
matplotlib
23
lib/matplotlib/backends/backend_pgf.py
Python
46
{ "docstring": "%% Creator: Matplotlib, PGF backend\n%%\n%% To include the figure in your LaTeX document, write\n%% \\\\input{<filename>.pgf}\n%%\n%% Make sure the required packages are loaded in your preamble\n%% \\\\usepackage{pgf}\n%%\n%% Also ensure that all the required font packages are loaded; for instance,\n%% the lmodern package is sometimes necessary when using math font.\n%% \\\\usepackage{lmodern}\n%%\n%% Figures using additional raster images can only be included by \\\\input if\n%% they are in the same directory as the main LaTeX file. For loading figures\n%% from other directories you can use the `import` package\n%% \\\\usepackage{import}\n%%\n%% and then include the figures with\n%% \\\\import{<path to file>}{<filename>.pgf}\n%%\n", "language": "en", "n_whitespaces": 103, "n_words": 113, "vocab_size": 74 }
https://github.com/matplotlib/matplotlib.git
2
to_pandas_refs
def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]: block_to_df = cached_remote_fn(_block_to_df) return [block_to_df.remote(block) for block in self._blocks.get_blocks()]
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
10
dataset.py
65
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,316
0
34
39
13
130,580
13
ray
11
python/ray/data/dataset.py
Python
15
{ "docstring": "Convert this dataset into a distributed set of Pandas dataframes.\n\n This is only supported for datasets convertible to Arrow records.\n This function induces a copy of the data. For zero-copy access to the\n underlying data, consider using ``.to_arrow()`` or\n ``.get_internal_block_refs()``.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n A list of remote Pandas dataframes created from this dataset.\n ", "language": "en", "n_whitespaces": 117, "n_words": 57, "vocab_size": 49 }
https://github.com/ray-project/ray.git
8
build
def build(self, var_list, exclude_from_weight_decay=None): super().build(var_list) if hasattr(self, "_built") and self._built: return self._built = True if not hasattr(self, "_exclude_from_weight_decay"): self._exclude_from_weight_decay = exclude_from_weight_decay or [] self._momentums = [] self._velocities = [] for var in var_list: self._momentums.append( self.add_variable_from_reference( model_variable=var, variable_name="m" ) ) self._velocities.append( self.add_variable_from_reference( model_variable=var, variable_name="v" ) ) if self.amsgrad: self._velocity_hats = [] for var in var_list: self._velocity_hats.append( self.add_variable_from_reference( model_variable=var, variable_name="vhat" ) )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
15
adamw.py
238
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,348
0
400
145
37
275,252
60
keras
17
keras/optimizers/optimizer_experimental/adamw.py
Python
28
{ "docstring": "Initialize optimizer variables.\n\n AdamW optimizer has 3 types of variables: momentums, velocities and\n velocity_hat (only set when amsgrad is applied),\n\n Args:\n var_list: list of model variables to build AdamW variables on.\n exclude_from_weight_decay: list of model variables that will be excluded\n from weight decay.\n ", "language": "en", "n_whitespaces": 100, "n_words": 43, "vocab_size": 35 }
https://github.com/keras-team/keras.git
3
validate_input
async def validate_input(data): base = Alpha2Base(data["host"]) try: await base.update_data() except (aiohttp.client_exceptions.ClientConnectorError, asyncio.TimeoutError): return {"error": "cannot_connect"} except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") return {"error": "unknown"} # Return info that you want to store in the config entry. return {"title": base.name}
243d003acc11d638feb3867410c3cbb1987520bc
11
config_flow.py
123
Add Moehlenhoff Alpha2 underfloor heating system integration (#42771) * Add Moehlenhoff Alpha2 underfloor heating system integration * isort changes * flake8 changes * Do not exclude config_flow.py * pylint changes * Add config_flow test * correct requirements_test_all.txt * more tests * Update test description * Test connection and catch TimeoutError in async_setup_entry * Add version to manifest file * Remove version from manifest file * Replace tests.async_mock.patch by unittest.mock.patch * Update moehlenhoff-alpha2 to version 1.0.1 * Update requirements for moehlenhoff-alpha2 1.0.1 * Update moehlenhoff-alpha2 to 1.0.2 * Use async_setup_platforms * Use async_unload_platforms * Separate connection and devices for each entry_id * Use async_track_time_interval to schedule updates * Check if input is valid before checking uniqueness * Move Exception handling to validate_input * Catch aiohttp.client_exceptions.ClientConnectorError * Remove translation files * Mock TimeoutError * Fix data update * Replace current callback implementation with ha dispatcher * Return False in should_poll * Remove unused argument * Remove CONNECTION_CLASS * Use _async_current_entries * Call async_schedule_update_ha_state after data update * Remove unneeded async_setup Co-authored-by: Milan Meulemans <milan.meulemans@live.be> * Remove unneeded async_setup_platform Co-authored-by: Milan Meulemans <milan.meulemans@live.be> * Set Schema attribute host required Co-authored-by: Milan Meulemans <milan.meulemans@live.be> * Remove unused Exception class Co-authored-by: Milan Meulemans <milan.meulemans@live.be> * Update manifest.json Co-authored-by: Milan Meulemans <milan.meulemans@live.be> * pylint constructor return type None * Replace properties by class variables * use pass instead of return * Remove unused sync update method * remove property hvac_action * remove pass * rework exception handling * Update homeassistant/components/moehlenhoff_alpha2/config_flow.py Co-authored-by: Milan Meulemans <milan.meulemans@live.be> * Correct indentation * catch Exception in validate_input * Replace HomeAssistantType with HomeAssistant * Update to moehlenhoff-alpha2 1.0.3 * Allow to switch between heating and cooling mode * Update moehlenhoff-alpha2 to version 1.0.4 * Update heatarea data after setting target temperature * Support hvac_action * Fix heatarea update with multiple bases * Update data after setting preset mode * Use custom preset modes like defined by device * Fix config flow test * Fix test_duplicate_error * Rename property to extra_state_attributes Rename property device_state_attributes to extra_state_attributes and return lowercase keys in dict.
* Refactor using DataUpdateCoordinator * Remove _attr_should_poll * Raise HomeAssistantError on communication error Catch HTTPError instead of broad except and reraise as HomeAssistantError * Change DataUpdateCoordinator name to alpha2_base * Refresh coordinator before setting data * Raise ValueError on invalid heat area mode * Rename heatarea to heat_area * Set type annotation in class attribute * Move coordinator to top * Move exception handling to the coordinator * Use heat_area_id directly * Sore get_cooling() result into local var * Add explanation of status attributes and remove BLOCK_HC * Fix pylint warnings * from __future__ import annotations * Use Platform Enum * Move data handling to coordinator * Remove property extra_state_attributes * Add missing annotations * Update moehlenhoff-alpha2 to version 1.1.2 * Rework tests based on the scaffold template * Set also heat/cool/day/night temp with target temp * Remove unneeded code from tests Co-authored-by: Milan Meulemans <milan.meulemans@live.be>
111,624
0
90
65
35
312,993
40
core
14
homeassistant/components/moehlenhoff_alpha2/config_flow.py
Python
10
{ "docstring": "Validate the user input allows us to connect.\n\n Data has the keys from DATA_SCHEMA with values provided by the user.\n ", "language": "en", "n_whitespaces": 26, "n_words": 20, "vocab_size": 18 }
https://github.com/home-assistant/core.git
1
_update
def _update(self) -> int: retval = self.update() logger.debug("Updated %s: %s", self.__class__.__name__, retval) return retval
e5356a417e7c2124e75c4a2994ed604fc0a3cc74
9
alignments.py
53
Alignments update: - Store face embeddings in PNG header when sorting - typing + refactor - Update alignments keys for 'identity' and 'video_meta' + bump to v2.3 - General typing fixes
21,086
0
42
31
13
101,682
14
faceswap
9
lib/align/alignments.py
Python
12
{ "docstring": " Calls the child's :func:`update` method, logs output and sets the\n :attr:`is_updated` flag\n\n Returns\n -------\n int\n The number of items that were updated\n ", "language": "en", "n_whitespaces": 69, "n_words": 22, "vocab_size": 21 }
https://github.com/deepfakes/faceswap.git
1
test_edgeql_scope_for_with_computable_01
async def test_edgeql_scope_for_with_computable_01(self): await self.assert_query_result( r''' with props := ( for h in User union ( select h {namelen := len(h.name)} ) ) select props { name, namelen }; ''', tb.bag([ {"name": "Alice", "namelen": 5}, {"name": "Bob", "namelen": 3}, {"name": "Carol", "namelen": 5}, {"name": "Dave", "namelen": 4} ]) )
0dada08f4eedb104bfa40932b576e44d82218547
14
test_edgeql_scope.py
111
Always include the definition context namespace in computable contexts (#3331) We need to include the *original* source namespace in our ctx namespace when compiling computables. The current mechanism of trying to look up in view_sets or failing that using the source namespace from the computable use, but this was failing to find it in some cases with FOR. Fix this by instead directly pulling in the namespace from qlctx. The inclusion of qlctx's namespace nicely allows us to ditch so later logic as well. Additionally we need to merge the namespace into *both* sides in get_view_map_remapping, to handle cases like referencing a `FOR` variable where the current ns doesn't get merged in. Fixes #3323.
41,718
0
131
60
18
176,142
25
edgedb
5
tests/test_edgeql_scope.py
Python
20
{ "docstring": "\n with props := (\n for h in User union (\n select h {namelen := len(h.name)}\n )\n )\n select props {\n name,\n namelen\n };\n ", "language": "en", "n_whitespaces": 146, "n_words": 23, "vocab_size": 17 }
https://github.com/edgedb/edgedb.git
5
transform
def transform(self, X): check_is_fitted(self) if self.n_neighbors is not None: distances, indices = self.nbrs_.kneighbors(X, return_distance=True) else: distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True) # Create the graph of shortest distances from X to # training data via the nearest neighbors of X. # This can be done as a single array operation, but it potentially # takes a lot of memory. To avoid that, use a loop: n_samples_fit = self.nbrs_.n_samples_fit_ n_queries = distances.shape[0] if hasattr(X, "dtype") and X.dtype == np.float32: dtype = np.float32 else: dtype = np.float64 G_X = np.zeros((n_queries, n_samples_fit), dtype) for i in range(n_queries): G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0) G_X **= 2 G_X *= -0.5 return self.kernel_pca_.transform(G_X)
e2e7d75d1e81ce3b67257fcc4cce32ab2d2acd2f
13
_isomap.py
261
MAINT Extend dtype preserved common test to check transform (#24982)
76,893
0
283
170
85
261,613
108
scikit-learn
27
sklearn/manifold/_isomap.py
Python
18
{ "docstring": "Transform X.\n\n This is implemented by linking the points X into the graph of geodesic\n distances of the training data. First the `n_neighbors` nearest\n neighbors of X are found in the training data, and from these the\n shortest geodesic distances from each point in X to each point in\n the training data are computed in order to construct the kernel.\n The embedding of X is the projection of this kernel onto the\n embedding vectors of the training set.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_queries, n_features)\n If neighbors_algorithm='precomputed', X is assumed to be a\n distance matrix or a sparse graph of shape\n (n_queries, n_samples_fit).\n\n Returns\n -------\n X_new : array-like, shape (n_queries, n_components)\n X transformed in the new space.\n ", "language": "en", "n_whitespaces": 262, "n_words": 120, "vocab_size": 71 }
https://github.com/scikit-learn/scikit-learn.git
2
check_status
def check_status(self): status = { 'success': False } try: con = self.__connect() with closing(con) as con: status['success'] = con.is_connected() except Exception as e: log.error(f'Error connecting to MySQL {self.database}, {e}!') status['error'] = e return status
ef0262e95a1e1a5403896088ca3938adb895a8d6
13
mysql_handler.py
122
Add mysql handler
25,250
0
146
60
29
114,694
34
mindsdb
12
mindsdb/integrations/mysql_handler/mysql_handler.py
Python
12
{ "docstring": "\n Check the connection of the MySQL database\n :return: success status and error message if error occurs\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 14 }
https://github.com/mindsdb/mindsdb.git
2
execute
def execute(): auto_email_reports = frappe.db.get_values( "Auto Email Report", {"report": "Requested Items to Order"}, ["name"] ) for auto_email_report in auto_email_reports: frappe.delete_doc("Auto Email Report", auto_email_report[0]) frappe.db.sql( """DELETE FROM `tabReport` WHERE name = 'Requested Items to Order'""" )
494bd9ef78313436f0424b918f200dab8fc7c20b
11
delete_report_requested_items_to_order.py
89
style: format code with black
14,316
0
17
49
22
66,747
25
erpnext
8
erpnext/patches/v13_0/delete_report_requested_items_to_order.py
Python
12
{ "docstring": "Check for one or multiple Auto Email Reports and delete\n\t\tDELETE FROM `tabReport`\n\t\tWHERE name = 'Requested Items to Order'\n\t", "language": "en", "n_whitespaces": 17, "n_words": 20, "vocab_size": 20 }
https://github.com/frappe/erpnext.git
9
check_send_to_ereader
def check_send_to_ereader(entry): formats = list() book_formats = list() if len(entry.data): for ele in iter(entry.data): if ele.uncompressed_size < config.mail_size: formats.append(ele.format) if 'EPUB' in formats: book_formats.append({'format': 'Epub', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Epub')}) if 'MOBI' in formats: book_formats.append({'format': 'Mobi', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Mobi')}) if 'PDF' in formats: book_formats.append({'format': 'Pdf', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Pdf')}) if 'AZW' in formats: book_formats.append({'format': 'Azw', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Azw')}) if config.config_converterpath: book_formats.extend(check_send_to_ereader_with_converter(formats)) return book_formats else: log.error(u'Cannot find book entry %d', entry.id) return None # Check if a reader is existing for any of the book formats, if not, return empty list, otherwise return # list with supported formats
fbac3e38ac116855b930ee60fb3c997337ae17b7
17
helper.py
370
Eenabled send epubs to E-Reader devices
40,835
0
527
202
65
173,321
114
calibre-web
21
cps/helper.py
Python
29
{ "docstring": "\n returns all available book formats for sending to E-Reader\n ", "language": "en", "n_whitespaces": 20, "n_words": 9, "vocab_size": 9 }
https://github.com/janeczku/calibre-web.git
2
test_async_on_entire_period
async def test_async_on_entire_period(recorder_mock, hass): start_time = dt_util.utcnow() - timedelta(minutes=60) t0 = start_time + timedelta(minutes=20) t1 = t0 + timedelta(minutes=10) t2 = t1 + timedelta(minutes=10) # Start t0 t1 t2 End # |--20min--|--20min--|--10min--|--10min--| # |---on----|--off----|---on----|--off----|
31a787558fd312331b55e5c2c4b33341fc3601fc
10
test_sensor.py
90
Ensure recorder test fixture is setup before hass fixture (#80528) * Ensure recorder test fixture is setup before hass fixture * Adjust more tests
88,532
0
83
284
20
289,390
34
core
11
tests/components/history_stats/test_sensor.py
Python
62
{ "docstring": "Test the history statistics sensor measuring as on the entire period.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/home-assistant/core.git
1
on_post_template
def on_post_template(self, output_content, template_name, config): return output_content # Page events
f79b34d174e41084391868e7b503f5c61b8b1bdf
6
plugins.py
23
Move plugin events docs into source code + refactor * Create real (no-op) methods for each event in the base class. * Refactor event dispatcher to not check for methods' existence, instead just call them. * Move documentation from Markdown into docstrings of these methods. * Activate the 'mkdocstrings' plugin. * Use 'mkdocstrings' to insert documentation from those docstrings into the site.
57,306
0
27
14
10
224,461
10
mkdocs
5
mkdocs/plugins.py
Python
2
{ "docstring": "\n The `post_template` event is called after the template is rendered, but before\n it is written to disc and can be used to alter the output of the template.\n If an empty string is returned, the template is skipped and nothing is is\n written to disc.\n\n Parameters:\n output_content: output of rendered template as string\n template_name: string filename of template\n config: global configuration object\n\n Returns:\n output of rendered template as string\n ", "language": "en", "n_whitespaces": 163, "n_words": 69, "vocab_size": 42 }
https://github.com/mkdocs/mkdocs.git
1
frame_has_faces
def frame_has_faces(self, frame_name): retval = bool(self._data.get(frame_name, {}).get("faces", [])) logger.trace("'%s': %s", frame_name, retval) return retval
5e73437be47f2410439a3c6716de96354e6a0c94
13
alignments.py
73
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
20,638
0
42
44
13
101,218
14
faceswap
9
lib/align/alignments.py
Python
4
{ "docstring": " Check whether a given frame_name exists within the alignments :attr:`data` and contains\n at least 1 face.\n\n Parameters\n ----------\n frame_name: str\n The frame name to check. This should be the base name of the frame, not the full path\n\n Returns\n -------\n bool\n ``True`` if the given frame_name exists within the alignments :attr:`data` and has at\n least 1 face associated with it, otherwise ``False``\n ", "language": "en", "n_whitespaces": 152, "n_words": 62, "vocab_size": 46 }
https://github.com/deepfakes/faceswap.git
11
smart_resize
def smart_resize(x, size, interpolation='bilinear'): if len(size) != 2: raise ValueError('Expected `size` to be a tuple of 2 integers, ' f'but got: {size}.') img = tf.convert_to_tensor(x) if img.shape.rank is not None: if img.shape.rank < 3 or img.shape.rank > 4: raise ValueError( 'Expected an image array with shape `(height, width, channels)`, ' 'or `(batch_size, height, width, channels)`, but ' f'got input with incorrect rank, of shape {img.shape}.') shape = tf.shape(img) height, width = shape[-3], shape[-2] target_height, target_width = size if img.shape.rank is not None: static_num_channels = img.shape[-1] else: static_num_channels = None crop_height = tf.cast( tf.cast(width * target_height, 'float32') / target_width, 'int32') crop_width = tf.cast( tf.cast(height * target_width, 'float32') / target_height, 'int32') # Set back to input height / width if crop_height / crop_width is not smaller. crop_height = tf.minimum(height, crop_height) crop_width = tf.minimum(width, crop_width) crop_box_hstart = tf.cast( tf.cast(height - crop_height, 'float32') / 2, 'int32') crop_box_wstart = tf.cast(tf.cast(width - crop_width, 'float32') / 2, 'int32') if img.shape.rank == 4: crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0]) crop_box_size = tf.stack([-1, crop_height, crop_width, -1]) else: crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0]) crop_box_size = tf.stack([crop_height, crop_width, -1]) img = tf.slice(img, crop_box_start, crop_box_size) img = tf.image.resize(images=img, size=size, method=interpolation) # Apparent bug in resize_images_v2 may cause shape to be lost if img.shape.rank is not None: if img.shape.rank == 4: img.set_shape((None, None, None, static_num_channels)) if img.shape.rank == 3: img.set_shape((None, None, static_num_channels)) if isinstance(x, np.ndarray): return img.numpy() return img
9dc9a78cc6502226775a99725c654fab3298aa5f
15
image_utils.py
646
Expose all utilities in `keras.utils.__init__.py`, and resolve the hourglass import issues that led to the creation of an extraneous `all_utils.py` file / library. PiperOrigin-RevId: 435725558
79,905
0
360
404
122
269,108
226
keras
35
keras/utils/image_utils.py
Python
43
{ "docstring": "Resize images to a target size without aspect ratio distortion.\n\n Warning: `tf.keras.preprocessing.image.smart_resize` is not recommended for\n new code. Prefer `tf.keras.layers.Resizing`, which provides the same\n functionality as a preprocessing layer and adds `tf.RaggedTensor` support. See\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers)\n for an overview of preprocessing layers.\n\n TensorFlow image datasets typically yield images that have each a different\n size. However, these images need to be batched before they can be\n processed by Keras layers. To be batched, images need to share the same height\n and width.\n\n You could simply do:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: tf.image.resize(img, size))\n ```\n\n However, if you do this, you distort the aspect ratio of your images, since\n in general they do not all have the same aspect ratio as `size`. This is\n fine in many cases, but not always (e.g. for GANs this can be a problem).\n\n Note that passing the argument `preserve_aspect_ratio=True` to `resize`\n will preserve the aspect ratio, but at the cost of no longer respecting the\n provided target size. Because `tf.image.resize` doesn't crop images,\n your output images will still have different sizes.\n\n This calls for:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: smart_resize(img, size))\n ```\n\n Your output images will actually be `(200, 200)`, and will not be distorted.\n Instead, the parts of the image that do not fit within the target size\n get cropped out.\n\n The resizing process is:\n\n 1. Take the largest centered crop of the image that has the same aspect ratio\n as the target size. For instance, if `size=(200, 200)` and the input image has\n size `(340, 500)`, we take a crop of `(340, 340)` centered along the width.\n 2. Resize the cropped image to the target size. In the example above,\n we resize the `(340, 340)` crop to `(200, 200)`.\n\n Args:\n x: Input image or batch of images (as a tensor or NumPy array). Must be in\n format `(height, width, channels)` or `(batch_size, height, width,\n channels)`.\n size: Tuple of `(height, width)` integer. Target size.\n interpolation: String, interpolation to use for resizing. Defaults to\n `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`, `area`,\n `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.\n\n Returns:\n Array with shape `(size[0], size[1], channels)`. If the input image was a\n NumPy array, the output is a NumPy array, and if it was a TF tensor,\n the output is a TF tensor.\n ", "language": "en", "n_whitespaces": 460, "n_words": 383, "vocab_size": 215 }
https://github.com/keras-team/keras.git
7
_port_scan_icmp
def _port_scan_icmp(self, port): ret = None # Create the ping command # Use the system ping command because it already have the sticky bit set # Python can not create ICMP packet with non root right if WINDOWS: timeout_opt = '-w' count_opt = '-n' elif MACOS or BSD: timeout_opt = '-t' count_opt = '-c' else: # Linux and co... timeout_opt = '-W' count_opt = '-c' # Build the command line # Note: Only string are allowed cmd = [ 'ping', count_opt, '1', timeout_opt, str(self._resolv_name(port['timeout'])), self._resolv_name(port['host']), ] fnull = open(os.devnull, 'w') try: counter = Counter() ret = subprocess.check_call(cmd, stdout=fnull, stderr=fnull, close_fds=True) if ret == 0: port['status'] = counter.get() else: port['status'] = False except subprocess.CalledProcessError: # Correct issue #1084: No Offline status for timed-out ports port['status'] = False except Exception as e: logger.debug("{}: Error while pinging host {} ({})".format(self.plugin_name, port['host'], e)) fnull.close() return ret
be927fda3dc4118b77ad0f88d5e6deb652a5f5b3
14
glances_ports.py
314
Prepare memory leak test. Not active for the moment
15,135
0
518
177
103
69,882
142
glances
32
glances/plugins/glances_ports.py
Python
33
{ "docstring": "Scan the (ICMP) port structure (dict) and update the status key.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/nicolargo/glances.git
2
shutdown
async def shutdown(self) -> None: # Stop polling/writing Console dimensions to clients self.shutdown_event.set() await self.size_poll_task # We're shutting down the server, so inform all connected clients for client in self.clients: await client.close() self.clients.clear()
36d7973c7c6792fd1100d5512140a4701b53ba3d
10
service.py
71
Code review actions
44,013
0
93
39
30
182,936
33
textual
9
src/textual/devtools/service.py
Python
7
{ "docstring": "Stop server async tasks and clean up all client handlers", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/Textualize/textual.git
7
_intersection
def _intersection(self, other, sort): # For IntervalIndex we also know other.inclusive == self.inclusive if self.left.is_unique and self.right.is_unique: taken = self._intersection_unique(other) elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1: # Swap other/self if other is unique and self does not have # multiple NaNs taken = other._intersection_unique(self) else: # duplicates taken = self._intersection_non_unique(other) if sort is None: taken = taken.sort_values() return taken
7e23a37e1c5bda81234801a6584563e2880769eb
12
interval.py
147
ENH: consistency of input args for boundaries - Interval (#46522)
39,853
0
187
88
45
166,669
61
pandas
13
pandas/core/indexes/interval.py
Python
10
{ "docstring": "\n intersection specialized to the case with matching dtypes.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/pandas-dev/pandas.git
1
load_from_saved_model
def load_from_saved_model(saved_model_path, custom_objects=None): warnings.warn( "`tf.keras.experimental.load_from_saved_model` is deprecated" "and will be removed in a future version. " "Please switch to `tf.keras.models.load_model`.", stacklevel=2, ) # restore model topology from json string model_json_filepath = tf.io.gfile.join( tf.compat.as_bytes(saved_model_path), tf.compat.as_bytes(tf.saved_model.ASSETS_DIRECTORY), tf.compat.as_bytes(SAVED_MODEL_FILENAME_JSON), ) with tf.io.gfile.GFile(model_json_filepath, "r") as f: model_json = f.read() model = model_config.model_from_json( model_json, custom_objects=custom_objects ) # restore model weights checkpoint_prefix = tf.io.gfile.join( tf.compat.as_text(saved_model_path), tf.compat.as_text(tf.saved_model.VARIABLES_DIRECTORY), tf.compat.as_text(tf.saved_model.VARIABLES_FILENAME), ) model.load_weights(checkpoint_prefix) return model #### Directory / path helpers
84afc5193d38057e2e2badf9c889ea87d80d8fbf
12
saved_model_experimental.py
250
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,599
0
194
154
57
276,218
69
keras
28
keras/saving/saved_model_experimental.py
Python
24
{ "docstring": "Loads a keras Model from a SavedModel created by `export_saved_model()`.\n\n This function reinstantiates model state by:\n 1) loading model topology from json (this will eventually come\n from metagraph).\n 2) loading model weights from checkpoint.\n\n Example:\n\n ```python\n import tensorflow as tf\n\n # Create a tf.keras model.\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(1, input_shape=[10]))\n model.summary()\n\n # Save the tf.keras model in the SavedModel format.\n path = '/tmp/simple_keras_model'\n tf.keras.experimental.export_saved_model(model, path)\n\n # Load the saved keras model back.\n new_model = tf.keras.experimental.load_from_saved_model(path)\n new_model.summary()\n ```\n\n Args:\n saved_model_path: a string specifying the path to an existing SavedModel.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n Returns:\n a keras.Model instance.\n ", "language": "en", "n_whitespaces": 207, "n_words": 108, "vocab_size": 82 }
https://github.com/keras-team/keras.git
1
add_html_cache_busting
def add_html_cache_busting(app, pagename, templatename, context, doctree): from sphinx.builders.html import Stylesheet, JavaScript css_tag = context['css_tag'] js_tag = context['js_tag']
e87416b33b01f6fc4e3b2290d6e8a60e6ddb6e55
8
conf.py
57
DOC: Add cache busting to all static assets We have seen both in `/stable` and `/3.6.0`, some styling is broken because old CSS is cached. CSS might change from updating sphinx-gallery, mpl-sphinx-theme, pydata-sphinx-theme, etc. Adding a versioned query breaks the cache. It's a bit over-eager to base it on Matplotlib version and not the file contents (since those dependencies may not have updated), but this should work well enough.
23,644
0
29
52
16
109,556
17
matplotlib
13
doc/conf.py
Python
8
{ "docstring": "\n Add cache busting query on CSS and JavaScript assets.\n\n This adds the Matplotlib version as a query to the link reference in the\n HTML, if the path is not absolute (i.e., it comes from the `_static`\n directory) and doesn't already have a query.\n ", "language": "en", "n_whitespaces": 59, "n_words": 43, "vocab_size": 36 }
https://github.com/matplotlib/matplotlib.git
2
target_temperature_high
def target_temperature_high(self): if self.hvac_mode == HVACMode.HEAT_COOL: return self._econet.cool_set_point return None
04b9c9300645fb30541e2bf0881d35cc698a47c5
9
climate.py
39
Use climate enums in econet (#70633)
97,626
0
42
23
9
298,684
10
core
7
homeassistant/components/econet/climate.py
Python
4
{ "docstring": "Return the higher bound temperature we try to reach.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
10
_promote_shapes
def _promote_shapes(fun_name, *args): if len(args) < 2: return args else: shapes = [shape(arg) for arg in args] if _all(len(shapes[0]) == len(s) for s in shapes[1:]): return args # no need for rank promotion, so rely on lax promotion nonscalar_ranks = {len(shp) for shp in shapes if shp} if len(nonscalar_ranks) < 2: return args else: if config.jax_numpy_rank_promotion != "allow": _rank_promotion_warning_or_error(fun_name, shapes) result_shape = lax.broadcast_shapes(*shapes) return [broadcast_to(arg, result_shape) for arg, shp in zip(args, shapes)]
d9dcd1394aedf760272f14c3560cd5415495c28a
15
lax_numpy.py
203
djax: let make_jaxpr build dyn shape jaxprs
26,552
0
126
128
49
119,183
72
jax
19
jax/_src/numpy/lax_numpy.py
Python
15
{ "docstring": "Apply NumPy-style broadcasting, making args shape-compatible for lax.py.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/google/jax.git
2
_build_attention
def _build_attention(self, qkv_rank): if self._attention_axes is None: self._attention_axes = tuple(range(1, qkv_rank - 2)) else: self._attention_axes = tuple(self._attention_axes) # pragma: no cover ( self._dot_product_equation, self._combine_equation, attn_scores_rank, ) = _build_attention_equation(qkv_rank, attn_axes=self._attention_axes) norm_axes = tuple( range(attn_scores_rank - len(self._attention_axes), attn_scores_rank) ) self._masked_softmax = MaskedSoftmax( mask_expansion_axes=[1], normalization_axes=norm_axes ) self._dropout_layer = layers.Dropout(rate=self._dropout)
b97d27d2e916025f65fed751d54c089d4d4bd022
14
keras_layers.py
176
clean up imports
41,666
0
194
113
36
176,008
46
autokeras
22
autokeras/keras_layers.py
Python
17
{ "docstring": "Builds multi-head dot-product attention computations.\n\n This function builds attributes necessary for `_compute_attention` to\n costomize attention computation to replace the default dot-product\n attention.\n\n Args:\n qkv_rank: the rank of query, key, value tensors.\n ", "language": "en", "n_whitespaces": 75, "n_words": 31, "vocab_size": 27 }
https://github.com/keras-team/autokeras.git
1
test_visibility_when_disabled
def test_visibility_when_disabled(self) -> None: room_id = self.helper.create_room_as(self.user_id, tok=self.token) self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, body={"max_lifetime": one_day_ms}, tok=self.token, ) resp = self.helper.send(room_id=room_id, body="test", tok=self.token) self.reactor.advance(one_day_ms * 2 / 1000) self.get_event(room_id, resp["event_id"])
4cc4229cd7a55d2556c798fecbb1c9660dc821c8
11
test_retention.py
160
Prevent expired events from being filtered out when retention is disabled (#12611) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Co-authored-by: Patrick Cloke <clokep@users.noreply.github.com>
72,235
0
120
102
25
248,358
27
synapse
19
tests/rest/client/test_retention.py
Python
12
{ "docstring": "Retention policies should be ignored when the retention feature is disabled.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/matrix-org/synapse.git
2
update_status_for_contracts
def update_status_for_contracts(): contracts = frappe.get_all( "Contract", filters={"is_signed": True, "docstatus": 1}, fields=["name", "start_date", "end_date"], ) for contract in contracts: status = get_status(contract.get("start_date"), contract.get("end_date")) frappe.db.set_value("Contract", contract.get("name"), "status", status)
494bd9ef78313436f0424b918f200dab8fc7c20b
13
contract.py
138
style: format code with black
13,992
0
17
78
25
65,709
26
erpnext
12
erpnext/crm/doctype/contract/contract.py
Python
9
{ "docstring": "\n\tRun the daily hook to update the statuses for all signed\n\tand submitted Contracts\n\t", "language": "en", "n_whitespaces": 12, "n_words": 14, "vocab_size": 13 }
https://github.com/frappe/erpnext.git
1
test_unique_id
async def test_unique_id(hass): await setup_test_entity( hass, { "unique": { "command_open": "echo open", "command_close": "echo close", "command_stop": "echo stop", "unique_id": "unique", }, "not_unique_1": { "command_open": "echo open", "command_close": "echo close", "command_stop": "echo stop", "unique_id": "not-so-unique-anymore", }, "not_unique_2": { "command_open": "echo open", "command_close": "echo close", "command_stop": "echo stop", "unique_id": "not-so-unique-anymore", }, }, ) assert len(hass.states.async_all()) == 2 ent_reg = entity_registry.async_get(hass) assert len(ent_reg.entities) == 2 assert ent_reg.async_get_entity_id("cover", "command_line", "unique") is not None assert ( ent_reg.async_get_entity_id("cover", "command_line", "not-so-unique-anymore") is not None )
d26275011ae4e8ba0a8dcdc2a7ef81b5911d3900
13
test_cover.py
264
Add unique_id configuration variable to command_line integration (#58596)
107,219
0
386
138
38
308,463
78
core
11
tests/components/command_line/test_cover.py
Python
32
{ "docstring": "Test unique_id option and if it only creates one cover per id.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/home-assistant/core.git
1
MultivariateT
def MultivariateT(syms, mu, sigma, v): return multivariate_rv(MultivariateTDistribution, syms, mu, sigma, v) #------------------------------------------------------------------------------- # Multivariate Normal Gamma distribution ---------------------------------------
7fe8e027ae1d7f683243c0229b961671a6cbb4c5
7
joint_rv_types.py
37
Improved some documentation in the stats module
48,612
0
22
25
16
197,534
18
sympy
7
sympy/stats/joint_rv_types.py
Python
2
{ "docstring": "\n Creates a joint random variable with multivariate T-distribution.\n\n Parameters\n ==========\n\n syms : A symbol/str\n For identifying the random variable.\n mu : A list/matrix\n Representing the location vector\n sigma : The shape matrix for the distribution\n\n Examples\n ========\n\n >>> from sympy.stats import density, MultivariateT\n >>> from sympy import Symbol\n\n >>> x = Symbol(\"x\")\n >>> X = MultivariateT(\"x\", [1, 1], [[1, 0], [0, 1]], 2)\n\n >>> density(X)(1, 2)\n 2/(9*pi)\n\n Returns\n =======\n\n RandomSymbol\n\n ", "language": "en", "n_whitespaces": 139, "n_words": 70, "vocab_size": 56 }
https://github.com/sympy/sympy.git
8
choose_agent
async def choose_agent(self) -> Optional[JobAgentSubmissionClient]: # the number of agents which has an available HTTP port. while True: raw_agent_infos = await DataOrganizer.get_all_agent_infos() agent_infos = { key: value for key, value in raw_agent_infos.items() if value.get("httpPort", -1) > 0 } if len(agent_infos) > 0: break await asyncio.sleep(dashboard_consts.TRY_TO_GET_AGENT_INFO_INTERVAL_SECONDS) # delete dead agents. for dead_node in set(self._agents) - set(agent_infos): client = self._agents.pop(dead_node) await client.close() if len(self._agents) >= dashboard_consts.CANDIDATE_AGENT_NUMBER: node_id = sample(set(self._agents), 1)[0] return self._agents[node_id] else: # Randomly select one from among all agents, it is possible that # the selected one already exists in `self._agents` node_id = sample(set(agent_infos), 1)[0] agent_info = agent_infos[node_id] if node_id not in self._agents: node_ip = agent_info["ipAddress"] http_port = agent_info["httpPort"] agent_http_address = f"http://{node_ip}:{http_port}" self._agents[node_id] = JobAgentSubmissionClient(agent_http_address) return self._agents[node_id]
db2f84bdfa49d218f97bf7f10678232bff8c48d5
14
job_head.py
333
[Job Submission][refactor 5/N] Remove the head node dependency on the `Raylet` process (#28599) * introduce stop_job Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * save Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * save Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * save Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * head rayletless Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * fix UT Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * fix UT Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * save Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * refactor choose_agent Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * fix Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * save Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * save Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * fix UT * delete mock Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> * Use "auto" for entrypoint script Signed-off-by: Archit Kulkarni <architkulkarni@users.noreply.github.com> Signed-off-by: Catch-Bull <burglarralgrub@gmail.com> Signed-off-by: Archit Kulkarni <architkulkarni@users.noreply.github.com> Co-authored-by: Archit Kulkarni <architkulkarni@users.noreply.github.com>
28,655
0
451
199
86
128,291
117
ray
30
dashboard/modules/job/job_head.py
Python
40
{ "docstring": "\n Try to disperse as much as possible to select one of\n the `CANDIDATE_AGENT_NUMBER` agents to solve requests.\n the agents will not pop from `self._agents` unless\n it's dead. Saved in `self._agents` is the agent that was\n used before.\n Strategy:\n 1. if the number of `self._agents` has reached\n `CANDIDATE_AGENT_NUMBER`, randomly select one agent from\n `self._agents`.\n 2. if not, randomly select one agent from all available agents,\n it is possible that the selected one already exists in\n `self._agents`.\n ", "language": "en", "n_whitespaces": 203, "n_words": 75, "vocab_size": 48 }
https://github.com/ray-project/ray.git
1
test_switch_read_alarm_state
async def test_switch_read_alarm_state(hass, utcnow): helper = await setup_test_component(hass, create_security_system_service) await helper.async_update( ServicesTypes.SECURITY_SYSTEM, {CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 0}, ) state = await helper.poll_and_get_state() assert state.state == "armed_home" assert state.attributes["battery_level"] == 50 await helper.async_update( ServicesTypes.SECURITY_SYSTEM, {CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 1}, ) state = await helper.poll_and_get_state() assert state.state == "armed_away" await helper.async_update( ServicesTypes.SECURITY_SYSTEM, {CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 2}, ) state = await helper.poll_and_get_state() assert state.state == "armed_night" await helper.async_update( ServicesTypes.SECURITY_SYSTEM, {CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 3}, ) state = await helper.poll_and_get_state() assert state.state == "disarmed" await helper.async_update( ServicesTypes.SECURITY_SYSTEM, {CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 4}, ) state = await helper.poll_and_get_state() assert state.state == "triggered"
58b8c30221a6f6e5acbbe98b7e3298b03fb741f5
11
test_alarm_control_panel.py
305
Improve homekit_controller tests (#65266)
110,112
0
222
186
30
311,447
83
core
14
tests/components/homekit_controller/test_alarm_control_panel.py
Python
33
{ "docstring": "Test that we can read the state of a HomeKit alarm accessory.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/home-assistant/core.git