Dataset schema: one line per column, giving the dtype and the observed minimum/maximum (value range for int64 columns, string-length range for string columns).

column            dtype     min      max
complexity        int64     1        139
fun_name          string    1        80
code              string    101      62.2k
commit_id         string    40       40
ast_errors        string    0        3.11k
ast_levels        int64     6        36
file_name         string    5        79
n_ast_nodes       int64     17       19.2k
commit_message    string    3        15.3k
d_id              int64     12       121k
n_ast_errors      int64     0        9
n_whitespaces     int64     4        10.8k
token_counts      int64     5        3.06k
vocab_size        int64     4        1.11k
id                int64     20       338k
n_words           int64     4        4.82k
repo              string    3        22
n_identifiers     int64     2        176
path              string    7        134
language          string    1 class (all rows: Python)
nloc              int64     1        413
documentation     dict
url               string    31       59
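As a quick orientation, below is a minimal sketch of how a dataset with these columns could be loaded and inspected with the Hugging Face datasets library. The dataset identifier used here is a placeholder, not the real name of this dataset, and the snippet only assumes the columns listed in the table above.

    # Minimal sketch: load a dataset with the schema above and inspect one record.
    # "org/code-complexity-corpus" is a hypothetical identifier; substitute the real one.
    from datasets import load_dataset

    ds = load_dataset("org/code-complexity-corpus", split="train")
    print(ds.features)                 # column names and dtypes, matching the table above
    row = ds[0]
    print(row["repo"], row["path"], row["fun_name"], row["complexity"])
    print(row["code"][:200])           # source of the function
    print(row["documentation"]["docstring"][:200])  # docstring stored in the dict column

The records below are sample rows from the dataset, listed one field per line in roughly the column order given above.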
1
test_task_name_consume_folder
def test_task_name_consume_folder(self):
    result1 = TaskResult.objects.create(
        task_id=str(uuid.uuid4()),
        task_name="documents.tasks.some_task",
        status=celery.states.SUCCESS,
        task_args="\"('/consume/anothertest.pdf',)\"",
        task_kwargs="\"{'override_tag_ids': None}\"",
    )
    _ = PaperlessTask.objects.create(attempted_task=result1)
    response = self.client.get(self.ENDPOINT)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(response.data), 1)
    returned_data = response.data[0]
    self.assertEqual(returned_data["name"], "anothertest.pdf")
5b66ef0a748fd5570361a2a1ed6147e0462568d2
13
test_api.py
186
Updates how task_args and task_kwargs are parsed, adds testing to cover everything I can think of
117,052
0
145
112
24
320,067
27
paperless-ngx
29
src/documents/tests/test_api.py
Python
14
{ "docstring": "\n GIVEN:\n - Attempted celery task\n - Task was created through the consume folder\n WHEN:\n - API call is made to get tasks\n THEN:\n - Returned data include the filename\n ", "language": "en", "n_whitespaces": 102, "n_words": 29, "vocab_size": 25 }
https://github.com/paperless-ngx/paperless-ngx.git
2
_prepare_registry
def _prepare_registry(self):  # noqa
    cur = self.internal_registry.cursor()
    if ('models',) not in list(cur.execute("SELECT name FROM sqlite_master WHERE type='table';")):
        cur.execute('create table models (model_name text, format text, target text, url text)')  # TODO: dtype_dict?
        self.internal_registry.commit()
30877cf7ead465750763822b3c88f970c870d9dd
11
mlflow_integration.py
84
feat: add mlflow, mysql, base handlers
25,156
0
67
45
21
114,339
22
mindsdb
8
mindsdb/integrations/mlflow_handler/mlflow/mlflow_integration.py
Python
5
{ "docstring": " Checks that sqlite records of registered models exists, otherwise creates it. create table models (model_name text, format text, target text, url text)", "language": "en", "n_whitespaces": 22, "n_words": 22, "vocab_size": 19 }
https://github.com/mindsdb/mindsdb.git
1
_get_threads
def _get_threads(self) -> MultiThread:
    # TODO Check if multiple threads actually speeds anything up
    save_queue = queue_manager.get_queue("convert_out")
    patch_queue = queue_manager.get_queue("patch")
    return MultiThread(self._converter.process,
                       patch_queue, save_queue,
                       thread_count=self._pool_processes,
                       name="patch")
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
9
convert.py
80
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20,797
0
87
47
25
101,382
26
faceswap
12
scripts/convert.py
Python
11
{ "docstring": " Get the threads for patching the converted faces onto the frames.\n\n Returns\n :class:`lib.multithreading.MultiThread`\n The threads that perform the patching of swapped faces onto the output frames\n ", "language": "en", "n_whitespaces": 59, "n_words": 26, "vocab_size": 18 }
https://github.com/deepfakes/faceswap.git
7
parseline
def parseline(self, line):
    line = line.strip()
    if not line:
        return None, None, line
    elif line[0] == '?':
        line = 'help ' + line[1:]
    elif line[0] == '!':
        if hasattr(self, 'do_shell'):
            line = 'shell ' + line[1:]
        else:
            return None, None, line
    i, n = 0, len(line)
    while i < n and line[i] in self.identchars: i = i+1
    cmd, arg = line[:i], line[i:].strip()
    return cmd, arg, line
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
cmd.py
211
add python 3.10.4 for windows
56,361
0
203
129
41
221,346
66
XX-Net
11
python3.10.4/Lib/cmd.py
Python
15
{ "docstring": "Parse the line into a command name and a string containing\n the arguments. Returns a tuple containing (command, args, line).\n 'command' and 'args' may be None if the line couldn't be parsed.\n ", "language": "en", "n_whitespaces": 54, "n_words": 32, "vocab_size": 24 }
https://github.com/XX-net/XX-Net.git
5
_get_animated_artists
def _get_animated_artists(self):
    return tuple([a for ax_ in self.ax.get_figure().get_axes()
                  for a in ax_.get_children()
                  if a.get_animated() and a not in self.artists])
334cc617b8ed3b6b4ec6cb64ff16a040ef454149
14
widgets.py
78
Fix z_order
22,584
0
75
48
15
107,070
19
matplotlib
11
lib/matplotlib/widgets.py
Python
4
{ "docstring": "\n Convenience method to get all animated artists of a figure, except\n those already present in self.artists. 'z_order' is ignored.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
https://github.com/matplotlib/matplotlib.git
1
test_message
def test_message(self) -> None:
    room_id = self.helper.create_room_as(
        self.other_user_id, tok=self.other_access_token
    )

    # The user should be in the room.
    self.helper.join(room_id, self.banned_user_id, tok=self.banned_access_token)

    # Sending a message should complete successfully.
    result = self.helper.send_event(
        room_id=room_id,
        type=EventTypes.Message,
        content={"msgtype": "m.text", "body": "with right label"},
        tok=self.banned_access_token,
    )
    self.assertIn("event_id", result)
    event_id = result["event_id"]

    latest_events = self.get_success(
        self.store.get_latest_event_ids_in_room(room_id)
    )
    self.assertNotIn(event_id, latest_events)
1901cb1d4a8b7d9af64493fbd336e9aa2561c20c
12
test_shadow_banned.py
192
Add type hints to `tests/rest/client` (#12084)
71,479
0
210
118
46
247,069
53
synapse
24
tests/rest/client/test_shadow_banned.py
Python
18
{ "docstring": "Messages from shadow-banned users don't actually get sent.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/matrix-org/synapse.git
7
_get_streams
def _get_streams(self):
    params = self.session.http.get(self.url, schema=validate.Schema(
        re.compile(
            r"""
                mtva_player_manager\.player\s*\(\s*
                    document\.getElementById\(\s*"\w+"\s*\)\s*,\s*
                    (?P<json>{.*?})\s*
                \)\s*;
            """,
            re.VERBOSE | re.DOTALL,
        ),
        validate.none_or_all(
            validate.get("json"),
            validate.parse_json(),
            {
                "contentId": validate.any(str, int),
                validate.optional("streamId"): str,
                validate.optional("idec"): str,
                validate.optional("token"): str,
            },
        ),
    ))
    if not params:
        log.error("Could not find player manager data")
        return

    params.update({
        "video": (unquote(params.pop("token"))
                  if params.get("token") is not None
                  else params.pop("streamId")),
        "noflash": "yes",
        "embedded": "0",
    })

    url_parsed = urlparse(self.url)
    skip_vods = url_parsed.netloc.endswith("m4sport.hu") and url_parsed.path.startswith("/elo")

    self.session.http.headers.update({"Referer": self.url})
    playlists = self.session.http.get(self.PLAYER_URL, params=params, schema=validate.Schema(
        re.compile(r"pl\.setup\s*\(\s*(?P<json>{.*?})\s*\)\s*;", re.DOTALL),
        validate.none_or_all(
            validate.get("json"),
            validate.parse_json(),
            {"playlist": [{
                "file": validate.url(),
                "type": str,
            }]},
            validate.get("playlist"),
            validate.filter(lambda p: p["type"] == "hls"),
            validate.filter(lambda p: not skip_vods or "vod" not in p["file"]),
            validate.map(lambda p: update_scheme("https://", p["file"])),
        ),
    ))

    for url in playlists or []:
        yield from HLSStream.parse_variant_playlist(self.session, url).items()


__plugin__ = Mediaklikk
08209aecf41706dc1eb0171466d88d7fb02aefca
20
mediaklikk.py
609
plugins: refactor validation schemas Refactor validation schemas of plugins where schemas are not defined as class attributes or where no major changes are needed: - Use `validate.none_or_all(...)` - Replace `validate.transform(pattern.search)` with `pattern` and fix schemas using `validate.regex(pattern)` where a regex has to match - Move pattern definitions from class attributes to schema definitions - Fix some patterns in regards to quotation matching - Remove unneeded type validations, like `str` for example - Remove unneeded `TypeError` exceptions from try-except code blocks - Fix minor style issues where it makes sense - use double quotes - add trailing commas - fix indentation
45,798
0
698
367
88
187,530
114
streamlink
44
src/streamlink/plugins/mediaklikk.py
Python
52
{ "docstring": "\n mtva_player_manager\\.player\\s*\\(\\s*\n document\\.getElementById\\(\\s*\"\\w+\"\\s*\\)\\s*,\\s*\n (?P<json>{.*?})\\s*\n \\)\\s*;\n ", "language": "en", "n_whitespaces": 104, "n_words": 4, "vocab_size": 4 }
https://github.com/streamlink/streamlink.git
1
test_upload_file_with_supplied_mimetype
def test_upload_file_with_supplied_mimetype(self) -> None:
    fp = StringIO("zulip!")
    fp.name = "pasted_file"
    result = self.api_post(
        self.example_user("hamlet"), "/api/v1/user_uploads?mimetype=image/png", {"file": fp}
    )
    uri = self.assert_json_success(result)["uri"]
    self.assertTrue(uri.endswith("pasted_file.png"))
a142fbff85302c5e3acb2e204eca2e9c75dbc74b
11
test_upload.py
114
tests: Refactor away result.json() calls with helpers. Signed-off-by: Zixuan James Li <p359101898@gmail.com>
17,798
0
82
62
19
84,169
22
zulip
12
zerver/tests/test_upload.py
Python
13
{ "docstring": "\n When files are copied into the system clipboard and pasted for upload\n the filename may not be supplied so the extension is determined from a\n query string parameter.\n ", "language": "en", "n_whitespaces": 57, "n_words": 28, "vocab_size": 26 }
https://github.com/zulip/zulip.git
1
write_graph6
def write_graph6(G, path, nodes=None, header=True):
    return write_graph6_file(G, path, nodes=nodes, header=header)


@not_implemented_for("directed")
@not_implemented_for("multigraph")
9b63ca1a0d46a1f50bcc59eda52be02721a134db
@not_implemented_for("directed") @not_implemented_for("multigraph")
8
graph6.py
67
Remove old Appveyor cruft (#5924) * Remove old Appveyor cruft * Fix Windows issue
42,276
1
16
31
11
177,121
12
networkx
7
networkx/readwrite/graph6.py
Python
2
{ "docstring": "Write a simple undirected graph to a path in graph6 format.\n\n Parameters\n ----------\n G : Graph (undirected)\n\n path : str\n The path naming the file to which to write the graph.\n\n nodes: list or iterable\n Nodes are labeled 0...n-1 in the order provided. If None the ordering\n given by ``G.nodes()`` is used.\n\n header: bool\n If True add '>>graph6<<' string to head of data\n\n Raises\n ------\n NetworkXNotImplemented\n If the graph is directed or is a multigraph.\n\n ValueError\n If the graph has at least ``2 ** 36`` nodes; the graph6 format\n is only defined for graphs of order less than ``2 ** 36``.\n\n Examples\n --------\n You can write a graph6 file by giving the path to a file::\n\n >>> import tempfile\n >>> with tempfile.NamedTemporaryFile(delete=False) as f:\n ... nx.write_graph6(nx.path_graph(2), f.name)\n ... _ = f.seek(0)\n ... print(f.read())\n b'>>graph6<<A_\\\\n'\n\n See Also\n --------\n from_graph6_bytes, read_graph6\n\n Notes\n -----\n The function writes a newline character after writing the encoding\n of the graph.\n\n The format does not support edge or node labels, parallel edges or\n self loops. If self loops are present they are silently ignored.\n\n References\n ----------\n .. [1] Graph6 specification\n <http://users.cecs.anu.edu.au/~bdm/data/formats.html>\n\n ", "language": "en", "n_whitespaces": 373, "n_words": 184, "vocab_size": 127 }
https://github.com/networkx/networkx.git
1
test_forked_graph_cleanup
def test_forked_graph_cleanup(self) -> None: r # Create the room graph event_id_a = self.create_and_send_event(self.room_id, self.user) event_id_b = self.create_and_send_event(self.room_id, self.user) event_id_sf1 = self.create_and_send_event( self.room_id, self.user, True, [event_id_a] ) event_id_sf2 = self.create_and_send_event( self.room_id, self.user, True, [event_id_a, event_id_b] ) event_id_sf3 = self.create_and_send_event( self.room_id, self.user, True, [event_id_sf1] ) self.create_and_send_event( self.room_id, self.user, True, [event_id_sf2, event_id_sf3] ) # SF4 event_id_c = self.create_and_send_event( self.room_id, self.user, False, [event_id_sf3] ) # Add the new extremity and check the latest events are as expected self.add_extremity(self.room_id, event_id_a) latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b, event_id_c}) # Run the background update and check it did the right thing self.run_background_update() latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) self.assertEqual(set(latest_event_ids), {event_id_b, event_id_c})
3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b
11
test_cleanup_extrems.py
327
Require types in tests.storage. (#14646) Adds missing type hints to `tests.storage` package and does not allow untyped definitions.
73,272
0
350
220
61
250,105
105
synapse
19
tests/storage/test_cleanup_extrems.py
Python
46
{ "docstring": "Test that extremities are correctly calculated in the presence of\n soft failed events.\n\n Tests a graph like, where time flows down the page:\n\n A B\n / \\ /\n / \\ /\n SF1 SF2\n | |\n SF3 |\n / \\ |\n | \\ |\n C SF4\n\n Where SF* are soft failed, and with them A, B and C marked as\n extremities. This should resolve to B and C being marked as extremity.\n ", "language": "en", "n_whitespaces": 241, "n_words": 71, "vocab_size": 48 }
https://github.com/matrix-org/synapse.git
3
_test_settings_get
def _test_settings_get(self, key, default=None, prefixed=None):
    settings_dict = self.connection.settings_dict
    val = settings_dict["TEST"].get(key, default)
    if val is None and prefixed:
        val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
    return val
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
creation.py
83
Refs #33476 -- Reformatted code with Black.
51,003
0
71
53
20
205,061
25
django
10
django/db/backends/oracle/creation.py
Python
6
{ "docstring": "\n Return a value from the test settings dict, or a given default, or a\n prefixed entry from the main settings dict.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 15 }
https://github.com/django/django.git
4
throw
def throw(self, typ, val=None, tb=None):
    if val is None:
        if tb is None:
            raise typ
        val = typ()
    if tb is not None:
        val = val.with_traceback(tb)
    raise val
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
_collections_abc.py
78
add python 3.10.4 for windows
55,605
0
104
49
16
219,497
28
XX-Net
6
python3.10.4/Lib/_collections_abc.py
Python
8
{ "docstring": "Raise an exception in the coroutine.\n Return next yielded value or raise StopIteration.\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 13 }
https://github.com/XX-net/XX-Net.git
1
count_non_terminated_nodes
def count_non_terminated_nodes() -> int:
    provider_config = _generate_provider_config(ray_cluster_namespace="default")
    kuberay_node_provider = _get_node_provider(
        provider_config=provider_config, cluster_name="raycluster-autoscaler"
    )
    nodes = kuberay_node_provider.non_terminated_nodes({})
    return len(nodes)
c4a259828b9cfbfb4f09059f74808893a6d20b76
10
non_terminated_nodes_count.py
73
[kuberay] Update KubeRay operator commit, turn autoscaler RPC drain back on (#27077) This PR: - Updates the KubeRay operator commit used in the Ray CI autoscaling test - Uses the RayCluster autoscaling sample config from the KubeRay repo in place of of a config from the Ray repo - Turns the autoscaler RPC worker drain back on, as I saw some dead node messages from the GCS, and the RPC drain is supposed to avoid those. Signed-off-by: Dmitri Gekhtman <dmitri.m.gekhtman@gmail.com>
28,023
0
43
41
16
125,913
18
ray
11
python/ray/tests/kuberay/scripts/non_terminated_nodes_count.py
Python
10
{ "docstring": "Get the count of non terminated nodes for the Ray cluster raycluster-autoscaler\n in namespace default.\n ", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 14 }
https://github.com/ray-project/ray.git
2
all_equal
def all_equal(iterable):
    g = itertools.groupby(iterable)
    return next(g, True) and not next(g, False)
8f1ba4de582c5e5282c022a7713a56b47302cabe
9
util.py
48
Backport parser/alignment optimizations from `feature/refactor-parser` (#10952)
24,410
0
21
29
11
111,439
12
spaCy
6
spacy/util.py
Python
3
{ "docstring": "Return True if all the elements are equal to each other\n (or if the input is an empty sequence), False otherwise.", "language": "en", "n_whitespaces": 23, "n_words": 21, "vocab_size": 19 }
https://github.com/explosion/spaCy.git
1
pagination_querystring
def pagination_querystring(context, page_number, page_key="p"):
    return querystring(context, **{page_key: page_number})


@register.inclusion_tag(
    "wagtailadmin/pages/listing/_pagination.html", takes_context=True
)
d10f15e55806c6944827d801cd9c2d53f5da4186
@register.inclusion_tag( "wagtailadmin/pages/listing/_pagination.html", takes_context=True )
10
wagtailadmin_tags.py
59
Reformat with black
15,657
1
19
24
12
71,277
12
wagtail
8
wagtail/admin/templatetags/wagtailadmin_tags.py
Python
2
{ "docstring": "\n Print out a querystring with an updated page number:\n\n {% if page.has_next_page %}\n <a href=\"{% pagination_link page.next_page_number %}\">Next page</a>\n {% endif %}\n ", "language": "en", "n_whitespaces": 54, "n_words": 22, "vocab_size": 20 }
https://github.com/wagtail/wagtail.git
3
test_knn_imputer_keep_empty_features
def test_knn_imputer_keep_empty_features(keep_empty_features):
    X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])

    imputer = KNNImputer(keep_empty_features=keep_empty_features)

    for method in ["fit_transform", "transform"]:
        X_imputed = getattr(imputer, method)(X)
        if keep_empty_features:
            assert X_imputed.shape == X.shape
            assert_array_equal(X_imputed[:, 1], 0)
        else:
            assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
d8fa96c29828e3ca79ddd5d7466521ac4d95213c
15
test_impute.py
168
ENH keep features with all missing values during imputation (#24770) Co-authored-by: Chiara Marmo <cmarmo@users.noreply.github.com> Co-authored-by: Julien Jerphanion <git@jjerphan.xyz> Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> Co-authored-by: Vitor SRG <vitorssrg@gmail.com> Fixes https://github.com/scikit-learn/scikit-learn/pull/16695 Fixes https://github.com/scikit-learn/scikit-learn/issues/16426 Fixes https://github.com/scikit-learn/scikit-learn/issues/16977
76,878
0
105
110
33
261,584
39
scikit-learn
13
sklearn/impute/tests/test_impute.py
Python
10
{ "docstring": "Check the behaviour of `keep_empty_features` for `KNNImputer`.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/scikit-learn/scikit-learn.git
2
test_batch_idx
def test_batch_idx(self) -> int:
    if self.trainer is None:
        return 0
    return self.trainer.test_loop.epoch_loop.batch_progress.current.processed
8a549a550cb10189ff1db382f546a40cd1c6c5b3
11
base.py
51
Integrate progress tracking into the progress bar (#11213)
69,648
0
44
31
11
241,678
12
lightning
9
pytorch_lightning/callbacks/progress/base.py
Python
8
{ "docstring": "The number of batches processed during testing.\n\n Use this to update your progress bar.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
https://github.com/Lightning-AI/lightning.git
2
_get_one_trial_job
def _get_one_trial_job(self):
    if not self.generated_hyper_configs:
        ret = {
            'parameter_id': '-1_0_0',
            'parameter_source': 'algorithm',
            'parameters': ''
        }
        self.send(CommandType.NoMoreTrialJobs, nni.dump(ret))
        return None
    assert self.generated_hyper_configs
    params = self.generated_hyper_configs.pop(0)
    ret = {
        'parameter_id': params[0],
        'parameter_source': 'algorithm',
        'parameters': params[1]
    }
    self.parameters[params[0]] = params[1]
    return ret
98c1a77f61900d486f46d284c49fb65675dbee6a
11
bohb_advisor.py
164
Support multiple HPO experiments in one process (#4855)
24,770
0
217
95
26
112,855
39
nni
12
nni/algorithms/hpo/bohb_advisor/bohb_advisor.py
Python
18
{ "docstring": "get one trial job, i.e., one hyperparameter configuration.\n\n If this function is called, Command will be sent by BOHB:\n a. If there is a parameter need to run, will return \"NewTrialJob\" with a dict:\n {\n 'parameter_id': id of new hyperparameter\n 'parameter_source': 'algorithm'\n 'parameters': value of new hyperparameter\n }\n b. If BOHB don't have parameter waiting, will return \"NoMoreTrialJobs\" with\n {\n 'parameter_id': '-1_0_0',\n 'parameter_source': 'algorithm',\n 'parameters': ''\n }\n ", "language": "en", "n_whitespaces": 189, "n_words": 67, "vocab_size": 48 }
https://github.com/microsoft/nni.git
2
get_menu_item
def get_menu_item(self, order=None):
    return ModelAdminMenuItem(self, order or self.get_menu_order())
b8a9a2d319b06fc2318d68d05b5a6cdf85b5b33d
10
options.py
38
Deprecate wagtail.contrib.modeladmin.menus.SubMenu in favour of wagtail.admin.menu.Menu The Menu class was not originally designed to accept menu items at constructor time (instead requiring them to be passed via hooks); ModelAdmin's SubMenu class patched this functionality in, and the documentation for extending admin views piggybacked on this. Add this functionality to the base Menu class so that we don't have this unnecessary dependency on ModelAdmin.
16,727
0
22
23
8
77,976
8
wagtail
5
wagtail/contrib/modeladmin/options.py
Python
2
{ "docstring": "\n Utilised by Wagtail's 'register_menu_item' hook to create a menu item\n to access the listing view, or can be called by ModelAdminGroup\n to create a submenu\n ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 20 }
https://github.com/wagtail/wagtail.git
1
get_failure
def get_failure(self, d, exc):
    deferred: Deferred[Any] = ensureDeferred(d)
    self.pump()
    return self.failureResultOf(deferred, exc)
33ebee47e4e96a2b6fdf72091769e59034dc550f
8
unittest.py
56
Remove redundant `get_success` calls in test code (#12346) There are a bunch of places we call get_success on an immediate value, which is unnecessary. Let's rip them out, and remove the redundant functionality in get_success and friends.
71,975
0
40
35
12
247,885
12
synapse
10
tests/unittest.py
Python
4
{ "docstring": "\n Run a Deferred and get a Failure from it. The failure must be of the type `exc`.\n ", "language": "en", "n_whitespaces": 32, "n_words": 17, "vocab_size": 16 }
https://github.com/matrix-org/synapse.git
4
get_client_ip
def get_client_ip(request, additional_headers=()):
    HTTP_HEADERS = (
        'HTTP_X_REAL_IP',
        'HTTP_X_FORWARDED_FOR',
        'REMOTE_ADDR',
        *additional_headers
    )
    for header in HTTP_HEADERS:
        if header in request.META:
            client_ip = request.META[header].split(',')[0]
            try:
                return ipaddress.ip_address(client_ip)
            except ValueError:
                raise ValueError(f"Invalid IP address set for {header}: {client_ip}")

    # Could not determine the client IP address from request headers
    return None
a38a880e67d78eba52f19cc4c2613e9399939c2f
16
request.py
128
Refactor source IP resolution logic
78,016
0
164
71
41
265,175
48
netbox
11
netbox/utilities/request.py
Python
18
{ "docstring": "\n Return the client (source) IP address of the given request.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 9 }
https://github.com/netbox-community/netbox.git
6
save_video_meta_data
def save_video_meta_data(self, pts_time, keyframes): if pts_time[0] != 0: pts_time, keyframes = self._pad_leading_frames(pts_time, keyframes) sample_filename = next(fname for fname in self.data) basename = sample_filename[:sample_filename.rfind("_")] logger.debug("sample filename: %s, base filename: %s", sample_filename, basename) logger.info("Saving video meta information to Alignments file") for idx, pts in enumerate(pts_time): meta = dict(pts_time=pts, keyframe=idx in keyframes) key = f"{basename}_{idx + 1:06d}.png" if key not in self.data: self.data[key] = dict(video_meta=meta, faces=[]) else: self.data[key]["video_meta"] = meta logger.debug("Alignments count: %s, timestamp count: %s", len(self.data), len(pts_time)) if len(self.data) != len(pts_time): raise FaceswapError( "There is a mismatch between the number of frames found in the video file " f"({len(pts_time)}) and the number of frames found in the alignments file " f"({len(self.data)}).\nThis can be caused by a number of issues:" "\n - The video has a Variable Frame Rate and FFMPEG is having a hard time " "calculating the correct number of frames." "\n - You are working with a Merged Alignments file. This is not supported for " "your current use case." "\nYou should either extract the video to individual frames, re-encode the " "video at a constant frame rate and re-run extraction or work with a dedicated " "alignments file for your requested video.") self.save()
5e73437be47f2410439a3c6716de96354e6a0c94
15
alignments.py
353
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
20,631
0
512
189
128
101,210
194
faceswap
26
lib/align/alignments.py
Python
28
{ "docstring": " Save video meta data to the alignments file.\n\n If the alignments file does not have an entry for every frame (e.g. if Extract Every N\n was used) then the frame is added to the alignments file with no faces, so that they video\n meta data can be stored.\n\n Parameters\n ----------\n pts_time: list\n A list of presentation timestamps (`float`) in frame index order for every frame in\n the input video\n keyframes: list\n A list of frame indices corresponding to the key frames in the input video\n ", "language": "en", "n_whitespaces": 175, "n_words": 85, "vocab_size": 55 }
https://github.com/deepfakes/faceswap.git
1
unweighted_minimum_spanning_digraph
def unweighted_minimum_spanning_digraph(tree, children=iter, shapes=None, attr=None):
    return edges2dot(
        edge_closure(
            tree, lambda node: unweighted_minimum_spanning_dict(tree, children)[node]
        ),
        shapes,
        attr,
    )


##########################################################################
# Breadth-First / Depth-first Searches with Cycle Detection
##########################################################################
692adaff901dd9daf29400fdf3385130aefbfb2a
13
util.py
67
Fix some tests in Wordnet-related DocStrings
7,568
0
72
44
26
42,483
27
nltk
10
nltk/util.py
Python
8
{ "docstring": "\n\n Build a Minimum Spanning Tree (MST) of an unweighted graph,\n by traversing the nodes of a tree in breadth-first order,\n discarding eventual cycles.\n\n Return a representation of this MST as a string in the DOT graph language,\n which can be converted to an image by the 'dot' program from the Graphviz\n package, or nltk.parse.dependencygraph.dot2img(dot_string).\n\n The first argument should be the tree root;\n children should be a function taking as argument a tree node\n and returning an iterator of the node's children.\n\n >>> import nltk\n >>> wn=nltk.corpus.wordnet\n >>> from nltk.util import unweighted_minimum_spanning_digraph as umsd\n >>> print(umsd(wn.synset('bound.a.01'), lambda s:s.also_sees()))\n digraph G {\n \"Synset('bound.a.01')\" -> \"Synset('unfree.a.02')\";\n \"Synset('unfree.a.02')\" -> \"Synset('confined.a.02')\";\n \"Synset('unfree.a.02')\" -> \"Synset('dependent.a.01')\";\n \"Synset('unfree.a.02')\" -> \"Synset('restricted.a.01')\";\n \"Synset('restricted.a.01')\" -> \"Synset('classified.a.02')\";\n }\n <BLANKLINE>\n ", "language": "en", "n_whitespaces": 184, "n_words": 117, "vocab_size": 81 }
https://github.com/nltk/nltk.git
3
_gorg
def _gorg(cls):
    assert isinstance(cls, GenericMeta)
    if hasattr(cls, '_gorg'):
        return cls._gorg
    while cls.__origin__ is not None:
        cls = cls.__origin__
    return cls


_PROTO_WHITELIST = ['Callable', 'Awaitable',
                    'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
                    'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
                    'ContextManager', 'AsyncContextManager']
f3166e673fe8d40277b804d35d77dcdb760fc3b3
9
typing_extensions.py
124
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,622
0
120
40
31
20,942
35
pipenv
7
pipenv/patched/notpip/_vendor/typing_extensions.py
Python
7
{ "docstring": "This function exists for compatibility with old typing versions.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/pypa/pipenv.git
4
lemmas
def lemmas(self, lang="eng"):
    if lang == "eng":
        return self._lemmas
    elif self._name:
        self._wordnet_corpus_reader._load_lang_data(lang)
        lemmark = []
        lemmy = self.lemma_names(lang)
        for lem in lemmy:
            temp = Lemma(
                self._wordnet_corpus_reader,
                self,
                lem,
                self._wordnet_corpus_reader._lexnames.index(self.lexname()),
                0,
                None,
            )
            temp._lang = lang
            lemmark.append(temp)
        return lemmark
75f4e2183a80904dd3a6f958072ae2d063b51fad
17
wordnet.py
150
Handle wordnet synsets that were lost in mapping
7,561
0
299
94
32
42,476
38
nltk
18
nltk/corpus/reader/wordnet.py
Python
19
{ "docstring": "Return all the lemma objects associated with the synset", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
https://github.com/nltk/nltk.git
1
test_cross_validation
def test_cross_validation(self):
    testargs = """
    examples/by_feature/cross_validation.py
    --num_folds 2
    """.split()
    with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
        output = subprocess.run(
            self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        ).stdout
        results = ast.literal_eval(re.findall("({.+})", output)[-1])
        self.assertGreaterEqual(results["accuracy"], 0.75)
23c0341262bd396a3ba9265614b3818d6e08a6c1
14
test_examples.py
148
Refactor tests to use accelerate launch (#373) Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
121,085
0
97
92
23
337,588
25
accelerate
23
tests/test_examples.py
Python
11
{ "docstring": "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ", "language": "en", "n_whitespaces": 25, "n_words": 3, "vocab_size": 3 }
https://github.com/huggingface/accelerate.git
1
parse_headers
def parse_headers(fp, _class=HTTPMessage):
    headers = _read_headers(fp)
    hstring = b''.join(headers).decode('iso-8859-1')
    return email.parser.Parser(_class=_class).parsestr(hstring)
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
client.py
78
add python 3.10.4 for windows
54,906
0
23
46
10
217,736
11
XX-Net
13
python3.10.4/Lib/http/client.py
Python
4
{ "docstring": "Parses only RFC2822 headers from a file pointer.\n\n email Parser wants to see strings rather than bytes.\n But a TextIOWrapper around self.rfile would buffer too many bytes\n from the stream, bytes which we later need to read as bytes.\n So we read the correct bytes here, as bytes, for email Parser\n to parse.\n\n ", "language": "en", "n_whitespaces": 71, "n_words": 53, "vocab_size": 40 }
https://github.com/XX-net/XX-Net.git
1
is_executable
def is_executable(path):
    # This function's signature needs to be repeated
    # as the first line of its docstring.
    # This method is reused by the basic module,
    # the repetition helps the basic module's html documentation come out right.
    # http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_docstring_signature

    # These are all bitfields so first bitwise-or all the permissions we're
    # looking for, then bitwise-and with the file's mode to determine if any
    # execute bits are set.
    return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
fee90b15a25b588bfb8a9ff047e851d43e78511f
11
file.py
63
Misc typo fixes in module_utils (#76564)
78,454
0
109
34
60
266,521
79
ansible
8
lib/ansible/module_utils/common/file.py
Python
2
{ "docstring": "is_executable(path)\n\n is the given path executable?\n\n :arg path: The path of the file to check.\n\n Limitations:\n\n * Does not account for FSACLs.\n * Most times we really want to know \"Can the current user execute this\n file\". This function does not tell us that, only if any execute bit is set.\n ", "language": "en", "n_whitespaces": 75, "n_words": 51, "vocab_size": 43 }
https://github.com/ansible/ansible.git
1
project_group_token
def project_group_token(self, group_tokens):
    # [B, num_output_groups, C] <- [B, num_group_tokens, C]
    projected_group_tokens = self.mlp_inter(group_tokens)
    projected_group_tokens = self.norm_post_tokens(projected_group_tokens)
    return projected_group_tokens
6c8f4c9a938a09749ea1b19a5fa2a8dd27e99a29
8
modeling_groupvit.py
45
Adding GroupViT Models (#17313) * add group vit and fixed test (except slow) * passing slow test * addressed some comments * fixed test * fixed style * fixed copy * fixed segmentation output * fixed test * fixed relative path * fixed copy * add ignore non auto configured * fixed docstring, add doc * fixed copies * Apply suggestions from code review merge suggestions Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * resolve comment, renaming model * delete unused attr * use fix copies * resolve comments * fixed attn * remove unused vars * refactor tests * resolve final comments * add demo notebook * fixed inconsitent default * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * rename stage->stages * Create single GroupViTEncoderLayer class * Update conversion script * Simplify conversion script * Remove cross-attention class in favor of GroupViTAttention * Convert other model as well, add processor to conversion script * addressing final comment * fixed args * Update src/transformers/models/groupvit/modeling_groupvit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
5,800
0
54
26
14
31,755
19
transformers
6
src/transformers/models/groupvit/modeling_groupvit.py
Python
4
{ "docstring": "\n Args:\n group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels]\n\n Returns:\n projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels]\n ", "language": "en", "n_whitespaces": 58, "n_words": 14, "vocab_size": 11 }
https://github.com/huggingface/transformers.git
1
write_media_player_states
def write_media_player_states(self) -> None:
    dispatcher_send(self.hass, SONOS_MEDIA_UPDATED, self.soco.uid)
cfd763db40544c31077b46631bbdd9655581dfe9
9
media.py
36
Refactor Sonos media metadata handling (#66840) Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
91,764
0
21
22
7
292,690
7
core
7
homeassistant/components/sonos/media.py
Python
3
{ "docstring": "Send a signal to media player(s) to write new states.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
select_option
def select_option(self, selector, value):
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import Select

    select = Select(self.selenium.find_element(By.CSS_SELECTOR, selector))
    select.select_by_value(value)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
tests.py
79
Refs #33476 -- Reformatted code with Black.
50,427
0
52
53
15
203,524
17
django
16
django/contrib/admin/tests.py
Python
5
{ "docstring": "\n Select the <OPTION> with the value `value` inside the <SELECT> widget\n identified by the CSS selector `selector`.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 14 }
https://github.com/django/django.git
2
_random_choice
def _random_choice(self) -> List[int]:
    retval = [random.choice(indices) for indices in self._indices]
    logger.debug(retval)
    return retval
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
9
preview.py
57
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20,835
0
42
35
13
101,421
14
faceswap
11
tools/preview/preview.py
Python
5
{ "docstring": " list: Random indices from the :attr:`_indices` group ", "language": "en", "n_whitespaces": 8, "n_words": 7, "vocab_size": 7 }
https://github.com/deepfakes/faceswap.git
1
test_has_related_field_in_list_display_fk
def test_has_related_field_in_list_display_fk(self):
    state = State.objects.create(name="Karnataka")
    City.objects.create(state=state, name="Bangalore")
    response = self.client.get(reverse("admin:admin_views_city_changelist"), {})

    response.context["cl"].list_display = ["id", "name", "state"]
    self.assertIs(response.context["cl"].has_related_field_in_list_display(), True)

    response.context["cl"].list_display = ["id", "name", "state_id"]
    self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
tests.py
200
Refs #33476 -- Reformatted code with Black.
52,071
0
81
114
18
207,724
25
django
16
tests/admin_views/tests.py
Python
8
{ "docstring": "Joins shouldn't be performed for <FK>_id fields in list display.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/django/django.git
1
exit_with_error
def exit_with_error(message, code=1, **kwargs):
    kwargs.setdefault("style", "red")
    app.console.print(message, **kwargs)
    raise typer.Exit(code)
c0cb1fee460c1bded9e3eb741ad7979402844bf8
8
base.py
66
Update `set` command; allow CLI `console` object to be patched
11,226
0
22
39
10
55,137
10
prefect
10
src/prefect/cli/base.py
Python
4
{ "docstring": "\n Utility to print a stylized error message and exit with a non-zero code\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 12 }
https://github.com/PrefectHQ/prefect.git
4
trigger_tool
def trigger_tool(self, name, sender=None, canvasevent=None, data=None):
    tool = self.get_tool(name)
    if tool is None:
        return

    if sender is None:
        sender = self

    if isinstance(tool, backend_tools.ToolToggleBase):
        self._handle_toggle(tool, canvasevent, data)

    tool.trigger(sender, canvasevent, data)  # Actually trigger Tool.

    s = 'tool_trigger_%s' % name
    event = ToolTriggerEvent(s, sender, tool, canvasevent, data)
    self._callbacks.process(s, event)
eec341a3a5918a59b212afffc2f9285bada1f276
9
backend_managers.py
153
Remove several unused variables
23,434
0
145
101
35
109,095
48
matplotlib
18
lib/matplotlib/backend_managers.py
Python
12
{ "docstring": "\n Trigger a tool and emit the ``tool_trigger_{name}`` event.\n\n Parameters\n ----------\n name : str\n Name of the tool.\n sender : object\n Object that wishes to trigger the tool.\n canvasevent : Event\n Original Canvas event or None.\n data : object\n Extra data to pass to the tool when triggering.\n ", "language": "en", "n_whitespaces": 148, "n_words": 47, "vocab_size": 35 }
https://github.com/matplotlib/matplotlib.git
7
save_file
def save_file(self, name, file_path, file_name=None): files_metadata = self.get_files() if name in [x['name'] for x in files_metadata]: raise Exception(f'File already exists: {name}') if file_name is None: file_name = Path(file_path).name file_dir = None try: df, _col_map = FileHandler._handle_source(file_path) ds_meta = { 'row_count': len(df), 'column_names': list(df.columns) } file_record = db.File( name=name, company_id=ctx.company_id, source_file_path=file_name, file_path='', row_count=ds_meta['row_count'], columns=ds_meta['column_names'] ) db.session.add(file_record) db.session.commit() store_file_path = f'file_{ctx.company_id}_{file_record.id}' file_record.file_path = store_file_path db.session.commit() file_dir = Path(self.dir).joinpath(store_file_path) file_dir.mkdir(parents=True, exist_ok=True) source = file_dir.joinpath(file_name) # NOTE may be delay between db record exists and file is really in folder shutil.move(file_path, str(source)) self.fs_store.put(store_file_path, base_dir=self.dir) except Exception as e: log.logger.error(e) raise finally: if file_dir is not None: shutil.rmtree(file_dir) return file_record.id
7f0d38c3925c981d015e8624662c8e0c13aa4d97
13
file_controller.py
417
get company_id from context
26,200
0
534
247
83
118,262
105
mindsdb
48
mindsdb/interfaces/file/file_controller.py
Python
38
{ "docstring": " Save the file to our store\n\n Args:\n name (str): with that name file will be available in sql api\n file_name (str): file name\n file_path (str): path to the file\n\n Returns:\n int: id of 'file' record in db\n ", "language": "en", "n_whitespaces": 127, "n_words": 37, "vocab_size": 27 }
https://github.com/mindsdb/mindsdb.git
2
get_source_link
def get_source_link(browser):
    source = []
    try:
        source.append(
            browser.find_element(
                By.XPATH, read_xpath(get_source_link.__name__, "image")
            ).get_attribute("src")
        )
    except NoSuchElementException:
        source.append(
            browser.find_element(
                By.XPATH, read_xpath(get_source_link.__name__, "video")
            ).get_attribute("src")
        )
        source.append(
            browser.find_element(
                By.XPATH, read_xpath(get_source_link.__name__, "image_alt")
            ).get_attribute("src")
        )
    return source
2a157d452611d37cf50ccb7d56ff1a06e9790ecb
17
clarifai_util.py
167
PR - Fix `extract_text_from_element()`and `find_element*()` to `find_element()` (#6438) * Updated getUserData() and find_element* Signed-off-by: elulcao <elulcao@icloud.com> Thanks @breuerfelix for reviewing, 🚀 People in this thread please let me know if something is not OK, IG changed a lot these days. 🤗 @her
821
0
199
98
18
5,791
31
InstaPy
11
instapy/clarifai_util.py
Python
20
{ "docstring": "Checks to see if a post is an image. If so, returns list with image\n source URL.\n If a NoSuchElement exception occurs, checks post for video and returns\n the source URLs\n for both the video and the video's keyframe.", "language": "en", "n_whitespaces": 50, "n_words": 39, "vocab_size": 29 }
https://github.com/InstaPy/InstaPy.git
1
test_changelist_with_no_change_url
def test_changelist_with_no_change_url(self):
    o = UnchangeableObject.objects.create()
    response = self.client.get(
        reverse("admin:admin_views_unchangeableobject_changelist")
    )
    # Check the format of the shown object -- shouldn't contain a change link
    self.assertContains(
        response, '<th class="field-__str__">%s</th>' % o, html=True
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
tests.py
74
Refs #33476 -- Reformatted code with Black.
52,142
0
103
42
29
207,873
32
django
12
tests/admin_views/tests.py
Python
8
{ "docstring": "\n ModelAdmin.changelist_view shouldn't result in a NoReverseMatch if url\n for change_view is removed from get_urls (#20934).\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
https://github.com/django/django.git
1
test_cancellation_while_waiting_for_write_lock
def test_cancellation_while_waiting_for_write_lock(self): rwlock = ReadWriteLock() key = "key" # 1. A reader takes the lock and blocks. reader_d, _, unblock_reader = self._start_blocking_reader( rwlock, key, "read completed" ) # 2. A writer waits for the reader to complete. writer1_d, _, unblock_writer1 = self._start_blocking_writer( rwlock, key, "write 1 completed" ) # 3. A second writer waits for both the reader and first writer to complete. # This writer will be cancelled later. writer2_d, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed") self.assertFalse(writer2_d.called) # 4. A third writer waits for the second writer to complete. writer3_d, _ = self._start_nonblocking_writer(rwlock, key, "write 3 completed") self.assertFalse(writer3_d.called) # 5. The second writer is cancelled, but continues waiting for the lock. # The reader, first writer and third writer should not be cancelled. # The first writer should still be waiting on the reader. # The third writer should still be waiting on the second writer. writer2_d.cancel() self.assertNoResult(writer2_d) self.assertFalse(reader_d.called, "Reader was unexpectedly cancelled") self.assertFalse(writer1_d.called, "First writer was unexpectedly cancelled") self.assertFalse( writer3_d.called, "Third writer was unexpectedly cancelled or given the lock before the first " "writer finished", ) # 6. Unblock the reader, which should complete. # The first writer should be given the lock and block. # The third writer should still be waiting on the second writer. unblock_reader.callback(None) self.assertEqual("read completed", self.successResultOf(reader_d)) self.assertNoResult(writer2_d) self.assertFalse( writer3_d.called, "Third writer was unexpectedly given the lock before the first writer " "finished", ) # 7. Unblock the first writer, which should complete. unblock_writer1.callback(None) self.assertEqual("write 1 completed", self.successResultOf(writer1_d)) # 8. The second writer should take the lock and release it immediately, since it # has been cancelled. self.failureResultOf(writer2_d, CancelledError) # 9. The third writer should take the lock and complete. self.assertTrue( writer3_d.called, "Third writer is stuck waiting for a cancelled writer" ) self.assertEqual("write 3 completed", self.successResultOf(writer3_d))
605d161d7d585847fd1bb98d14d5281daeac8e86
9
test_rwlock.py
385
Add cancellation support to `ReadWriteLock` (#12120) Also convert `ReadWriteLock` to use async context managers. Signed-off-by: Sean Quah <seanq@element.io>
71,756
0
721
220
120
247,581
293
synapse
25
tests/util/test_rwlock.py
Python
37
{ "docstring": "Test cancellation while waiting for a write lock.\n\n Tests that cancelling a waiting writer:\n * does not cancel the reader or writer it is waiting on\n * does not cancel the next writer waiting on it\n * does not allow the next writer to acquire the lock before an earlier reader\n and writer have finished\n * does not keep the next writer waiting indefinitely\n\n These correspond to the asserts with explicit messages.\n ", "language": "en", "n_whitespaces": 135, "n_words": 72, "vocab_size": 42 }
https://github.com/matrix-org/synapse.git
1
current_year
def current_year(context):
    context["current_year"] = datetime.datetime.now().year
    return context
bbf17ea692e437cec908eae6759ffff8092fb42e
10
pandas_web.py
40
WEB: Add new footer to web (#48557)
40,398
0
28
22
7
169,203
7
pandas
5
web/pandas_web.py
Python
3
{ "docstring": "\n Add the current year to the context, so it can be used for the copyright\n note, or other places where it is needed.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 20 }
https://github.com/pandas-dev/pandas.git
10
remove_field
def remove_field(self, model, field):
    # Special-case implicit M2M tables
    if field.many_to_many and field.remote_field.through._meta.auto_created:
        return self.delete_model(field.remote_field.through)
    # It might not actually have a column behind it
    if field.db_parameters(connection=self.connection)["type"] is None:
        return
    # Drop any FK constraints, MySQL requires explicit deletion
    if field.remote_field:
        fk_names = self._constraint_names(model, [field.column], foreign_key=True)
        for fk_name in fk_names:
            self.execute(self._delete_fk_sql(model, fk_name))
    # Delete the column
    sql = self.sql_delete_column % {
        "table": self.quote_name(model._meta.db_table),
        "column": self.quote_name(field.column),
    }
    self.execute(sql)
    # Reset connection if required
    if self.connection.features.connection_persists_old_columns:
        self.connection.close()
    # Remove all deferred statements referencing the deleted column.
    for sql in list(self.deferred_sql):
        if isinstance(sql, Statement) and sql.references_column(
            model._meta.db_table, field.column
        ):
            self.deferred_sql.remove(sql)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
schema.py
304
Refs #33476 -- Reformatted code with Black.
50,985
0
346
190
79
204,933
97
django
32
django/db/backends/base/schema.py
Python
21
{ "docstring": "\n Remove a field from a model. Usually involves deleting a column,\n but for M2Ms may involve deleting a table.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 15 }
https://github.com/django/django.git
2
get_characters_loss
def get_characters_loss(ops, docs, prediction, nr_char):
    target_ids = numpy.vstack([doc.to_utf8_array(nr_char=nr_char) for doc in docs])
    target_ids = target_ids.reshape((-1,))
    target = ops.asarray(to_categorical(target_ids, n_classes=256), dtype="f")
    target = target.reshape((-1, 256 * nr_char))
    diff = prediction - target
    loss = (diff**2).sum()
    d_target = diff / float(prediction.shape[0])
    return loss, d_target
91ccacea12a46c62ccb5e7f6de891a37cb71e184
12
multi_task.py
175
Auto-format code with black (#10209) * Auto-format code with black * add black requirement to dev dependencies and pin to 22.x * ignore black dependency for comparison with setup.cfg Co-authored-by: explosion-bot <explosion-bot@users.noreply.github.com> Co-authored-by: svlandeg <svlandeg@github.com>
24,357
0
69
112
31
111,194
42
spaCy
22
spacy/ml/models/multi_task.py
Python
9
{ "docstring": "Compute a loss based on a number of characters predicted from the docs.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
https://github.com/explosion/spaCy.git
1
test_k_fold_cv
def test_k_fold_cv():
    boston = load_boston()
    clf = make_pipeline(
        OneHotEncoder(
            categorical_features='auto', sparse=False, minimum_fraction=0.05
        ),
        LinearRegression()
    )
    cross_val_score(clf, boston.data, boston.target, cv=KFold(n_splits=10, shuffle=True))
388616b6247ca4ea8de4e2f340d6206aee523541
12
one_hot_encoder_tests.py
92
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,446
0
89
60
19
181,658
20
tpot
17
tests/one_hot_encoder_tests.py
Python
11
{ "docstring": "Test OneHotEncoder with categorical_features='auto'.", "language": "en", "n_whitespaces": 3, "n_words": 4, "vocab_size": 4 }
https://github.com/EpistasisLab/tpot.git
1
_create_mock_app_session
def _create_mock_app_session(*args, **kwargs):
    mock_id = mock.PropertyMock(
        return_value="mock_id:%s" % ServerTestCase._next_session_id
    )
    ServerTestCase._next_session_id += 1

    mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs)
    type(mock_session).id = mock_id
    return mock_session
704eab3478cf69847825b23dabf15813a8ac9fa2
11
server_test_case.py
93
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
26,306
0
84
57
19
118,583
24
streamlit
15
lib/tests/server_test_case.py
Python
8
{ "docstring": "Create a mock AppSession. Each mocked instance will have\n its own unique ID.", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
https://github.com/streamlit/streamlit.git
5
_check_folder
def _check_folder(self) -> None:
    err = None
    if not self._faces_dir:
        err = "ERROR: Output faces folder not provided."
    elif not os.path.isdir(self._faces_dir):
        logger.debug("Creating folder: '%s'", self._faces_dir)
        os.makedirs(self._faces_dir)
    elif os.listdir(self._faces_dir):
        err = f"ERROR: Output faces folder should be empty: '{self._faces_dir}'"
    if err:
        logger.error(err)
        sys.exit(0)
    logger.verbose("Creating output folder at '%s'", self._faces_dir)
a9908b46f77dc66ac7efe7100ea0eed4b1f2b460
12
jobs.py
161
Alignments tool - Replace 'extract-large' with 'min-size'
20,125
0
163
90
34
100,667
48
faceswap
15
tools/alignments/jobs.py
Python
14
{ "docstring": " Check that the faces folder doesn't pre-exist and create. ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 9 }
https://github.com/deepfakes/faceswap.git
1
stable_semver
def stable_semver():
    from packaging.version import Version

    return Version(__version__).base_version
1d82b8822120db088bfeb6c8eae7ec8df9703783
8
version.py
32
Updated distutils.Version to packaging.Version
62,618
0
17
18
8
230,977
8
plotly.py
6
packages/python/plotly/plotly/version.py
Python
3
{ "docstring": "\n Get the stable portion of the semantic version string (the first three\n numbers), without any of the trailing labels\n\n '3.0.0rc11' -> '3.0.0'\n ", "language": "en", "n_whitespaces": 35, "n_words": 22, "vocab_size": 19 }
https://github.com/plotly/plotly.py.git
1
test_column_feature_type_mismatch_fill
def test_column_feature_type_mismatch_fill():
    cat_feat = category_feature()
    bin_feat = binary_feature()
    input_features = [cat_feat]
    output_features = [bin_feat]
    config = {"input_features": input_features, "output_features": output_features}

    # Construct dataframe with int-like column representing a categorical feature
    df = pd.DataFrame(
        {
            cat_feat[NAME]: pd.Series(pd.array([None] + [1] * 24, dtype=pd.Int64Dtype())),
            bin_feat[NAME]: pd.Series([True] * 25),
        }
    )

    # run preprocessing
    backend = LocalTestBackend()
    ludwig_model = LudwigModel(config, backend=backend)
    train_ds, val_ds, test_ds, _ = ludwig_model.preprocess(dataset=df)


@pytest.mark.parametrize("format", ["file", "df"])
1e6dbeff57fc5065b97dd018b904b9907468676f
@pytest.mark.parametrize("format", ["file", "df"])
17
test_preprocessing.py
230
Treat dataset columns as object dtype during first pass of handle_missing_values (#2398)
1,273
1
140
125
56
7,805
66
ludwig
29
tests/integration_tests/test_preprocessing.py
Python
15
{ "docstring": "Tests that we are able to fill missing values even in columns where the column dtype and desired feature\n dtype do not match.", "language": "en", "n_whitespaces": 25, "n_words": 23, "vocab_size": 22 }
https://github.com/ludwig-ai/ludwig.git
1
test_xsum_summarization_same_as_fairseq
def test_xsum_summarization_same_as_fairseq(self):
    model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-xsum").to(torch_device)
    tok = self.default_tokenizer

    PGE_ARTICLE = """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."""

    EXPECTED_SUMMARY = (
        "California's largest power company has begun shutting off electricity to thousands of customers in the"
        " state."
    )
    dct = tok.batch_encode_plus(
        [PGE_ARTICLE],
        max_length=1024,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    ).to(torch_device)

    hypotheses_batch = model.generate(
        input_ids=dct["input_ids"],
        attention_mask=dct["attention_mask"],
        num_beams=2,
        max_length=62,
        min_length=11,
        length_penalty=1.0,
        no_repeat_ngram_size=3,
        early_stopping=True,
        decoder_start_token_id=model.config.eos_token_id,
    )

    decoded = tok.batch_decode(
        hypotheses_batch,
        skip_special_tokens=True,
    )
    self.assertEqual(EXPECTED_SUMMARY, decoded[0])
afe5d42d8d1d80af911ed980c2936bfe887078f6
12
test_modeling_bart.py
220
Black preview (#17217) * Black preview * Fixup too! * Fix check copies * Use the same version as the CI * Bump black
6,963
0
343
143
53
38,308
61
transformers
33
tests/models/bart/test_modeling_bart.py
Python
31
{ "docstring": " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", "language": "en", "n_whitespaces": 47, "n_words": 47, "vocab_size": 40 }
https://github.com/huggingface/transformers.git
2
test_get_nodes_for_order_with_uuid_id
def test_get_nodes_for_order_with_uuid_id(order_list):
    # given
    global_ids = [to_global_id("Order", order.pk) for order in order_list]

    # Make sure function works even if duplicated ids are provided
    global_ids.append(to_global_id("Order", order_list[0].pk))

    # when
    orders = get_nodes(global_ids, Order)

    # then
    assert orders == order_list
41b87559118f560c223f83d405efe9b406701d17
11
test_graphql.py
86
Migrate order id from int to UUID (#9324) * Add migration to change order id from int to UUID (#9281) * Change order token to uuid * Migrate order id to uuid * Fix failing tests * Apply code review suggestions * Fix payment migration dependencies * Fix typo in order migration name * Handle old order ids for order queries * Hanlde old order ids for order mutations * Add order relation to GiftCardEvent model * Deprecate order token related queries and fields (#9295) * Deprecate order.token field * Update description of orderByToken query * Update prepare_order_search_document_value method * Update changelog * Update schema file
4,964
0
64
50
32
26,316
37
saleor
10
saleor/graphql/core/tests/test_graphql.py
Python
5
{ "docstring": "Ensure that `get_nodes` returns correct nodes, when the new uuid order id\n is used.", "language": "en", "n_whitespaces": 16, "n_words": 14, "vocab_size": 14 }
https://github.com/saleor/saleor.git
4
string_to_tokentype
def string_to_tokentype(s): if isinstance(s, _TokenType): return s if not s: return Token node = Token for item in s.split('.'): node = getattr(node, item) return node # Map standard token types to short names, used in CSS class naming. # If you add a new item, please be sure to run this file to perform # a consistency check for duplicate values. STANDARD_TYPES = { Token: '', Text: '', Whitespace: 'w', Escape: 'esc', Error: 'err', Other: 'x', Keyword: 'k', Keyword.Constant: 'kc', Keyword.Declaration: 'kd', Keyword.Namespace: 'kn', Keyword.Pseudo: 'kp', Keyword.Reserved: 'kr', Keyword.Type: 'kt', Name: 'n', Name.Attribute: 'na', Name.Builtin: 'nb', Name.Builtin.Pseudo: 'bp', Name.Class: 'nc', Name.Constant: 'no', Name.Decorator: 'nd', Name.Entity: 'ni', Name.Exception: 'ne', Name.Function: 'nf', Name.Function.Magic: 'fm', Name.Property: 'py', Name.Label: 'nl', Name.Namespace: 'nn', Name.Other: 'nx', Name.Tag: 'nt', Name.Variable: 'nv', Name.Variable.Class: 'vc', Name.Variable.Global: 'vg', Name.Variable.Instance: 'vi', Name.Variable.Magic: 'vm', Literal: 'l', Literal.Date: 'ld', String: 's', String.Affix: 'sa', String.Backtick: 'sb', String.Char: 'sc', String.Delimiter: 'dl', String.Doc: 'sd', String.Double: 's2', String.Escape: 'se', String.Heredoc: 'sh', String.Interpol: 'si', String.Other: 'sx', String.Regex: 'sr', String.Single: 's1', String.Symbol: 'ss', Number: 'm', Number.Bin: 'mb', Number.Float: 'mf', Number.Hex: 'mh', Number.Integer: 'mi', Number.Integer.Long: 'il', Number.Oct: 'mo', Operator: 'o', Operator.Word: 'ow', Punctuation: 'p', Comment: 'c', Comment.Hashbang: 'ch', Comment.Multiline: 'cm', Comment.Preproc: 'cp', Comment.PreprocFile: 'cpf', Comment.Single: 'c1', Comment.Special: 'cs', Generic: 'g', Generic.Deleted: 'gd', Generic.Emph: 'ge', Generic.Error: 'gr', Generic.Heading: 'gh', Generic.Inserted: 'gi', Generic.Output: 'go', Generic.Prompt: 'gp', Generic.Strong: 'gs', Generic.Subheading: 'gu', Generic.Traceback: 'gt', }
f3166e673fe8d40277b804d35d77dcdb760fc3b3
10
token.py
841
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,398
0
1,759
45
205
20,500
221
pipenv
77
pipenv/patched/notpip/_vendor/pygments/token.py
Python
9
{ "docstring": "\n Convert a string into a token type::\n\n >>> string_to_token('String.Double')\n Token.Literal.String.Double\n >>> string_to_token('Token.Literal.Number')\n Token.Literal.Number\n >>> string_to_token('')\n Token\n\n Tokens that are already tokens are returned unchanged:\n\n >>> string_to_token(String)\n Token.Literal.String\n ", "language": "en", "n_whitespaces": 93, "n_words": 27, "vocab_size": 22 }
https://github.com/pypa/pipenv.git
1
get_emp_list
def get_emp_list(sal_struct, cond, end_date, payroll_payable_account):
    return frappe.db.sql(
        % cond,
        {
            "sal_struct": tuple(sal_struct),
            "from_date": end_date,
            "payroll_payable_account": payroll_payable_account,
        },
        as_dict=True,
    )
494bd9ef78313436f0424b918f200dab8fc7c20b
11
payroll_entry.py
70
style: format code with black
14,375
0
9
45
17
66,909
19
erpnext
10
erpnext/payroll/doctype/payroll_entry/payroll_entry.py
Python
21
{ "docstring": "\n\t\t\tselect\n\t\t\t\tdistinct t1.name as employee, t1.employee_name, t1.department, t1.designation\n\t\t\tfrom\n\t\t\t\t`tabEmployee` t1, `tabSalary Structure Assignment` t2\n\t\t\twhere\n\t\t\t\tt1.name = t2.employee\n\t\t\t\tand t2.docstatus = 1\n\t\t\t\tand t1.status != 'Inactive'\n\t\t%s order by t2.from_date desc\n\t\t", "language": "en", "n_whitespaces": 23, "n_words": 32, "vocab_size": 29 }
https://github.com/frappe/erpnext.git
1
match_tree_files
def match_tree_files(self, root, on_error=None, follow_links=None):
    files = util.iter_tree_files(root, on_error=on_error, follow_links=follow_links)
    return self.match_files(files)


# Alias `match_tree_files()` as `match_tree()`.
match_tree = match_tree_files
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
9
pathspec.py
65
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,176
0
47
39
19
130,251
20
ray
10
python/ray/_private/thirdparty/pathspec/pathspec.py
Python
3
{ "docstring": "\n Walks the specified root path for all files and matches them to this\n path-spec.\n\n *root* (:class:`str`; or :class:`pathlib.PurePath`) is the root\n directory to search for files.\n\n *on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n optionally is the error handler for file-system exceptions. See\n :func:`~pathspec.util.iter_tree_files` for more information.\n\n *follow_links* (:class:`bool` or :data:`None`) optionally is whether\n to walk symbolic links that resolve to directories. See\n :func:`~pathspec.util.iter_tree_files` for more information.\n\n Returns the matched files (:class:`~collections.abc.Iterable` of\n :class:`str`).\n ", "language": "en", "n_whitespaces": 162, "n_words": 70, "vocab_size": 48 }
https://github.com/ray-project/ray.git
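As an illustrative aside (not taken from the record above), the same matching behaviour can be exercised through the standalone pathspec package, assuming it is installed; the pattern list and the "." root directory here are arbitrary choices for the demo.

# Minimal usage sketch of PathSpec.match_tree_files via the standalone `pathspec`
# package, which the vendored copy in the record mirrors. Patterns and root are
# assumptions for illustration only.
import pathspec

spec = pathspec.PathSpec.from_lines("gitwildmatch", ["*.py", "!tests/"])
matched = list(spec.match_tree_files("."))  # walks the tree, yields matching file paths
print(matched)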
13
cycle
def cycle(parser, token):
    # Note: This returns the exact same node on each {% cycle name %} call;
    # that is, the node object returned from {% cycle a b c as name %} and the
    # one returned from {% cycle name %} are the exact same object. This
    # shouldn't cause problems (heh), but if it does, now you know.
    #
    # Ugly hack warning: This stuffs the named template dict into parser so
    # that names are only unique within each template (as opposed to using
    # a global variable, which would make cycle names have to be unique across
    # *all* templates.
    #
    # It keeps the last node in the parser to be able to reset it with
    # {% resetcycle %}.
    args = token.split_contents()

    if len(args) < 2:
        raise TemplateSyntaxError("'cycle' tag requires at least two arguments")

    if len(args) == 2:
        # {% cycle foo %} case.
        name = args[1]
        if not hasattr(parser, "_named_cycle_nodes"):
            raise TemplateSyntaxError(
                "No named cycles in template. '%s' is not defined" % name
            )
        if name not in parser._named_cycle_nodes:
            raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
        return parser._named_cycle_nodes[name]

    as_form = False

    if len(args) > 4:
        # {% cycle ... as foo [silent] %} case.
        if args[-3] == "as":
            if args[-1] != "silent":
                raise TemplateSyntaxError(
                    "Only 'silent' flag is allowed after cycle's name, not '%s'."
                    % args[-1]
                )
            as_form = True
            silent = True
            args = args[:-1]
        elif args[-2] == "as":
            as_form = True
            silent = False

    if as_form:
        name = args[-1]
        values = [parser.compile_filter(arg) for arg in args[1:-2]]
        node = CycleNode(values, name, silent=silent)
        if not hasattr(parser, "_named_cycle_nodes"):
            parser._named_cycle_nodes = {}
        parser._named_cycle_nodes[name] = node
    else:
        values = [parser.compile_filter(arg) for arg in args[1:]]
        node = CycleNode(values)
    parser._last_cycle_node = node
    return node


@register.tag
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@register.tag
16
defaulttags.py
428
Refs #33476 -- Reformatted code with Black.
51,453
1
661
247
155
206,265
291
django
20
django/template/defaulttags.py
Python
39
{ "docstring": "\n Cycle among the given strings each time this tag is encountered.\n\n Within a loop, cycles among the given strings each time through\n the loop::\n\n {% for o in some_list %}\n <tr class=\"{% cycle 'row1' 'row2' %}\">\n ...\n </tr>\n {% endfor %}\n\n Outside of a loop, give the values a unique name the first time you call\n it, then use that name each successive time through::\n\n <tr class=\"{% cycle 'row1' 'row2' 'row3' as rowcolors %}\">...</tr>\n <tr class=\"{% cycle rowcolors %}\">...</tr>\n <tr class=\"{% cycle rowcolors %}\">...</tr>\n\n You can use any number of values, separated by spaces. Commas can also\n be used to separate values; if a comma is used, the cycle values are\n interpreted as literal strings.\n\n The optional flag \"silent\" can be used to prevent the cycle declaration\n from returning any value::\n\n {% for o in some_list %}\n {% cycle 'row1' 'row2' as rowcolors silent %}\n <tr class=\"{{ rowcolors }}\">{% include \"subtemplate.html \" %}</tr>\n {% endfor %}\n ", "language": "en", "n_whitespaces": 310, "n_words": 156, "vocab_size": 89 }
https://github.com/django/django.git
2
cmpfiles
def cmpfiles(a, b, common, shallow=True):
    res = ([], [], [])
    for x in common:
        ax = os.path.join(a, x)
        bx = os.path.join(b, x)
        res[_cmp(ax, bx, shallow)].append(x)
    return res


# Compare two files.
# Return:
#       0 for equal
#       1 for different
#       2 for funny cases (can't stat, etc.)
#
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
filecmp.py
115
add python 3.10.4 for windows
54,725
0
95
73
38
217,342
50
XX-Net
14
python3.10.4/Lib/filecmp.py
Python
7
{ "docstring": "Compare common files in two directories.\n\n a, b -- directory names\n common -- list of file names found in both directories\n shallow -- if true, do comparison based solely on stat() information\n\n Returns a tuple of three lists:\n files that compare equal\n files that are different\n filenames that aren't regular files.\n\n ", "language": "en", "n_whitespaces": 81, "n_words": 51, "vocab_size": 41 }
https://github.com/XX-net/XX-Net.git
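For illustration (not part of the record), a minimal sketch of how cmpfiles is typically called through the standard library's filecmp module; the directory names and the file list below are hypothetical.

# Usage sketch of filecmp.cmpfiles. "dir_a", "dir_b" and the file names are
# assumptions for the demo; missing or unreadable files end up in `errors`.
import filecmp

match, mismatch, errors = filecmp.cmpfiles("dir_a", "dir_b", ["config.ini", "data.csv"])
print("equal:", match)         # files that compare equal (shallow, stat-based check)
print("different:", mismatch)  # files present in both directories but differing
print("funny:", errors)        # files that could not be compared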
5
_set_partitions
def _set_partitions(n):
    p = [0]*n
    q = [0]*n
    nc = 1
    yield nc, q
    while nc != n:
        m = n
        while 1:
            m -= 1
            i = q[m]
            if p[i] != 1:
                break
            q[m] = 0
        i += 1
        q[m] = i
        m += 1
        nc += m - n
        p[0] += n - m
        if i == nc:
            p[nc] = 0
            nc += 1
        p[i - 1] -= 1
        p[i] += 1
        yield nc, q
f3b08522003f40868afb20304fc0fa5b16d13f6a
11
iterables.py
191
Cleanup documentation
48,426
0
253
119
31
197,279
77
sympy
7
sympy/utilities/iterables.py
Python
24
{ "docstring": "Cycle through all partions of n elements, yielding the\n current number of partitions, ``m``, and a mutable list, ``q``\n such that ``element[i]`` is in part ``q[i]`` of the partition.\n\n NOTE: ``q`` is modified in place and generally should not be changed\n between function calls.\n\n Examples\n ========\n\n >>> from sympy.utilities.iterables import _set_partitions, _partition\n >>> for m, q in _set_partitions(3):\n ... print('%s %s %s' % (m, q, _partition('abc', q, m)))\n 1 [0, 0, 0] [['a', 'b', 'c']]\n 2 [0, 0, 1] [['a', 'b'], ['c']]\n 2 [0, 1, 0] [['a', 'c'], ['b']]\n 2 [0, 1, 1] [['a'], ['b', 'c']]\n 3 [0, 1, 2] [['a'], ['b'], ['c']]\n\n Notes\n =====\n\n This algorithm is similar to, and solves the same problem as,\n Algorithm 7.2.1.5H, from volume 4A of Knuth's The Art of Computer\n Programming. Knuth uses the term \"restricted growth string\" where\n this code refers to a \"partition vector\". In each case, the meaning is\n the same: the value in the ith element of the vector specifies to\n which part the ith set element is to be assigned.\n\n At the lowest level, this code implements an n-digit big-endian\n counter (stored in the array q) which is incremented (with carries) to\n get the next partition in the sequence. A special twist is that a\n digit is constrained to be at most one greater than the maximum of all\n the digits to the left of it. The array p maintains this maximum, so\n that the code can efficiently decide when a digit can be incremented\n in place or whether it needs to be reset to 0 and trigger a carry to\n the next digit. The enumeration starts with all the digits 0 (which\n corresponds to all the set elements being assigned to the same 0th\n part), and ends with 0123...n, which corresponds to each set element\n being assigned to a different, singleton, part.\n\n This routine was rewritten to use 0-based lists while trying to\n preserve the beauty and efficiency of the original algorithm.\n\n References\n ==========\n\n .. [1] Nijenhuis, Albert and Wilf, Herbert. (1978) Combinatorial Algorithms,\n 2nd Ed, p 91, algorithm \"nexequ\". Available online from\n https://www.math.upenn.edu/~wilf/website/CombAlgDownld.html (viewed\n November 17, 2012).\n\n ", "language": "en", "n_whitespaces": 497, "n_words": 351, "vocab_size": 221 }
https://github.com/sympy/sympy.git
1
standard_b64decode
def standard_b64decode(s):
    return b64decode(s)


_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
8198943edd73a363c266633e1aa5b2a9e9c9f526
7
base64.py
59
add python 3.10.4 for windows
56,186
0
16
11
11
221,074
12
XX-Net
7
python3.10.4/Lib/base64.py
Python
2
{ "docstring": "Decode bytes encoded with the standard Base64 alphabet.\n\n Argument s is a bytes-like object or ASCII string to decode. The result\n is returned as a bytes object. A binascii.Error is raised if the input\n is incorrectly padded. Characters that are not in the standard alphabet\n are discarded prior to the padding check.\n ", "language": "en", "n_whitespaces": 70, "n_words": 52, "vocab_size": 41 }
https://github.com/XX-net/XX-Net.git
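A small self-contained usage sketch of the function documented above, via the public base64 module; the sample bytes are arbitrary.

# Round-trip with the standard Base64 alphabet (standard library base64 module).
import base64

encoded = base64.standard_b64encode(b"hello world")   # b'aGVsbG8gd29ybGQ='
decoded = base64.standard_b64decode(encoded)
assert decoded == b"hello world"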
2
__repr__
def __repr__(self):
    if self._other is _SENTINEL:
        return "<ANY>"
    return repr(self._other)


ANY = _HA_ANY()
3884e16b46f438e4a13bcab4fe0df3b2188de767
8
common.py
49
Add tests.common.ANY (#84240)
96,599
0
44
22
12
297,632
13
core
7
tests/common.py
Python
4
{ "docstring": "Return repr() other to not show up in pytest quality diffs.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/home-assistant/core.git
3
_set_execing_flag
def _set_execing_flag(self):
    if self._execing:
        raise RuntimeError("Nested set_execing_flag call")
    self._execing = True
    try:
        yield
    finally:
        self._execing = False
2a0ec52386e6d1594aa5cfe39bef2bfb89a59416
10
script_runner.py
58
Saner "execution control request" handling (#4383) ### Background Currently, a `ForwardMsg` enqueued from an `st.foo()` call results in a twisty path: 1. `st.foo()` retrieves the current `ScriptRunContext` and calls its `enqueue` function 1. `ScriptRunContext.enqueue` calls the `AppSession.enqueue` callback 2. `AppSession.enqueue` retrieves the current `ScriptRunner` and calls its `maybe_handle_execution_control_request` function 3. `ScriptRunner.maybe_handle_execution_control_request` optionally interrupts its own script's execution to respond to a STOP or RERUN request. (In other words, the message results in this call chain: ScriptRunContext -> AppSession -> ScriptRunner.) This is weird for a couple reasons: 1. `ScriptRunner.maybe_handle_execution_control_request` should be private. The circumstances in which it's legal to be called are very specific, and `AppSession.enqueue` currently has some ScriptRunner "business logic" inside it. 2. The call chain should really be ScriptRunContext -> ScriptRunner -> AppSession. (AppSession creates ScriptRunner which in turn creates ScriptRunContext. After creation, AppSession should ideally never have to communicate directly with its ScriptRunner - especially in a future where there may be multiple ScriptRunners attached to a single AppSession.) ### Resolution With this PR, `maybe_handle_execution_control_request` is a private method, and an enqueued `ForwardMsg` now "bubbles up" from ScriptRunContext -> ScriptRunner -> AppSession. AppSession no longer knows about execution control requests; they are handled internally by ScriptRunner. (The salient change is that ScriptRunner gets a new `_enqueue` function that is called by `ScriptRunContext.enqueue`, rather than `ScriptRunContext.enqueue` calling `AppSession.enqueue` directly. The execution control request handling happens inside this new `ScriptRunner.enqueue` function, which then calls up to `AppSession.enqueue`.)
26,433
0
85
31
15
118,799
17
streamlit
4
lib/streamlit/script_runner.py
Python
8
{ "docstring": "A context for setting the ScriptRunner._execing flag.\n\n Used by _maybe_handle_execution_control_request to ensure that\n we only handle requests while we're inside an exec() call\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 23 }
https://github.com/streamlit/streamlit.git
2
column_sql
def column_sql(self, model, field, include_default=False):
    # Get the column's type and use that as the basis of the SQL.
    db_params = field.db_parameters(connection=self.connection)
    column_db_type = db_params["type"]
    # Check for fields that aren't actually columns (e.g. M2M).
    if column_db_type is None:
        return None, None
    params = []
    return (
        " ".join(
            # This appends to the params being returned.
            self._iter_column_sql(
                column_db_type, params, model, field, include_default
            )
        ),
        params,
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
schema.py
111
Refs #33476 -- Reformatted code with Black.
50,974
0
238
70
52
204,917
67
django
12
django/db/backends/base/schema.py
Python
14
{ "docstring": "\n Return the column definition for a field. The field must already have\n had set_attributes_from_name() called.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
https://github.com/django/django.git
2
_set_output
def _set_output(self):
    now = datetime.now().strftime("%Y%m%d_%H%M%S")
    folder_name = f"drawn_landmarks_{now}"
    if self._frames.is_video:
        dest_folder = os.path.dirname(self._frames.folder)
    else:
        dest_folder = self._frames.folder
    output_folder = os.path.join(dest_folder, folder_name)
    logger.debug("Creating folder: '%s'", output_folder)
    os.makedirs(output_folder)
    return output_folder
a9908b46f77dc66ac7efe7100ea0eed4b1f2b460
12
jobs.py
139
Alignments tool - Replace 'extract-large' with 'min-size'
20,126
0
113
80
22
100,668
28
faceswap
18
tools/alignments/jobs.py
Python
11
{ "docstring": " Set the output folder path.\n\n If annotating a folder of frames, output will be placed in a sub folder within the frames\n folder. If annotating a video, output will be a folder next to the original video.\n\n Returns\n -------\n str\n Full path to the output folder\n\n ", "language": "en", "n_whitespaces": 100, "n_words": 46, "vocab_size": 28 }
https://github.com/deepfakes/faceswap.git
1
test_gymnasium_old_api_but_wrapped
def test_gymnasium_old_api_but_wrapped(self):
    from gymnasium.wrappers import EnvCompatibility

    register_env(
        "test",
        lambda env_ctx: EnvCompatibility(GymnasiumOldAPI(env_ctx)),
    )
    algo = (
        PPOConfig()
        .environment(env="test")
        .rollouts(num_envs_per_worker=2, num_rollout_workers=2)
        .build()
    )
    algo.train()
    algo.stop()
8e680c483ce326cefc62e44f68ab1a6948b1c3d2
15
test_gym_env_apis.py
115
[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)
31,268
0
145
67
22
137,985
23
ray
18
rllib/tests/backward_compat/test_gym_env_apis.py
Python
14
{ "docstring": "Tests a gymnasium Env that uses the old API, but is correctly wrapped.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/ray-project/ray.git
1
test_stacking_classifier_base_regressor
def test_stacking_classifier_base_regressor():
    X_train, X_test, y_train, y_test = train_test_split(
        scale(X_iris), y_iris, stratify=y_iris, random_state=42
    )
    clf = StackingClassifier(estimators=[("ridge", Ridge())])
    clf.fit(X_train, y_train)
    clf.predict(X_test)
    clf.predict_proba(X_test)
    assert clf.score(X_test, y_test) > 0.8
b1807ff8ead319a08294beeaae90c3f03b2bb8ac
13
test_stacking.py
121
ENH StackingClassifier allows regressors in its first layer (#24538) Co-authored-by: Tom Dupré la Tour <tom.duprelatour.10@gmail.com> Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
76,836
0
57
79
25
261,492
26
scikit-learn
19
sklearn/ensemble/tests/test_stacking.py
Python
9
{ "docstring": "Check that a regressor can be used as the first layer in `StackingClassifier`.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/scikit-learn/scikit-learn.git
5
error
def error(self, flow):
    if (
        self.not_in_timeout(self.last_trigger, self.timeout)
        and flow.error is not None
        and not isinstance(flow.error, HttpSyntaxException)
    ):
        self.last_trigger = time.time()
        logger.error(f"Watchdog triggered! Cause: {flow}")
        self.error_event.set()

        # save the request which might have caused the problem
        if flow.request:
            with (self.flow_dir / f"{datetime.utcnow().isoformat()}.curl").open(
                "w"
            ) as f:
                f.write(curl_command(flow))
            with (self.flow_dir / f"{datetime.utcnow().isoformat()}.raw").open(
                "wb"
            ) as f:
                f.write(raw(flow))
8c2428c9d355ca5fbc3dd90e9820ceb1cc795837
22
watchdog.py
246
[autofix.ci] apply automated fixes
74,071
0
300
121
45
253,414
55
mitmproxy
22
examples/contrib/webscanner_helper/watchdog.py
Python
18
{ "docstring": "Checks if the watchdog will be triggered.\n\n Only triggers watchdog for timeouts after last reset and if flow.error is set (shows that error is a server\n error). Ignores HttpSyntaxException Errors since this can be triggered on purpose by web application scanner.\n\n Args:\n flow: mitmproxy.http.flow\n ", "language": "en", "n_whitespaces": 83, "n_words": 44, "vocab_size": 40 }
https://github.com/mitmproxy/mitmproxy.git
7
device_class
def device_class(self) -> CoverDeviceClass:
    if isinstance(self.node, Awning):
        return CoverDeviceClass.AWNING
    if isinstance(self.node, Blind):
        return CoverDeviceClass.BLIND
    if isinstance(self.node, GarageDoor):
        return CoverDeviceClass.GARAGE
    if isinstance(self.node, Gate):
        return CoverDeviceClass.GATE
    if isinstance(self.node, RollerShutter):
        return CoverDeviceClass.SHUTTER
    if isinstance(self.node, Window):
        return CoverDeviceClass.WINDOW
    return CoverDeviceClass.WINDOW
10dc38e0ec27f7bef990ee431459342f9c3c52b4
8
cover.py
148
Adjust CoverEntity property type hints in components (#73943) * Adjust CoverEntity property type hints in components * Revert changes to rflink * Revert changes to wilight
113,101
0
158
96
19
314,495
36
core
17
homeassistant/components/velux/cover.py
Python
15
{ "docstring": "Define this cover as either awning, blind, garage, gate, shutter or window.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/home-assistant/core.git
6
validate_syllables
def validate_syllables(self, syllable_list):
    valid_syllables = []
    front = ""
    vowel_pattern = re.compile("|".join(self.vowels))
    for i, syllable in enumerate(syllable_list):
        if syllable in punctuation:
            valid_syllables.append(syllable)
            continue
        if not vowel_pattern.search(syllable):
            if len(valid_syllables) == 0:
                front += syllable
            else:
                valid_syllables = valid_syllables[:-1] + [
                    valid_syllables[-1] + syllable
                ]
        else:
            if len(valid_syllables) == 0:
                valid_syllables.append(front + syllable)
            else:
                valid_syllables.append(syllable)

    return valid_syllables
ea006df9885bd1e5f439cd05359d047d83276b73
18
sonority_sequencing.py
202
Greatly improve SyllableTokenizer time performance on some edge cases Mainly when tokenizing numbers
7,584
0
338
120
34
42,516
55
nltk
17
nltk/tokenize/sonority_sequencing.py
Python
21
{ "docstring": "\n Ensures each syllable has at least one vowel.\n If the following syllable doesn't have vowel, add it to the current one.\n\n :param syllable_list: Single word or token broken up into syllables.\n :type syllable_list: list(str)\n :return: Single word or token broken up into syllables\n (with added syllables if necessary)\n :rtype: list(str)\n ", "language": "en", "n_whitespaces": 116, "n_words": 50, "vocab_size": 38 }
https://github.com/nltk/nltk.git
1
test_check_dataframe_with_only_boolean
def test_check_dataframe_with_only_boolean():
    pd = importorskip("pandas", minversion="1.0")
    df = pd.DataFrame({"bool": pd.Series([True, False, True], dtype="boolean")})

    array = check_array(df, dtype=None)
    assert array.dtype == np.float64
    assert_array_equal(array, [[True], [False], [True]])
8d7935827d01d95239710c9c519bb1905f46b645
14
test_validation.py
126
ENH check_array returns numeric array w/ mixed typed dataframes (#22237) Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
75,889
0
43
78
23
259,742
25
scikit-learn
13
sklearn/utils/tests/test_validation.py
Python
6
{ "docstring": "Check that dataframe with boolean return a float array with dtype=None", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/scikit-learn/scikit-learn.git
5
get_used_alternative_items
def get_used_alternative_items(purchase_order=None, work_order=None):
    cond = ""

    if purchase_order:
        cond = "and ste.purpose = 'Send to Subcontractor' and ste.purchase_order = '{0}'".format(
            purchase_order
        )
    elif work_order:
        cond = "and ste.purpose = 'Material Transfer for Manufacture' and ste.work_order = '{0}'".format(
            work_order
        )

    if not cond:
        return {}

    used_alternative_items = {}
    data = frappe.db.sql(
        .format(
            cond
        ),
        as_dict=1,
    )

    for d in data:
        used_alternative_items[d.original_item] = d

    return used_alternative_items
494bd9ef78313436f0424b918f200dab8fc7c20b
12
stock_entry.py
137
style: format code with black
14,612
0
42
82
40
67,755
64
erpnext
13
erpnext/stock/doctype/stock_entry/stock_entry.py
Python
28
{ "docstring": " select sted.original_item, sted.uom, sted.conversion_factor,\n\t\t\tsted.item_code, sted.item_name, sted.conversion_factor,sted.stock_uom, sted.description\n\t\tfrom\n\t\t\t`tabStock Entry` ste, `tabStock Entry Detail` sted\n\t\twhere\n\t\t\tsted.parent = ste.name and ste.docstatus = 1 and sted.original_item != sted.item_code\n\t\t\t{0} ", "language": "en", "n_whitespaces": 25, "n_words": 29, "vocab_size": 26 }
https://github.com/frappe/erpnext.git
5
generate_schema
def generate_schema(route, viewset=None, view=None, view_function=None, patterns=None):
    from django.urls import path
    from drf_spectacular.generators import SchemaGenerator
    from rest_framework import routers
    from rest_framework.viewsets import ViewSetMixin

    if viewset:
        assert issubclass(viewset, ViewSetMixin)
        router = routers.SimpleRouter()
        router.register(route, viewset, basename=route)
        patterns = router.urls
    elif view:
        patterns = [path(route, view.as_view())]
    elif view_function:
        patterns = [path(route, view_function)]
    else:
        assert route is None and isinstance(patterns, list)

    generator = SchemaGenerator(patterns=patterns)
    schema = generator.get_schema(request=None, public=True)
    validate_schema(schema)  # make sure generated schemas are always valid
    return schema
4319707cbd0d7783596fd4a4b7e707d55f97f629
14
__init__.py
231
feat(apidocs): Added the framework for schema tests (#38422) The functions shown, gives us a way to test any changes we make to SentrySchema
17,986
0
163
149
57
85,409
74
sentry
30
tests/sentry/apidocs/__init__.py
Python
20
{ "docstring": "\n Taken from drf_spectacular tests\n https://github.com/tfranzel/drf-spectacular/blob/590a2f7f053fbe83446aa453cb4d4a3025410609/tests/__init__.py#L64\n ", "language": "en", "n_whitespaces": 15, "n_words": 5, "vocab_size": 5 }
https://github.com/getsentry/sentry.git
1
mock_4x4x4_devices
def mock_4x4x4_devices(one_device_per_chip):
    return mock_devices(4, 4, 4, 'TPU v4', one_device_per_chip)
3f9e45e0c5b035de27b14588cd3b4cfd5f3c1f04
8
mesh_utils_test.py
31
[mesh_utils] Support creating device meshes for hybrid networks Also makes some NFCs to other mesh_utils code. PiperOrigin-RevId: 442581767
26,800
0
11
19
8
120,213
9
jax
3
tests/mesh_utils_test.py
Python
2
{ "docstring": "Hard-coded reproduction of jax.devices() output on 4x4x4.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/google/jax.git
36
categorize_changes
def categorize_changes(args, paths, verbose_command=None):  # type: (TestConfig, t.List[str], t.Optional[str]) -> ChangeDescription
    mapper = PathMapper(args)

    commands = {
        'sanity': set(),
        'units': set(),
        'integration': set(),
        'windows-integration': set(),
        'network-integration': set(),
    }  # type: t.Dict[str, t.Set[str]]

    focused_commands = collections.defaultdict(set)

    deleted_paths = set()  # type: t.Set[str]
    original_paths = set()  # type: t.Set[str]
    additional_paths = set()  # type: t.Set[str]
    no_integration_paths = set()  # type: t.Set[str]

    for path in paths:
        if not os.path.exists(path):
            deleted_paths.add(path)
            continue

        original_paths.add(path)

        dependent_paths = mapper.get_dependent_paths(path)

        if not dependent_paths:
            continue

        display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)

        for dependent_path in dependent_paths:
            display.info(dependent_path, verbosity=2)
            additional_paths.add(dependent_path)

    additional_paths -= set(paths)  # don't count changed paths as additional paths

    if additional_paths:
        display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
        paths = sorted(set(paths) | additional_paths)

    display.info('Mapping %d changed file(s) to tests.' % len(paths))

    none_count = 0

    for path in paths:
        tests = mapper.classify(path)

        if tests is None:
            focused_target = False

            display.info('%s -> all' % path, verbosity=1)
            tests = all_tests(args)  # not categorized, run all tests
            display.warning('Path not categorized: %s' % path)
        else:
            focused_target = bool(tests.pop(FOCUSED_TARGET, None)) and path in original_paths

            tests = dict((key, value) for key, value in tests.items() if value)

            if focused_target and not any('integration' in command for command in tests):
                no_integration_paths.add(path)  # path triggers no integration tests

        if verbose_command:
            result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')

            # identify targeted integration tests (those which only target a single integration command)
            if 'integration' in verbose_command and tests.get(verbose_command):
                if not any('integration' in command for command in tests if command != verbose_command):
                    if focused_target:
                        result += ' (focused)'

                    result += ' (targeted)'
        else:
            result = '%s' % tests

        if not tests.get(verbose_command):
            # minimize excessive output from potentially thousands of files which do not trigger tests
            none_count += 1
            verbosity = 2
        else:
            verbosity = 1

        if args.verbosity >= verbosity:
            display.info('%s -> %s' % (path, result), verbosity=1)

        for command, target in tests.items():
            commands[command].add(target)

            if focused_target:
                focused_commands[command].add(target)

    if none_count > 0 and args.verbosity < 2:
        display.notice('Omitted %d file(s) that triggered no tests.' % none_count)

    for command, targets in commands.items():
        targets.discard('none')

        if any(target == 'all' for target in targets):
            commands[command] = {'all'}

    sorted_commands = dict((c, sorted(targets)) for c, targets in commands.items() if targets)
    focused_commands = dict((c, sorted(targets)) for c, targets in focused_commands.items())

    for command, targets in sorted_commands.items():
        if targets == ['all']:
            sorted_commands[command] = []  # changes require testing all targets, do not filter targets

    changes = ChangeDescription()
    changes.command = verbose_command
    changes.changed_paths = sorted(original_paths)
    changes.deleted_paths = sorted(deleted_paths)
    changes.regular_command_targets = sorted_commands
    changes.focused_command_targets = focused_commands
    changes.no_integration_paths = sorted(no_integration_paths)

    return changes
a06fa496d3f837cca3c437ab6e9858525633d147
20
__init__.py
1,067
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
78,529
0
1,141
637
206
266,714
412
ansible
55
test/lib/ansible_test/_internal/classification/__init__.py
Python
83
{ "docstring": "Categorize the given list of changed paths and return a description of the changes.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 12 }
https://github.com/ansible/ansible.git
1
test_grouped_annotation_in_group_by
def test_grouped_annotation_in_group_by(self):
    qs = (
        Book.objects.annotate(xprice=F("price"))
        .filter(rating=4.0)
        .values("rating", "xprice")
        .annotate(count=Count("publisher_id", distinct=True))
        .values("count", "rating")
        .order_by("count")
    )
    self.assertEqual(
        list(qs),
        [
            {"rating": 4.0, "count": 1},
            {"rating": 4.0, "count": 2},
        ],
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
23
tests.py
168
Refs #33476 -- Reformatted code with Black.
49,825
0
192
102
24
200,902
28
django
17
tests/aggregation/tests.py
Python
16
{ "docstring": "\n An annotation included in values() before an aggregate should be\n included in the group by clause.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 14 }
https://github.com/django/django.git
7
get_admin_log
def get_admin_log(parser, token):
    tokens = token.contents.split()
    if len(tokens) < 4:
        raise template.TemplateSyntaxError(
            "'get_admin_log' statements require two arguments"
        )
    if not tokens[1].isdigit():
        raise template.TemplateSyntaxError(
            "First argument to 'get_admin_log' must be an integer"
        )
    if tokens[2] != "as":
        raise template.TemplateSyntaxError(
            "Second argument to 'get_admin_log' must be 'as'"
        )
    if len(tokens) > 4:
        if tokens[4] != "for_user":
            raise template.TemplateSyntaxError(
                "Fourth argument to 'get_admin_log' must be 'for_user'"
            )
    return AdminLogNode(
        limit=tokens[1],
        varname=tokens[3],
        user=(tokens[5] if len(tokens) > 5 else None),
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
log.py
204
Refs #33476 -- Reformatted code with Black.
50,415
0
240
124
46
203,510
76
django
14
django/contrib/admin/templatetags/log.py
Python
24
{ "docstring": "\n Populate a template variable with the admin log for the given criteria.\n\n Usage::\n\n {% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}\n\n Examples::\n\n {% get_admin_log 10 as admin_log for_user 23 %}\n {% get_admin_log 10 as admin_log for_user user %}\n {% get_admin_log 10 as admin_log %}\n\n Note that ``context_var_containing_user_obj`` can be a hard-coded integer\n (user ID) or the name of a template context variable containing the user\n object whose ID you want.\n ", "language": "en", "n_whitespaces": 120, "n_words": 70, "vocab_size": 44 }
https://github.com/django/django.git
3
binned
def binned(self) -> List[List[str]]:
    if not self._binned:
        self._binned = self._binning()
        logger.debug({f"bin_{idx}": len(bin_) for idx, bin_ in enumerate(self._binned)})
    return self._binned
98d01760e469fd2108eed8d0b0a1ba6297c3177c
14
sort_methods.py
98
Overhaul sort: - Standardize image data reading and writing - Optimize loading (just one pass required) - Make all sort groups binnable (to greater or lesser results) - Add sort by pitch - Deprecate multiple options - linting, docs + locales
21,024
0
62
58
18
101,616
19
faceswap
12
tools/sort/sort_methods.py
Python
7
{ "docstring": " list: List of bins (list) containing the filenames belonging to the bin. The binning\n process is called when this property is first accessed", "language": "en", "n_whitespaces": 30, "n_words": 23, "vocab_size": 21 }
https://github.com/deepfakes/faceswap.git
11
_make_estimator
def _make_estimator(self, append=True, random_state=None):
    estimator = clone(self.base_estimator_)
    estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})

    # TODO: Remove in v1.2
    # criterion "mse" and "mae" would cause warnings in every call to
    # DecisionTreeRegressor.fit(..)
    if isinstance(estimator, (DecisionTreeRegressor, ExtraTreeRegressor)):
        if getattr(estimator, "criterion", None) == "mse":
            estimator.set_params(criterion="squared_error")
        elif getattr(estimator, "criterion", None) == "mae":
            estimator.set_params(criterion="absolute_error")

    # TODO(1.3): Remove
    # max_features = 'auto' would cause warnings in every call to
    # Tree.fit(..)
    if isinstance(estimator, BaseDecisionTree):
        if getattr(estimator, "max_features", None) == "auto":
            if isinstance(estimator, DecisionTreeClassifier):
                estimator.set_params(max_features="sqrt")
            elif isinstance(estimator, DecisionTreeRegressor):
                estimator.set_params(max_features=1.0)

    if random_state is not None:
        _set_random_states(estimator, random_state)

    if append:
        self.estimators_.append(estimator)

    return estimator
e5736afb316038c43301d2c53ce39f9a89b64495
15
_base.py
288
API Deprecate max_feature=`auto` for tree classes (#22476) Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
75,735
0
347
175
62
259,364
96
scikit-learn
20
sklearn/ensemble/_base.py
Python
19
{ "docstring": "Make and configure a copy of the `base_estimator_` attribute.\n\n Warning: This method should be used to properly instantiate new\n sub-estimators.\n ", "language": "en", "n_whitespaces": 41, "n_words": 20, "vocab_size": 20 }
https://github.com/scikit-learn/scikit-learn.git
14
lattice_reference
def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):
    import numpy as np

    from networkx.utils import cumulative_distribution, discrete_sequence

    local_conn = nx.connectivity.local_edge_connectivity

    if len(G) < 4:
        raise nx.NetworkXError("Graph has less than four nodes.")
    # Instead of choosing uniformly at random from a generated edge list,
    # this algorithm chooses nonuniformly from the set of nodes with
    # probability weighted by degree.
    G = G.copy()
    keys, degrees = zip(*G.degree())  # keys, degree
    cdf = cumulative_distribution(degrees)  # cdf of degree

    nnodes = len(G)
    nedges = nx.number_of_edges(G)
    if D is None:
        D = np.zeros((nnodes, nnodes))
        un = np.arange(1, nnodes)
        um = np.arange(nnodes - 1, 0, -1)
        u = np.append((0,), np.where(un < um, un, um))

        for v in range(int(np.ceil(nnodes / 2))):
            D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1])
            D[v, :] = D[nnodes - v - 1, :][::-1]

    niter = niter * nedges
    # maximal number of rewiring attempts per 'niter'
    max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2))

    for _ in range(niter):
        n = 0
        while n < max_attempts:
            # pick two random edges without creating edge list
            # choose source node indices from discrete distribution
            (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed)
            if ai == ci:
                continue  # same source, skip
            a = keys[ai]  # convert index to label
            c = keys[ci]
            # choose target uniformly from neighbors
            b = seed.choice(list(G.neighbors(a)))
            d = seed.choice(list(G.neighbors(c)))
            bi = keys.index(b)
            di = keys.index(d)

            if b in [a, c, d] or d in [a, b, c]:
                continue  # all vertices should be different

            # don't create parallel edges
            if (d not in G[a]) and (b not in G[c]):
                if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]:
                    # only swap if we get closer to the diagonal
                    G.add_edge(a, d)
                    G.add_edge(c, b)
                    G.remove_edge(a, b)
                    G.remove_edge(c, d)

                    # Check if the graph is still connected
                    if connectivity and local_conn(G, a, b) == 0:
                        # Not connected, revert the swap
                        G.remove_edge(a, d)
                        G.remove_edge(c, b)
                        G.add_edge(a, b)
                        G.add_edge(c, d)
                    else:
                        break
            n += 1

    return G


@py_random_state(3)
@not_implemented_for("directed")
@not_implemented_for("multigraph")
bd56814a8e9c988fc3d12801315226f249af1419
@py_random_state(3) @not_implemented_for("directed") @not_implemented_for("multigraph")
17
smallworld.py
829
Keep omega within [-1, 1] bounds (#5216) * Adjusted calculation of small-world metric omega to be within the correct bounds * Update docstring to reflect that there is no hard bound on omega * add release note * maint: rm print statements from tests. * Update release file per comment Co-authored-by: Dan Schult <dschult@colgate.edu> Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
41,846
1
976
516
216
176,340
339
networkx
57
networkx/algorithms/smallworld.py
Python
50
{ "docstring": "Latticize the given graph by swapping edges.\n\n Parameters\n ----------\n G : graph\n An undirected graph with 4 or more nodes.\n\n niter : integer (optional, default=1)\n An edge is rewired approximatively niter times.\n\n D : numpy.array (optional, default=None)\n Distance to the diagonal matrix.\n\n connectivity : boolean (optional, default=True)\n Ensure connectivity for the latticized graph when set to True.\n\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness<randomness>`.\n\n Returns\n -------\n G : graph\n The latticized graph.\n\n Notes\n -----\n The implementation is adapted from the algorithm by Sporns et al. [1]_.\n which is inspired from the original work by Maslov and Sneppen(2002) [2]_.\n\n References\n ----------\n .. [1] Sporns, Olaf, and Jonathan D. Zwi.\n \"The small world of the cerebral cortex.\"\n Neuroinformatics 2.2 (2004): 145-162.\n .. [2] Maslov, Sergei, and Kim Sneppen.\n \"Specificity and stability in topology of protein networks.\"\n Science 296.5569 (2002): 910-913.\n ", "language": "en", "n_whitespaces": 276, "n_words": 146, "vocab_size": 110 }
https://github.com/networkx/networkx.git
1
transpose_output
def transpose_output(self, X):
    X = X.reshape(-1, self.num_heads, X.shape[1], X.shape[2])
    X = X.transpose(0, 2, 1, 3)
    return X.reshape(X.shape[0], X.shape[1], -1)
f0be7e672bc0a7c77005d5c79452d796cfe1a06b
10
mxnet.py
103
Refactor Multihead Attn, Self Attn, and Transformer (#2096) * multihead attn * self attn and pos encoding * simplify * before EncoderBlock * before tmencoder * before decoder block * before training * transformer code * rm seq2seq encoder old * fix bahdanau attn map * transformer done, perf tuned * clean super
74,224
0
47
71
16
253,767
19
d2l-en
7
d2l/mxnet.py
Python
4
{ "docstring": "Reverse the operation of transpose_qkv.\n \n Defined in :numref:`sec_multihead-attention`", "language": "en", "n_whitespaces": 18, "n_words": 8, "vocab_size": 8 }
https://github.com/d2l-ai/d2l-en.git
1
test_decoder_model_past_with_large_inputs
def test_decoder_model_past_with_large_inputs(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
    self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
8635407bc724c45142c1f91dbc9ef3ea681e1a56
9
test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py
43
Fix tf.concatenate + test past_key_values for TF models (#15774) * fix wrong method name tf.concatenate * add tests related to causal LM / decoder * make style and quality * clean-up * Fix TFBertModel's extended_attention_mask when past_key_values is provided * Fix tests * fix copies * More tf.int8 -> tf.int32 in TF test template * clean-up * Update TF test template * revert the previous commit + update the TF test template * Fix TF template extended_attention_mask when past_key_values is provided * Fix some styles manually * clean-up * Fix ValueError: too many values to unpack in the test * Fix more: too many values to unpack in the test * Add a comment for extended_attention_mask when there is past_key_values * Fix TFElectra extended_attention_mask when past_key_values is provided * Add tests to other TF models * Fix for TF Electra test: add prepare_config_and_inputs_for_decoder * Fix not passing training arg to lm_head in TFRobertaForCausalLM * Fix tests (with past) for TF Roberta * add testing for pask_key_values for TFElectra model Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
6,470
0
27
24
6
35,527
6
transformers
6
templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py
Python
3
{ "docstring": "Similar to `test_causal_lm_model_past_with_large_inputs` but with cross-attention", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
https://github.com/huggingface/transformers.git
11
get_dependencies_from_index
def get_dependencies_from_index(dep, sources=None, pip_options=None, wheel_cache=None):
    session, finder = get_finder(sources=sources, pip_options=pip_options)
    dep.is_direct = True
    requirements = None
    setup_requires = {}
    with temp_environ(), ExitStack() as stack:
        if not wheel_cache:
            wheel_cache = stack.enter_context(_get_wheel_cache())
        os.environ["PIP_EXISTS_ACTION"] = "i"
        if dep.editable and not dep.prepared and not dep.req:
            setup_info = SetupInfo.from_ireq(dep)
            results = setup_info.get_info()
            setup_requires.update(results["setup_requires"])
            requirements = set(results["requires"].values())
        else:
            results = shims.resolve(dep)
            requirements = [v for v in results.values() if v.name != dep.name]
        requirements = set([format_requirement(r) for r in requirements])
    if not dep.editable and is_pinned_requirement(dep) and requirements is not None:
        DEPENDENCY_CACHE[dep] = list(requirements)
    return requirements
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
16
dependencies.py
333
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,270
0
231
202
57
22,226
88
pipenv
38
pipenv/vendor/requirementslib/models/dependencies.py
Python
21
{ "docstring": "Retrieves dependencies for the given install requirement from the pip\n resolver.\n\n :param dep: A single InstallRequirement\n :type dep: :class:`~pipenv.patched.pip._internal.req.req_install.InstallRequirement`\n :param sources: Pipfile-formatted sources, defaults to None\n :type sources: list[dict], optional\n :return: A set of dependency lines for generating new InstallRequirements.\n :rtype: set(str) or None\n ", "language": "en", "n_whitespaces": 68, "n_words": 44, "vocab_size": 36 }
https://github.com/pypa/pipenv.git
6
_get_sourcefile
def _get_sourcefile(bytecode_path):
    if len(bytecode_path) == 0:
        return None
    rest, _, extension = bytecode_path.rpartition('.')
    if not rest or extension.lower()[-3:-1] != 'py':
        return bytecode_path
    try:
        source_path = source_from_cache(bytecode_path)
    except (NotImplementedError, ValueError):
        source_path = bytecode_path[:-1]
    return source_path if _path_isfile(source_path) else bytecode_path
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
_bootstrap_external.py
135
add python 3.10.4 for windows
55,163
0
87
81
29
218,147
38
XX-Net
13
python3.10.4/Lib/importlib/_bootstrap_external.py
Python
11
{ "docstring": "Convert a bytecode file path to a source path (if possible).\n\n This function exists purely for backwards-compatibility for\n PyImport_ExecCodeModuleWithFilenames() in the C API.\n\n ", "language": "en", "n_whitespaces": 32, "n_words": 23, "vocab_size": 20 }
https://github.com/XX-net/XX-Net.git
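As a hedged illustration of the cache-to-source mapping the helper above relies on, the public importlib.util.source_from_cache API can be called directly; the bytecode path below is made up but follows the PEP 3147 layout.

# Sketch using the public API (importlib.util.source_from_cache).
# The .pyc path is a hypothetical PEP 3147-style path; the file need not exist.
import importlib.util

pyc = "/project/pkg/__pycache__/mod.cpython-310.pyc"
print(importlib.util.source_from_cache(pyc))  # -> /project/pkg/mod.py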
1
reset_display_options
def reset_display_options() -> None:
    pd.reset_option("^display.", silent=True)


# -----------------------------------------------------------------------------
# Comparators
f538568afc2c76c2d738d32e3544cf9fe6742960
8
__init__.py
34
TYP: misc return type annotations (#47558)
40,037
0
14
17
9
167,566
10
pandas
4
pandas/_testing/__init__.py
Python
5
{ "docstring": "\n Reset the display options for printing and representing objects.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/pandas-dev/pandas.git
1
test_edit_save_as_delete_inline
def test_edit_save_as_delete_inline(self):
    post_data = self.inline_post_data.copy()
    post_data.update(
        {
            "_saveasnew": "Save+as+new",
            "article_set-1-section": "1",
            "article_set-2-section": "1",
            "article_set-2-DELETE": "1",
            "article_set-3-section": "1",
        }
    )
    response = self.client.post(
        reverse("admin:admin_views_section_change", args=(self.s1.pk,)), post_data
    )
    self.assertEqual(response.status_code, 302)
    # started with 3 articles, one was deleted.
    self.assertEqual(Section.objects.latest("id").article_set.count(), 2)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
tests.py
176
Refs #33476 -- Reformatted code with Black.
52,139
0
209
99
32
207,868
38
django
20
tests/admin_views/tests.py
Python
16
{ "docstring": "\n Should be able to \"Save as new\" while also deleting an inline.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
https://github.com/django/django.git
5
_compute_fans
def _compute_fans(shape):
    if len(shape) < 1:  # Just to avoid errors for constants.
        fan_in = fan_out = 1
    elif len(shape) == 1:
        fan_in = fan_out = shape[0]
    elif len(shape) == 2:
        fan_in = shape[0]
        fan_out = shape[1]
    else:
        # Assuming convolution kernels (2D, 3D, or more).
        # kernel shape: (..., input_depth, depth)
        receptive_field_size = 1
        for dim in shape[:-2]:
            receptive_field_size *= dim
        fan_in = shape[-2] * receptive_field_size
        fan_out = shape[-1] * receptive_field_size
    return int(fan_in), int(fan_out)


@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
12
initializers_test.py
198
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,963
1
176
102
48
272,135
77
keras
12
keras/initializers/initializers_test.py
Python
15
{ "docstring": "Computes the number of input and output units for a weight shape.\n\n Args:\n shape: Integer shape tuple or TF tensor shape.\n\n Returns:\n A tuple of integer scalars (fan_in, fan_out).\n ", "language": "en", "n_whitespaces": 48, "n_words": 29, "vocab_size": 26 }
https://github.com/keras-team/keras.git
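To make the fan-in/fan-out arithmetic above concrete, here is a standalone re-computation mirroring the record's logic (not importing Keras), applied to a hypothetical 3x3 convolution kernel with 64 input and 128 output channels.

# Standalone sketch of the fan computation rule shown in the record;
# the shape values are arbitrary assumptions for the demo.
def compute_fans(shape):
    if len(shape) < 1:
        return 1, 1
    if len(shape) == 1:
        return shape[0], shape[0]
    if len(shape) == 2:
        return shape[0], shape[1]
    receptive_field_size = 1
    for dim in shape[:-2]:          # spatial dimensions of the kernel
        receptive_field_size *= dim
    return shape[-2] * receptive_field_size, shape[-1] * receptive_field_size

print(compute_fans((3, 3, 64, 128)))  # (576, 1152): 9*64 inputs, 9*128 outputs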
6
sparse_categorical_matches
def sparse_categorical_matches(y_true, y_pred): reshape_matches = False y_pred = tf.convert_to_tensor(y_pred) y_true = tf.convert_to_tensor(y_true) y_true_org_shape = tf.shape(y_true) y_pred_rank = y_pred.shape.ndims y_true_rank = y_true.shape.ndims # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,) if ( (y_true_rank is not None) and (y_pred_rank is not None) and (len(backend.int_shape(y_true)) == len(backend.int_shape(y_pred))) ): y_true = tf.squeeze(y_true, [-1]) reshape_matches = True y_pred = tf.math.argmax(y_pred, axis=-1) # If the predicted output and actual output types don't match, force cast them # to match. if backend.dtype(y_pred) != backend.dtype(y_true): y_pred = tf.cast(y_pred, backend.dtype(y_true)) matches = tf.cast(tf.equal(y_true, y_pred), backend.floatx()) if reshape_matches: matches = tf.reshape(matches, shape=y_true_org_shape) return matches
84afc5193d38057e2e2badf9c889ea87d80d8fbf
13
metrics_utils.py
297
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,817
0
198
187
66
276,986
98
keras
24
keras/utils/metrics_utils.py
Python
21
{ "docstring": "Creates float Tensor, 1.0 for label-prediction match, 0.0 for mismatch.\n\n You can provide logits of classes as `y_pred`, since argmax of\n logits and probabilities are same.\n\n Args:\n y_true: Integer ground truth values.\n y_pred: The prediction values.\n\n Returns:\n Match tensor: 1.0 for label-prediction match, 0.0 for mismatch.\n ", "language": "en", "n_whitespaces": 76, "n_words": 46, "vocab_size": 35 }
https://github.com/keras-team/keras.git
6
query_yes_no
def query_yes_no(question, default="yes"):
    valid = {"yes": "yes", "y": "yes", "ye": "yes", "no": "no", "n": "no"}
    prompt = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}.get(default, None)

    if not prompt:
        raise ValueError("invalid default answer: '%s'" % default)

    reply = None

    while not reply:
        sys.stdout.write(colorize(question, Colors.PROMPT) + prompt)

        choice = input().lower()
        reply = None

        if default and not choice:
            reply = default
        elif choice in valid:
            reply = valid[choice]
        else:
            print_failure("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")

    return reply == "yes"
7bf15bf33644ef13e34dff63beaf8d19387219be
13
updateHostsFile.py
245
Use https in more places
27,310
0
189
132
62
123,265
86
hosts
18
updateHostsFile.py
Python
17
{ "docstring": "\n Ask a yes/no question via input() and get answer from the user.\n\n Inspired by the following implementation:\n\n https://code.activestate.com/recipes/577058/\n\n Parameters\n ----------\n question : str\n The question presented to the user.\n default : str, default \"yes\"\n The presumed answer if the user just hits <Enter>. It must be \"yes\",\n \"no\", or None (means an answer is required of the user).\n\n Returns\n -------\n yes : Whether or not the user replied yes to the question.\n ", "language": "en", "n_whitespaces": 127, "n_words": 72, "vocab_size": 53 }
https://github.com/StevenBlack/hosts.git
7
wait_start_success
def wait_start_success(self) -> None:
    if not self.args.noblock_on_start:
        raise ValueError(
            f'{self.wait_start_success!r} should only be called when `noblock_on_start` is set to True'
        )
    try:
        if self.uses_before_pea is not None:
            self.uses_before_pea.wait_start_success()
        if self.uses_after_pea is not None:
            self.uses_after_pea.wait_start_success()
        if self.head_pea is not None:
            self.head_pea.wait_start_success()
        for shard_id in self.shards:
            self.shards[shard_id].wait_start_success()
        self.activate()
    except:
        self.close()
        raise
933415bfa1f9eb89f935037014dfed816eb9815d
13
__init__.py
173
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests with jinad 
* ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <bo.wang@jina.ai> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" This 
reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request with 
jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <tobias.jacobowitz@posteo.de> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Deepankar Mahapatro <deepankar.mahapatro@jina.ai> Co-authored-by: bwanglzu <bo.wang@jina.ai> Co-authored-by: AlaeddineAbdessalem <alaeddine-13@live.fr> Co-authored-by: Zhaofeng Miao <522856232@qq.com>
1,759
0
251
99
36
9,892
49
jina
12
jina/peapods/pods/__init__.py
Python
22
{ "docstring": "Block until all peas starts successfully.\n\n If not successful, it will raise an error hoping the outer function to catch it\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 20 }
https://github.com/jina-ai/jina.git
40
nD
def nD(i=None, brute=None, *, n=None, m=None): from sympy.integrals.integrals import integrate from sympy.functions.special.polynomials import laguerre from sympy.abc import x
e0dc14eca132f37c5f49369eb4051eae37c9b119
6
numbers.py
67
Refactored import ordering in functions
48,287
0
30
562
14
197,011
18
sympy
14
sympy/functions/combinatorial/numbers.py
Python
67
{ "docstring": "return the number of derangements for: ``n`` unique items, ``i``\n items (as a sequence or multiset), or multiplicities, ``m`` given\n as a sequence or multiset.\n\n Examples\n ========\n\n >>> from sympy.utilities.iterables import generate_derangements as enum\n >>> from sympy.functions.combinatorial.numbers import nD\n\n A derangement ``d`` of sequence ``s`` has all ``d[i] != s[i]``:\n\n >>> set([''.join(i) for i in enum('abc')])\n {'bca', 'cab'}\n >>> nD('abc')\n 2\n\n Input as iterable or dictionary (multiset form) is accepted:\n\n >>> assert nD([1, 2, 2, 3, 3, 3]) == nD({1: 1, 2: 2, 3: 3})\n\n By default, a brute-force enumeration and count of multiset permutations\n is only done if there are fewer than 9 elements. There may be cases when\n there is high multiplicty with few unique elements that will benefit\n from a brute-force enumeration, too. For this reason, the `brute`\n keyword (default None) is provided. When False, the brute-force\n enumeration will never be used. When True, it will always be used.\n\n >>> nD('1111222233', brute=True)\n 44\n\n For convenience, one may specify ``n`` distinct items using the\n ``n`` keyword:\n\n >>> assert nD(n=3) == nD('abc') == 2\n\n Since the number of derangments depends on the multiplicity of the\n elements and not the elements themselves, it may be more convenient\n to give a list or multiset of multiplicities using keyword ``m``:\n\n >>> assert nD('abc') == nD(m=(1,1,1)) == nD(m={1:3}) == 2\n\n ", "language": "en", "n_whitespaces": 304, "n_words": 217, "vocab_size": 140 }
https://github.com/sympy/sympy.git
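As an aside, the "brute-force enumeration" route mentioned in the docstring above can be sketched in a few lines of plain Python (illustration only, not SymPy's actual implementation, and only practical for short inputs):

from itertools import permutations

def count_derangements_brute(seq):
    # Count distinct permutations p of seq with p[i] != seq[i] at every position i.
    seq = tuple(seq)
    seen = set()
    total = 0
    for p in permutations(seq):
        if p in seen:          # skip duplicate permutations of repeated elements
            continue
        seen.add(p)
        if all(a != b for a, b in zip(p, seq)):
            total += 1
    return total

# count_derangements_brute('abc') == 2, matching nD('abc') in the docstring above.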
1
test_tf_non_distributed
def test_tf_non_distributed(ray_start_4_cpus): trainer = TorchTrainer( tf_quick_start_train_func, scaling_config=dict(num_workers=1) ) trainer.fit() # TODO: Refactor as a backend test.
b9a4f64f32389a1f76a7f74103b4d5da089ebb2b
12
test_examples.py
46
[AIR/train] Use new Train API (#25735) Uses the new AIR Train API for examples and tests. The `Result` object gets a new attribute - `log_dir`, pointing to the Trial's `logdir` allowing users to access tensorboard logs and artifacts of other loggers. This PR only deals with "low hanging fruit" - tests that need substantial rewriting or Train user guide are not touched. Those will be updated in followup PRs. Tests and examples that concern deprecated features or which are duplicated in AIR have been removed or disabled. Requires https://github.com/ray-project/ray/pull/25943 to be merged in first
27,594
0
34
26
16
124,395
16
ray
9
python/ray/train/tests/test_examples.py
Python
5
{ "docstring": "Make sure Ray Train works without TF MultiWorkerMirroredStrategy.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ray-project/ray.git
7
load_accelerator_state
def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=None): # Model states for i, model in enumerate(models): weights_name = f"{MODEL_NAME}.bin" if i == 0 else f"{MODEL_NAME}_{i}.bin" input_model_file = os.path.join(input_dir, weights_name) models[i].load_state_dict(torch.load(input_model_file)) logger.info("All model weights loaded successfully") # Optimizer states for i, opt in enumerate(optimizers): optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" input_optimizer_file = os.path.join(input_dir, optimizer_name) optimizers[i].load_state_dict(torch.load(input_optimizer_file)) logger.info("All optimizer states loaded successfully") # GradScaler state if scaler is not None: input_scaler_file = os.path.join(input_dir, SCALER_NAME) scaler.load_state_dict(torch.load(input_scaler_file)) logger.info("GradScaler state loaded successfully") # Random states states = torch.load(os.path.join(input_dir, f"{RNG_STATE_NAME}_{process_index}.pkl")) random.setstate(states["random_state"]) np.random.set_state(states["numpy_random_seed"]) torch.set_rng_state(states["torch_manual_seed"]) torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"]) # ^^ safe to call this function even if cuda is not available if is_tpu_available(): xm.set_rng_state(states["xm_seed"]) logger.info("All random states loaded successfully")
fb5ed62c102c0323486b89805e1888495de3db15
12
checkpointing.py
421
Convert documentation to the new front (#271) * Main conversion * Doc styling * Style * New front deploy * Fixes * Fixes * Fix new docstrings * Style
121,002
0
233
235
70
337,288
109
accelerate
37
src/accelerate/checkpointing.py
Python
23
{ "docstring": "\n Loads states of the models, optimizers, scaler, and RNG generators from a given directory.\n\n Args:\n input_dir (`str` or `os.PathLike`):\n The name of the folder to load all relevant weights and states.\n model_stmodelsates (`List[torch.nn.Module]`):\n A list of model instances\n optimizers (`List[torch.optim.Optimizer]`):\n A list of optimizer instances\n process_index (`int`):\n The current process index in the Accelerator state\n scaler (`torch.cuda.amp.GradScaler`, *optional*):\n An optional *GradScaler* instance to load\n ", "language": "en", "n_whitespaces": 164, "n_words": 64, "vocab_size": 52 }
https://github.com/huggingface/accelerate.git
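A rough usage sketch for the loader above; the toy model, optimizer and directory name are assumptions made for illustration, not taken from the accelerate repository:

import torch

model = torch.nn.Linear(4, 2)                             # hypothetical model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)    # hypothetical optimizer

# Assuming a matching save step already wrote the *.bin weight/optimizer files and the
# RNG-state pickle into "checkpoints/step_100", the states are restored in place with:
# load_accelerator_state("checkpoints/step_100", [model], [optimizer], process_index=0, scaler=None)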
7
__call__
def __call__(self, results): if random.random() < self.shift_ratio: img_shape = results['img'].shape[:2] random_shift_x = random.randint(-self.max_shift_px, self.max_shift_px) random_shift_y = random.randint(-self.max_shift_px, self.max_shift_px) new_x = max(0, random_shift_x) ori_x = max(0, -random_shift_x) new_y = max(0, random_shift_y) ori_y = max(0, -random_shift_y) # TODO: support mask and semantic segmentation maps. for key in results.get('bbox_fields', []): bboxes = results[key].copy() bboxes[..., 0::2] += random_shift_x bboxes[..., 1::2] += random_shift_y # clip border bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1]) bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0]) # remove invalid bboxes bbox_w = bboxes[..., 2] - bboxes[..., 0] bbox_h = bboxes[..., 3] - bboxes[..., 1] valid_inds = (bbox_w > self.filter_thr_px) & ( bbox_h > self.filter_thr_px) # If the shift does not contain any gt-bbox area, skip this # image. if key == 'gt_bboxes' and not valid_inds.any(): return results bboxes = bboxes[valid_inds] results[key] = bboxes # label fields. e.g. gt_labels and gt_labels_ignore label_key = self.bbox2label.get(key) if label_key in results: results[label_key] = results[label_key][valid_inds] for key in results.get('img_fields', ['img']): img = results[key] new_img = np.zeros_like(img) img_h, img_w = img.shape[:2] new_h = img_h - np.abs(random_shift_y) new_w = img_w - np.abs(random_shift_x) new_img[new_y:new_y + new_h, new_x:new_x + new_w] \ = img[ori_y:ori_y + new_h, ori_x:ori_x + new_w] results[key] = new_img return results
86037650f243a1ab0a515a22e831ea5dcddd6a7d
14
transforms.py
604
Unified name of orig as ori (#7456)
70,237
0
861
393
115
244,099
193
mmdetection
37
mmdet/datasets/pipelines/transforms.py
Python
38
{ "docstring": "Call function to random shift images, bounding boxes.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Shift results.\n ", "language": "en", "n_whitespaces": 63, "n_words": 20, "vocab_size": 20 }
https://github.com/open-mmlab/mmdetection.git
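The core shift-and-clip step of the transform above, reduced to a standalone NumPy snippet with hand-picked toy values (not part of mmdetection):

import numpy as np

bboxes = np.array([[10., 20., 50., 60.]])   # one box as (x1, y1, x2, y2)
shift_x, shift_y = 30, -15                  # a hypothetical random shift
img_h, img_w = 64, 64                       # toy image shape

shifted = bboxes.copy()
shifted[..., 0::2] += shift_x                               # move x coordinates
shifted[..., 1::2] += shift_y                               # move y coordinates
shifted[..., 0::2] = np.clip(shifted[..., 0::2], 0, img_w)  # clip to image width
shifted[..., 1::2] = np.clip(shifted[..., 1::2], 0, img_h)  # clip to image height
# shifted is now [[40., 5., 64., 45.]]; a box whose width or height collapses below
# filter_thr_px would then be dropped by the valid_inds mask in the transform above.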
10
svd
def svd(a, full_matrices=True, compute_uv=True, hermitian=False): import numpy as _nx a, wrap = _makearray(a) if hermitian: # note: lapack svd returns eigenvalues with s ** 2 sorted descending, # but eig returns s sorted ascending, so we re-order the eigenvalues # and related arrays to have the correct order if compute_uv: s, u = eigh(a) sgn = sign(s) s = abs(s) sidx = argsort(s)[..., ::-1] sgn = _nx.take_along_axis(sgn, sidx, axis=-1) s = _nx.take_along_axis(s, sidx, axis=-1) u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1) # singular values are unsigned, move the sign into v vt = transpose(u * sgn[..., None, :]).conjugate() return wrap(u), s, wrap(vt) else: s = eigvalsh(a) s = s[..., ::-1] s = abs(s) return sort(s)[..., ::-1] _assert_stacked_2d(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) m, n = a.shape[-2:] if compute_uv: if full_matrices: if m < n: gufunc = _umath_linalg.svd_m_f else: gufunc = _umath_linalg.svd_n_f else: if m < n: gufunc = _umath_linalg.svd_m_s else: gufunc = _umath_linalg.svd_n_s signature = 'D->DdD' if isComplexType(t) else 'd->ddd' u, s, vh = gufunc(a, signature=signature, extobj=extobj) u = u.astype(result_t, copy=False) s = s.astype(_realType(result_t), copy=False) vh = vh.astype(result_t, copy=False) return wrap(u), s, wrap(vh) else: if m < n: gufunc = _umath_linalg.svd_m else: gufunc = _umath_linalg.svd_n signature = 'D->d' if isComplexType(t) else 'd->d' s = gufunc(a, signature=signature, extobj=extobj) s = s.astype(_realType(result_t), copy=False) return s
40747ae50620631941e43dbbd5baaccab669922f
17
linalg.py
635
clarify svd documentation `u @ np.diag(s) @ vh` can only reproduce the original matrix when `full_matrices` is `False`, otherwise dimension does not match.
38,474
0
658
399
116
160,044
215
numpy
48
numpy/linalg/linalg.py
Python
49
{ "docstring": "\n Singular Value Decomposition.\n\n When `a` is a 2D array, and when `full_matrices` is `False`,\n it is factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``,\n where `u` and `vh` are 2D unitary arrays and `s` is a 1D\n array of `a`'s singular values. When `a` is higher-dimensional, SVD is\n applied in stacked mode as explained below.\n\n Parameters\n ----------\n a : (..., M, N) array_like\n A real or complex array with ``a.ndim >= 2``.\n full_matrices : bool, optional\n If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and\n ``(..., N, N)``, respectively. Otherwise, the shapes are\n ``(..., M, K)`` and ``(..., K, N)``, respectively, where\n ``K = min(M, N)``.\n compute_uv : bool, optional\n Whether or not to compute `u` and `vh` in addition to `s`. True\n by default.\n hermitian : bool, optional\n If True, `a` is assumed to be Hermitian (symmetric if real-valued),\n enabling a more efficient method for finding singular values.\n Defaults to False.\n\n .. versionadded:: 1.17.0\n\n Returns\n -------\n u : { (..., M, M), (..., M, K) } array\n Unitary array(s). The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`. The size of the last two dimensions\n depends on the value of `full_matrices`. Only returned when\n `compute_uv` is True.\n s : (..., K) array\n Vector(s) with the singular values, within each vector sorted in\n descending order. The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`.\n vh : { (..., N, N), (..., K, N) } array\n Unitary array(s). The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`. The size of the last two dimensions\n depends on the value of `full_matrices`. Only returned when\n `compute_uv` is True.\n\n Raises\n ------\n LinAlgError\n If SVD computation does not converge.\n\n See Also\n --------\n scipy.linalg.svd : Similar function in SciPy.\n scipy.linalg.svdvals : Compute singular values of a matrix.\n\n Notes\n -----\n\n .. versionchanged:: 1.8.0\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The decomposition is performed using LAPACK routine ``_gesdd``.\n\n SVD is usually described for the factorization of a 2D matrix :math:`A`.\n The higher-dimensional case will be discussed below. In the 2D case, SVD is\n written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,\n :math:`S= \\\\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`\n contains the singular values of `a` and `u` and `vh` are unitary. The rows\n of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are\n the eigenvectors of :math:`A A^H`. In both cases the corresponding\n (possibly non-zero) eigenvalues are given by ``s**2``.\n\n If `a` has more than two dimensions, then broadcasting rules apply, as\n explained in :ref:`routines.linalg-broadcasting`. This means that SVD is\n working in \"stacked\" mode: it iterates over all indices of the first\n ``a.ndim - 2`` dimensions and for each combination SVD is applied to the\n last two indices. The matrix `a` can be reconstructed from the\n decomposition with either ``(u * s[..., None, :]) @ vh`` or\n ``u @ (s[..., None] * vh)``. 
(The ``@`` operator can be replaced by the\n function ``np.matmul`` for python versions below 3.5.)\n\n If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are\n all the return values.\n\n Examples\n --------\n >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)\n >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)\n\n Reconstruction based on full SVD, 2D case:\n\n >>> u, s, vh = np.linalg.svd(a, full_matrices=True)\n >>> u.shape, s.shape, vh.shape\n ((9, 9), (6,), (6, 6))\n >>> np.allclose(a, np.dot(u[:, :6] * s, vh))\n True\n >>> smat = np.zeros((9, 6), dtype=complex)\n >>> smat[:6, :6] = np.diag(s)\n >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))\n True\n\n Reconstruction based on reduced SVD, 2D case:\n\n >>> u, s, vh = np.linalg.svd(a, full_matrices=False)\n >>> u.shape, s.shape, vh.shape\n ((9, 6), (6,), (6, 6))\n >>> np.allclose(a, np.dot(u * s, vh))\n True\n >>> smat = np.diag(s)\n >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))\n True\n\n Reconstruction based on full SVD, 4D case:\n\n >>> u, s, vh = np.linalg.svd(b, full_matrices=True)\n >>> u.shape, s.shape, vh.shape\n ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))\n >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))\n True\n >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))\n True\n\n Reconstruction based on reduced SVD, 4D case:\n\n >>> u, s, vh = np.linalg.svd(b, full_matrices=False)\n >>> u.shape, s.shape, vh.shape\n ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))\n >>> np.allclose(b, np.matmul(u * s[..., None, :], vh))\n True\n >>> np.allclose(b, np.matmul(u, s[..., None] * vh))\n True\n\n ", "language": "en", "n_whitespaces": 1180, "n_words": 746, "vocab_size": 328 }
https://github.com/numpy/numpy.git
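The docstring examples above do not exercise the `hermitian=True` branch; a small hand-checked illustration of it (assuming NumPy >= 1.17):

import numpy as np

a = np.array([[2., 1.],
              [1., 2.]])                       # real symmetric, hence Hermitian
u, s, vh = np.linalg.svd(a, hermitian=True)    # takes the eigh-based path shown above
np.allclose(s, [3., 1.])                       # True: singular values are 3 and 1
np.allclose(a, (u * s) @ vh)                   # True: reconstruction still holds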
1
test_create_placeholder_if_not_exist_in_template
def test_create_placeholder_if_not_exist_in_template(self): page = create_page('Test', 'col_two.html', 'en') # I need to make it seem like the user added another placeholder to the SAME template. page._template_cache = 'col_three.html' request = self.get_request(page=page) context = SekizaiContext() context['request'] = request self.assertObjectDoesNotExist(page.placeholders.all(), slot='col_right') context = self.get_context(page=page) renderer = self.get_content_renderer(request) renderer.render_page_placeholder( 'col_right', context, inherit=False, page=page, ) self.assertObjectExist(page.placeholders.all(), slot='col_right')
c1290c9ff89cb00caa5469129fd527e9d82cd820
10
test_templatetags.py
188
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <cclauss@me.com> * ci: codespell config taken from #7292
17,398
0
187
109
41
82,432
52
django-cms
19
cms/tests/test_templatetags.py
Python
16
{ "docstring": "\n Tests that adding a new placeholder to an existing page's template\n creates the placeholder.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
https://github.com/django-cms/django-cms.git
6
extra_state_attributes
def extra_state_attributes(self) -> dict[str, int | float | None] | None: # Only add attributes to the original sensor if self.entity_description.key != "days_until_expiration": return None if self.coordinator.data is None: return None attrs = { ATTR_EXPIRES: self.coordinator.data.expiration_date.isoformat(), } if self.coordinator.data.name_servers: attrs[ATTR_NAME_SERVERS] = " ".join(self.coordinator.data.name_servers) if self.coordinator.data.last_updated: attrs[ATTR_UPDATED] = self.coordinator.data.last_updated.isoformat() if self.coordinator.data.registrar: attrs[ATTR_REGISTRAR] = self.coordinator.data.registrar return attrs
d15d081646c26d32f860d8f84b4f29d848dab148
13
sensor.py
212
Add data update coordinator to Whois (#64846) Co-authored-by: Joakim Sørensen <joasoe@gmail.com>
109,631
0
191
133
41
310,960
55
core
21
homeassistant/components/whois/sensor.py
Python
16
{ "docstring": "Return the state attributes of the monitored installation.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
https://github.com/home-assistant/core.git
8
populate_deserializable_objects
def populate_deserializable_objects(): global LOCAL if not hasattr(LOCAL, 'ALL_OBJECTS'): LOCAL.ALL_OBJECTS = {} LOCAL.GENERATED_WITH_V2 = None if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 == tf.__internal__.tf2.enabled( ): # Objects dict is already generated for the proper TF version: # do nothing. return LOCAL.ALL_OBJECTS = {} LOCAL.GENERATED_WITH_V2 = tf.__internal__.tf2.enabled() base_cls = base_layer.Layer generic_utils.populate_dict_with_module_objects( LOCAL.ALL_OBJECTS, ALL_MODULES, obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls)) # Overwrite certain V1 objects with V2 versions if tf.__internal__.tf2.enabled(): generic_utils.populate_dict_with_module_objects( LOCAL.ALL_OBJECTS, ALL_V2_MODULES, obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls)) # These deserialization aliases are added for backward compatibility, # as in TF 1.13, "BatchNormalizationV1" and "BatchNormalizationV2" # were used as class name for v1 and v2 version of BatchNormalization, # respectively. Here we explicitly convert them to their canonical names. LOCAL.ALL_OBJECTS[ 'BatchNormalizationV1'] = batch_normalization_v1.BatchNormalization LOCAL.ALL_OBJECTS[ 'BatchNormalizationV2'] = batch_normalization.BatchNormalization # Prevent circular dependencies. from keras import models # pylint: disable=g-import-not-at-top from keras.premade.linear import LinearModel # pylint: disable=g-import-not-at-top from keras.premade.wide_deep import WideDeepModel # pylint: disable=g-import-not-at-top from keras.feature_column.sequence_feature_column import SequenceFeatures # pylint: disable=g-import-not-at-top LOCAL.ALL_OBJECTS['Input'] = input_layer.Input LOCAL.ALL_OBJECTS['InputSpec'] = input_spec.InputSpec LOCAL.ALL_OBJECTS['Functional'] = models.Functional LOCAL.ALL_OBJECTS['Model'] = models.Model LOCAL.ALL_OBJECTS['SequenceFeatures'] = SequenceFeatures LOCAL.ALL_OBJECTS['Sequential'] = models.Sequential LOCAL.ALL_OBJECTS['LinearModel'] = LinearModel LOCAL.ALL_OBJECTS['WideDeepModel'] = WideDeepModel if tf.__internal__.tf2.enabled(): from keras.feature_column.dense_features_v2 import DenseFeatures # pylint: disable=g-import-not-at-top LOCAL.ALL_OBJECTS['DenseFeatures'] = DenseFeatures else: from keras.feature_column.dense_features import DenseFeatures # pylint: disable=g-import-not-at-top LOCAL.ALL_OBJECTS['DenseFeatures'] = DenseFeatures # Merging layers, function versions. LOCAL.ALL_OBJECTS['add'] = merging.add LOCAL.ALL_OBJECTS['subtract'] = merging.subtract LOCAL.ALL_OBJECTS['multiply'] = merging.multiply LOCAL.ALL_OBJECTS['average'] = merging.average LOCAL.ALL_OBJECTS['maximum'] = merging.maximum LOCAL.ALL_OBJECTS['minimum'] = merging.minimum LOCAL.ALL_OBJECTS['concatenate'] = merging.concatenate LOCAL.ALL_OBJECTS['dot'] = merging.dot @keras_export('keras.layers.serialize')
85ccb4e108551b7444213276ffb4c4c09f22f886
@keras_export('keras.layers.serialize')
14
serialization.py
659
Refactor `merge.py` into smaller logically organized files hosted under a `merging` directory. PiperOrigin-RevId: 424162837
79,730
1
353
387
141
268,861
231
keras
54
keras/layers/serialization.py
Python
50
{ "docstring": "Populates dict ALL_OBJECTS with every built-in layer.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/keras-team/keras.git
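A minimal sketch of the public round trip that relies on the registry populated above (standalone tf.keras usage, not code from this file):

import tensorflow as tf

layer = tf.keras.layers.Dense(3, activation="relu")
config = tf.keras.layers.serialize(layer)        # {'class_name': 'Dense', 'config': {...}}
restored = tf.keras.layers.deserialize(config)   # resolved via the populated objects dict
assert isinstance(restored, tf.keras.layers.Dense)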
2
get_ordering
def get_ordering(self): ordering = self.get_validated_ordering() return [values[0] + name for name, values in ordering.items()]
d10f15e55806c6944827d801cd9c2d53f5da4186
9
views.py
53
Reformat with black
15,950
0
35
32
14
73,082
14
wagtail
7
wagtail/contrib/forms/views.py
Python
3
{ "docstring": "Return the field or fields to use for ordering the queryset", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/wagtail/wagtail.git
1
test_metrics_folder
def test_metrics_folder(): with _ray_start(include_dashboard=True) as context: session_dir = context["session_dir"] assert os.path.exists( f"{session_dir}/metrics/grafana/provisioning/dashboards/default.yml" ) assert os.path.exists( f"{session_dir}/metrics/grafana/provisioning/dashboards" "/default_grafana_dashboard.json" ) assert os.path.exists( f"{session_dir}/metrics/grafana/provisioning/datasources/default.yml" ) assert os.path.exists(f"{session_dir}/metrics/prometheus/prometheus.yml")
42da4445e7a3cb358a1a02ae433a004e9fa836b5
12
test_metrics_head.py
126
Export default configurations for grafana and prometheus (#28286)
28,528
0
130
62
17
127,793
24
ray
8
python/ray/tests/test_metrics_head.py
Python
14
{ "docstring": "\n Tests that the default dashboard files get created.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
https://github.com/ray-project/ray.git
6
logical_or
def logical_or(self, other, context=None): if context is None: context = getcontext() other = _convert_other(other, raiseit=True) if not self._islogical() or not other._islogical(): return context._raise_error(InvalidOperation) # fill to context.prec (opa, opb) = self._fill_logical(context, self._int, other._int) # make the operation, and clean starting zeroes result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)]) return _dec_from_triple(0, result.lstrip('0') or '0', 0)
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
_pydecimal.py
197
add python 3.10.4 for windows
55,652
0
139
122
45
219,615
54
XX-Net
23
python3.10.4/Lib/_pydecimal.py
Python
9
{ "docstring": "Applies an 'or' operation between self and other's digits.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/XX-net/XX-Net.git
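For reference, the observable behaviour of the method above through the public decimal API (digits are OR'ed pairwise after padding to equal length):

from decimal import Decimal

Decimal('1010').logical_or(Decimal('1100'))   # Decimal('1110')
Decimal('10').logical_or(Decimal('1'))        # Decimal('11')
# Both operands must be "logical": non-negative integers written only with 0s and 1s;
# otherwise InvalidOperation is raised, as in the _islogical() check above.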
5
parse_header
def parse_header(line): parts = _parseparam(';' + line) key = parts.__next__() pdict = {} for p in parts: i = p.find('=') if i >= 0: name = p[:i].strip().lower() value = p[i+1:].strip() if len(value) >= 2 and value[0] == value[-1] == '"': value = value[1:-1] value = value.replace('\\\\', '\\').replace('\\"', '"') pdict[name] = value return key, pdict # Classes for field storage # =========================
8198943edd73a363c266633e1aa5b2a9e9c9f526
17
cgi.py
224
add python 3.10.4 for windows
56,315
0
165
128
43
221,279
61
XX-Net
16
python3.10.4/Lib/cgi.py
Python
14
{ "docstring": "Parse a Content-type like header.\n\n Return the main content-type and a dictionary of options.\n\n ", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 13 }
https://github.com/XX-net/XX-Net.git
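A quick illustration of the helper above (header values chosen arbitrarily):

from cgi import parse_header

parse_header('text/html; charset="utf-8"')
# -> ('text/html', {'charset': 'utf-8'})
parse_header('attachment; filename=report.pdf')
# -> ('attachment', {'filename': 'report.pdf'})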
2
test_unmovable_translation_pages
def test_unmovable_translation_pages(self): self.login() # BlogIndex needs translated pages before child pages can be translated self.fr_blog_index = self.en_blog_index.copy_for_translation(self.fr_locale) self.de_blog_index = self.en_blog_index.copy_for_translation(self.de_locale) # Create blog_post copies for translation self.fr_blog_post = self.en_blog_post.copy_for_translation(self.fr_locale) self.de_blog_post = self.en_blog_post.copy_for_translation(self.de_locale) # Confirm location of English blog post page before it is moved # Should be living at /blog/blog-post/ right now. But will eventually # exist at /blog-post/ self.assertEqual(self.en_blog_post.get_parent().id, self.en_blog_index.id) # Confirm the fr and de blog post pages are under the blog index page # We'll confirm these have not moved after ther POST request. original_translated_parent_ids = [ p.id for p in self.en_blog_index.get_translations() ] self.assertIn(self.fr_blog_post.get_parent().id, original_translated_parent_ids) self.assertIn(self.de_blog_post.get_parent().id, original_translated_parent_ids) response = self.client.post( reverse( "wagtailadmin_pages:move_confirm", args=( self.en_blog_post.id, self.en_homepage.id, ), ), follow=True, ) self.assertEqual(response.status_code, 200) self.en_blog_post.refresh_from_db() self.fr_blog_post.refresh_from_db() self.de_blog_post.refresh_from_db() # Check that the en_blog_post page has moved directly under the home page. self.assertEqual( self.en_blog_post.get_parent(update=True).id, self.en_homepage.id ) # Check if the fr and de pages exist under their original parent page (/blog/) self.assertIn( self.fr_blog_post.get_parent(update=True).id, original_translated_parent_ids ) self.assertIn( self.de_blog_post.get_parent(update=True).id, original_translated_parent_ids )
4cc10322a1c86c1137f5042a13d94d8017498bf7
14
test_wagtail_hooks.py
423
Sync tree: cascade unpublish, move and delete (#7984) * Add construct_synced_page_tree_list hook and use in page unpublish view * Implement construct_synced_page_tree_list in simple_translation but only when sync page tree is enabled * Add hook documentation * Add construct_synced_page_tree_list hook tests (#8058) * Move translated and alias pages when WAGTAIL_I18N_ENABLED and WAGTAILSIMPLETRANSLATION_SYNC_PAGE_TREE are enabled Co-Authored-By: Kalob Taulien <4743971+KalobTaulien@users.noreply.github.com> * Delete corresponding translations when WAGTAIL_I18N_ENABLED and WAGTAILSIMPLETRANSLATION_SYNC_PAGE_TREE are true Co-Authored-By: Kalob Taulien <4743971+KalobTaulien@users.noreply.github.com> * Rename the hook to be more specific * Update singular string version in confirm_move.html * Update test test_translation_count_in_context Co-authored-by: Kalob Taulien <4743971+KalobTaulien@users.noreply.github.com> Co-authored-by: Karl Hobley <karl@kaed.uk>
16,633
0
541
264
108
77,194
157
wagtail
29
wagtail/contrib/simple_translation/tests/test_wagtail_hooks.py
Python
35
{ "docstring": "\n Test that moving a page with WAGTAILSIMPLETRANSLATION_SYNC_PAGE_TREE\n disabled doesn't apply to its translations.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
https://github.com/wagtail/wagtail.git
4
canberra_distance
def canberra_distance(self, p): s, p = Point._normalize_dimension(self, Point(p)) if self.is_zero and p.is_zero: raise ValueError("Cannot project to the zero vector.") return Add(*((abs(a - b)/(abs(a) + abs(b))) for a, b in zip(s, p)))
498015021131af4dbb07eb110e5badaba8250c7b
15
point.py
120
Updated import locations
47,777
0
70
75
31
196,277
31
sympy
13
sympy/geometry/point.py
Python
5
{ "docstring": "The Canberra Distance from self to point p.\n\n Returns the weighted sum of horizontal and vertical distances to\n point p.\n\n Parameters\n ==========\n\n p : Point\n\n Returns\n =======\n\n canberra_distance : The weighted sum of horizontal and vertical\n distances to point p. The weight used is the sum of absolute values\n of the coordinates.\n\n Examples\n ========\n\n >>> from sympy import Point\n >>> p1, p2 = Point(1, 1), Point(3, 3)\n >>> p1.canberra_distance(p2)\n 1\n >>> p1, p2 = Point(0, 0), Point(3, 3)\n >>> p1.canberra_distance(p2)\n 2\n\n Raises\n ======\n\n ValueError when both vectors are zero.\n\n See Also\n ========\n\n sympy.geometry.point.Point.distance\n\n ", "language": "en", "n_whitespaces": 275, "n_words": 93, "vocab_size": 58 }
https://github.com/sympy/sympy.git
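The weighted sum the docstring describes, written out numerically for its two examples (plain Python, no SymPy required):

p1, p2 = (1, 1), (3, 3)
sum(abs(a - b) / (abs(a) + abs(b)) for a, b in zip(p1, p2))   # 1.0
p1, p2 = (0, 0), (3, 3)
sum(abs(a - b) / (abs(a) + abs(b)) for a, b in zip(p1, p2))   # 2.0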
8
test_sac_compilation
def test_sac_compilation(self): config = sac.DEFAULT_CONFIG.copy() config["Q_model"] = sac.DEFAULT_CONFIG["Q_model"].copy() config["num_workers"] = 0 # Run locally. config["n_step"] = 3 config["twin_q"] = True config["learning_starts"] = 0 config["prioritized_replay"] = True config["rollout_fragment_length"] = 10 config["train_batch_size"] = 10 # If we use default buffer size (1e6), the buffer will take up # 169.445 GB memory, which is beyond travis-ci's current (Mar 19, 2021) # available system memory (8.34816 GB). config["buffer_size"] = 40000 # Test with saved replay buffer. config["store_buffer_in_checkpoints"] = True num_iterations = 1 ModelCatalog.register_custom_model("batch_norm", KerasBatchNormModel) ModelCatalog.register_custom_model("batch_norm_torch", TorchBatchNormModel) image_space = Box(-1.0, 1.0, shape=(84, 84, 3)) simple_space = Box(-1.0, 1.0, shape=(3,)) tune.register_env( "random_dict_env", lambda _: RandomEnv( { "observation_space": Dict( { "a": simple_space, "b": Discrete(2), "c": image_space, } ), "action_space": Box(-1.0, 1.0, shape=(1,)), } ), ) tune.register_env( "random_tuple_env", lambda _: RandomEnv( { "observation_space": Tuple( [simple_space, Discrete(2), image_space] ), "action_space": Box(-1.0, 1.0, shape=(1,)), } ), ) for fw in framework_iterator(config, with_eager_tracing=True): # Test for different env types (discrete w/ and w/o image, + cont). for env in [ "random_dict_env", "random_tuple_env", # "MsPacmanNoFrameskip-v4", "CartPole-v0", ]: print("Env={}".format(env)) # Test making the Q-model a custom one for CartPole, otherwise, # use the default model. config["Q_model"]["custom_model"] = ( "batch_norm{}".format("_torch" if fw == "torch" else "") if env == "CartPole-v0" else None ) trainer = sac.SACTrainer(config=config, env=env) for i in range(num_iterations): results = trainer.train() check_train_results(results) print(results) check_compute_single_action(trainer) # Test, whether the replay buffer is saved along with # a checkpoint (no point in doing it for all frameworks since # this is framework agnostic). if fw == "tf" and env == "CartPole-v0": checkpoint = trainer.save() new_trainer = sac.SACTrainer(config, env=env) new_trainer.restore(checkpoint) # Get some data from the buffer and compare. data = trainer.local_replay_buffer.replay_buffers[ "default_policy" ]._storage[: 42 + 42] new_data = new_trainer.local_replay_buffer.replay_buffers[ "default_policy" ]._storage[: 42 + 42] check(data, new_data) new_trainer.stop() trainer.stop()
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
18
test_sac.py
735
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
30,126
0
1,496
447
177
133,858
285
ray
47
rllib/agents/sac/tests/test_sac.py
Python
74
{ "docstring": "Tests whether an SACTrainer can be built with all frameworks.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ray-project/ray.git