Dataset columns (dtype and observed range; for string columns the range is the string length):

column           dtype          min     max
complexity       int64          1       139
fun_name         stringlengths  1       80
code             stringlengths  101     62.2k
commit_id        stringlengths  40      40
ast_errors       stringlengths  0       3.11k
ast_levels       int64          6       36
file_name        stringlengths  5       79
n_ast_nodes      int64          17      19.2k
commit_message   stringlengths  3       15.3k
d_id             int64          12      121k
n_ast_errors     int64          0       9
n_whitespaces    int64          4       10.8k
token_counts     int64          5       3.06k
vocab_size       int64          4       1.11k
id               int64          20      338k
n_words          int64          4       4.82k
repo             stringlengths  3       22
n_identifiers    int64          2       176
path             stringlengths  7       134
language         stringclasses  1 value
nloc             int64          1       413
documentation    dict
url              stringlengths  31      59
22
get_annotations
def get_annotations(obj, *, globals=None, locals=None, eval_str=False):
    if isinstance(obj, type):
        # class
        obj_dict = getattr(obj, '__dict__', None)
        if obj_dict and hasattr(obj_dict, 'get'):
            ann = obj_dict.get('__annotations__', None)
            if isinstance(ann, types.GetSetDescriptorType):
                ann = None
        else:
            ann = None

        obj_globals = None
        module_name = getattr(obj, '__module__', None)
        if module_name:
            module = sys.modules.get(module_name, None)
            if module:
                obj_globals = getattr(module, '__dict__', None)
        obj_locals = dict(vars(obj))
        unwrap = obj
    elif isinstance(obj, types.ModuleType):
        # module
        ann = getattr(obj, '__annotations__', None)
        obj_globals = getattr(obj, '__dict__')
        obj_locals = None
        unwrap = None
    elif callable(obj):
        # this includes types.Function, types.BuiltinFunctionType,
        # types.BuiltinMethodType, functools.partial, functools.singledispatch,
        # "class funclike" from Lib/test/test_inspect... on and on it goes.
        ann = getattr(obj, '__annotations__', None)
        obj_globals = getattr(obj, '__globals__', None)
        obj_locals = None
        unwrap = obj
    else:
        raise TypeError(f"{obj!r} is not a module, class, or callable.")

    if ann is None:
        return {}

    if not isinstance(ann, dict):
        raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None")

    if not ann:
        return {}

    if not eval_str:
        return dict(ann)

    if unwrap is not None:
        while True:
            if hasattr(unwrap, '__wrapped__'):
                unwrap = unwrap.__wrapped__
                continue
            if isinstance(unwrap, functools.partial):
                unwrap = unwrap.func
                continue
            break
        if hasattr(unwrap, "__globals__"):
            obj_globals = unwrap.__globals__

    if globals is None:
        globals = obj_globals
    if locals is None:
        locals = obj_locals

    return_value = {key:
        value if not isinstance(value, str) else eval(value, globals, locals)
        for key, value in ann.items() }

    return return_value


# ----------------------------------------------------------- type-checking
8198943edd73a363c266633e1aa5b2a9e9c9f526
15
inspect.py
573
add python 3.10.4 for windows
55,272
0
676
347
108
218,384
222
XX-Net
38
python3.10.4/Lib/inspect.py
Python
56
{ "docstring": "Compute the annotations dict for an object.\n\n obj may be a callable, class, or module.\n Passing in an object of any other type raises TypeError.\n\n Returns a dict. get_annotations() returns a new dict every time\n it's called; calling it twice on the same object will return two\n different but equivalent dicts.\n\n This function handles several details for you:\n\n * If eval_str is true, values of type str will\n be un-stringized using eval(). This is intended\n for use with stringized annotations\n (\"from __future__ import annotations\").\n * If obj doesn't have an annotations dict, returns an\n empty dict. (Functions and methods always have an\n annotations dict; classes, modules, and other types of\n callables may not.)\n * Ignores inherited annotations on classes. If a class\n doesn't have its own annotations dict, returns an empty dict.\n * All accesses to object members and dict values are done\n using getattr() and dict.get() for safety.\n * Always, always, always returns a freshly-created dict.\n\n eval_str controls whether or not values of type str are replaced\n with the result of calling eval() on those values:\n\n * If eval_str is true, eval() is called on values of type str.\n * If eval_str is false (the default), values of type str are unchanged.\n\n globals and locals are passed in to eval(); see the documentation\n for eval() for more information. If either globals or locals is\n None, this function may replace that value with a context-specific\n default, contingent on type(obj):\n\n * If obj is a module, globals defaults to obj.__dict__.\n * If obj is a class, globals defaults to\n sys.modules[obj.__module__].__dict__ and locals\n defaults to the obj class namespace.\n * If obj is a callable, globals defaults to obj.__globals__,\n although if obj is a wrapped function (using\n functools.update_wrapper()) it is first unwrapped.\n ", "language": "en", "n_whitespaces": 468, "n_words": 290, "vocab_size": 146 }
https://github.com/XX-net/XX-Net.git
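The docstring above describes `inspect.get_annotations` as shipped in Python 3.10+. A minimal, hedged usage sketch of that public API (the sample function and names below are illustrative, not part of the dataset row):

    from __future__ import annotations  # stringizes annotations in this module

    import inspect


    def greet(name: str, times: int = 1) -> list[str]:
        return [f"hello {name}"] * times


    # Raw, stringized annotations vs. evaluated ones.
    print(inspect.get_annotations(greet))
    # {'name': 'str', 'times': 'int', 'return': 'list[str]'}
    print(inspect.get_annotations(greet, eval_str=True))
    # {'name': <class 'str'>, 'times': <class 'int'>, 'return': list[str]}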
1
test_prefix_complex_ordering
def test_prefix_complex_ordering(self):
    vrf1, vrf2, vrf3 = list(VRF.objects.all())
    prefixes = [
        Prefix(status=PrefixStatusChoices.STATUS_CONTAINER, vrf=None, prefix=netaddr.IPNetwork('10.0.0.0/8')),
        Prefix(status=PrefixStatusChoices.STATUS_CONTAINER, vrf=None, prefix=netaddr.IPNetwork('10.0.0.0/16')),
        Prefix(status=PrefixStatusChoices.STATUS_ACTIVE, vrf=None, prefix=netaddr.IPNetwork('10.1.0.0/16')),
        Prefix(status=PrefixStatusChoices.STATUS_ACTIVE, vrf=None, prefix=netaddr.IPNetwork('192.168.0.0/16')),
        Prefix(status=PrefixStatusChoices.STATUS_ACTIVE, vrf=vrf1, prefix=netaddr.IPNetwork('10.0.0.0/24')),
        Prefix(status=PrefixStatusChoices.STATUS_ACTIVE, vrf=vrf1, prefix=netaddr.IPNetwork('10.0.1.0/24')),
        Prefix(status=PrefixStatusChoices.STATUS_ACTIVE, vrf=vrf1, prefix=netaddr.IPNetwork('10.0.1.0/25')),
        Prefix(status=PrefixStatusChoices.STATUS_ACTIVE, vrf=vrf1, prefix=netaddr.IPNetwork('10.1.0.0/24')),
        Prefix(status=PrefixStatusChoices.STATUS_ACTIVE, vrf=vrf1, prefix=netaddr.IPNetwork('10.1.1.0/24')),
    ]
    Prefix.objects.bulk_create(prefixes)

    # Test
    self._compare(Prefix.objects.all(), prefixes)
d4a231585ac9a25d9739552d8c9e433dbf9398af
13
test_ordering.py
378
Clean up tests
78,338
0
191
246
28
266,206
43
netbox
21
netbox/ipam/tests/test_ordering.py
Python
15
{ "docstring": "\n This function tests a complex ordering of interwoven prefixes and vrfs. This is the current expected ordering of VRFs\n This includes the testing of the Container status.\n\n The proper ordering, to get proper containerization should be:\n None:10.0.0.0/8\n None:10.0.0.0/16\n VRF A:10.0.0.0/24\n VRF A:10.0.1.0/24\n VRF A:10.0.1.0/25\n None:10.1.0.0/16\n VRF A:10.1.0.0/24\n VRF A:10.1.1.0/24\n None: 192.168.0.0/16\n ", "language": "en", "n_whitespaces": 180, "n_words": 51, "vocab_size": 39 }
https://github.com/netbox-community/netbox.git
1
test_directed_partition
def test_directed_partition():
    G = nx.DiGraph()
    H = nx.DiGraph()
    G.add_nodes_from(range(10))
    H.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    G_edges = [
        (0, 2), (0, 1), (1, 0), (2, 1), (2, 0), (3, 4), (4, 3),
        (7, 8), (8, 7), (9, 10), (10, 9),
    ]
    H_edges = [
        (1, 2), (1, 6), (1, 9), (2, 3), (2, 4), (2, 5), (3, 4), (4, 3),
        (4, 5), (5, 4), (6, 7), (6, 8), (9, 10), (9, 11), (10, 11), (11, 10),
    ]
    G.add_edges_from(G_edges)
    H.add_edges_from(H_edges)
    G_expected_partition = [{0, 1, 2}, {3, 4}, {5}, {6}, {8, 7}, {9, 10}]
    G_partition = louvain_communities(G, seed=123, weight=None)
    H_expected_partition = [{2, 3, 4, 5}, {8, 1, 6, 7}, {9, 10, 11}]
    H_partition = louvain_communities(H, seed=123, weight=None)
    assert G_partition == G_expected_partition
    assert H_partition == H_expected_partition
8522eea3955f5cf3da43cacc27643d93768aeb03
9
test_louvain.py
442
Correct louvain formula, solve infinite loops (#5713) Fixes the formulae used to calculate gain and removal cost in calculation of one level of the Louvain partition tree. Errors here were causing infinite loops for some cases in the past, see gh-5175 and gh-5704. This PR also adds test cases to ensure infinite loops are not entered for these cases. Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
42,220
0
368
342
72
177,007
128
networkx
17
networkx/algorithms/community/tests/test_louvain.py
Python
44
{ "docstring": "\n Test 2 cases that were looping infinitely\n from issues #5175 and #5704\n ", "language": "en", "n_whitespaces": 22, "n_words": 12, "vocab_size": 12 }
https://github.com/networkx/networkx.git
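The test above exercises `louvain_communities` on directed graphs with a fixed seed. A short, hedged usage sketch against the public NetworkX API (available since NetworkX 2.8; the barbell graph below is illustrative and not taken from the row):

    import networkx as nx
    from networkx.algorithms.community import louvain_communities

    # Two 4-cliques joined by a single bridge edge: Louvain should
    # recover the two cliques as communities.
    G = nx.barbell_graph(4, 0)
    communities = louvain_communities(G, seed=123)
    print(communities)  # e.g. [{0, 1, 2, 3}, {4, 5, 6, 7}]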
2
tobitmap
def tobitmap(self, name="image"):
    self.load()
    if self.mode != "1":
        msg = "not a bitmap"
        raise ValueError(msg)
    data = self.tobytes("xbm")
    return b"".join(
        [
            f"#define {name}_width {self.size[0]}\n".encode("ascii"),
            f"#define {name}_height {self.size[1]}\n".encode("ascii"),
            f"static char {name}_bits[] = {{\n".encode("ascii"),
            data,
            b"};",
        ]
    )
2ae55ccbdad9c842929fb238ea1eb81d1f999024
14
Image.py
173
Improve exception traceback readability
70,075
0
197
76
33
243,702
36
Pillow
12
src/PIL/Image.py
Python
15
{ "docstring": "\n Returns the image converted to an X11 bitmap.\n\n .. note:: This method only works for mode \"1\" images.\n\n :param name: The name prefix to use for the bitmap variables.\n :returns: A string containing an X11 bitmap.\n :raises ValueError: If the mode is not \"1\"\n ", "language": "en", "n_whitespaces": 87, "n_words": 44, "vocab_size": 35 }
https://github.com/python-pillow/Pillow.git
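For context, a hedged example of calling the method above through Pillow's public API; as the docstring states, it only accepts mode "1" images (the image and variable names here are illustrative):

    from PIL import Image

    # tobitmap() only accepts mode "1" (bilevel) images.
    img = Image.new("1", (8, 8), 1)
    xbm = img.tobitmap(name="icon")
    print(xbm.decode("ascii"))
    # #define icon_width 8
    # #define icon_height 8
    # static char icon_bits[] = { ... };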
2
set_fontsize
def set_fontsize(self, s=None):
    if s is None:
        s = mpl.rcParams["legend.fontsize"]

    self.prop = FontProperties(size=s)
    self.stale = True
438d30b227b1fef7e8733578f851e76a8e360f24
10
offsetbox.py
64
Get rcParams from mpl
23,550
0
55
38
13
109,359
16
matplotlib
9
lib/matplotlib/offsetbox.py
Python
5
{ "docstring": "\n Set the fontsize in points.\n\n If *s* is not given, reset to :rc:`legend.fontsize`.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
https://github.com/matplotlib/matplotlib.git
1
slider_var
def slider_var(self) -> tk.IntVar:
    retval = self._vars["slider"]
    assert isinstance(retval, tk.IntVar)
    return retval
7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5
8
preview_tk.py
49
Training - Use custom preview pop-out
20,963
0
40
29
11
101,553
12
faceswap
7
lib/training/preview_tk.py
Python
6
{ "docstring": ":class:`tkinter.IntVar`: The variable holding the currently selected percentage scaling\n amount in the slider. ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 12 }
https://github.com/deepfakes/faceswap.git
1
_make_pair_wise_relative_positions
def _make_pair_wise_relative_positions(self) -> None:
    device = self.tau.device
    coordinates = torch.stack(torch.meshgrid([
        torch.arange(self.window_size[0], device=device),
        torch.arange(self.window_size[1], device=device)]), dim=0).flatten(1)
    relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :]
    relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float()
    relative_coordinates_log = torch.sign(relative_coordinates) * torch.log(
        1.0 + relative_coordinates.abs())
    self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False)
c6e4b7895a7dbcd9b98396cbef383dd1c72b0ad3
17
swin_transformer_v2_cr.py
223
Swin V2 CR impl refactor. * reformat and change some naming so closer to existing timm vision transformers * remove typing that wasn't adding clarity (or causing torchscript issues) * support non-square windows * auto window size adjust from image size * post-norm + main-branch no
119,926
0
123
147
35
331,789
41
pytorch-image-models
22
timm/models/swin_transformer_v2_cr.py
Python
11
{ "docstring": "Method initializes the pair-wise relative positions to compute the positional biases.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/huggingface/pytorch-image-models.git
4
_get_newest_folder
def _get_newest_folder(self) -> str:
    assert self._pathoutput is not None
    folders = [os.path.join(self._pathoutput, folder)
               for folder in os.listdir(self._pathoutput)
               if os.path.isdir(os.path.join(self._pathoutput, folder))]
    folders.sort(key=os.path.getmtime)
    retval = folders[-1] if folders else self._pathoutput
    logger.debug("sorted folders: %s, return value: %s", folders, retval)
    return retval
dc18c74eea0c7837a820d27628cb12b0824fa30e
13
utils.py
153
Bugfix: Preview for extract in batch mode
20,911
0
123
99
32
101,499
38
faceswap
17
lib/gui/utils.py
Python
19
{ "docstring": " Obtain the most recent folder created in the extraction output folder when processing\n in batch mode.\n\n Returns\n -------\n str\n The most recently modified folder within the parent output folder. If no folders have\n been created, returns the parent output folder\n\n ", "language": "en", "n_whitespaces": 98, "n_words": 40, "vocab_size": 29 }
https://github.com/deepfakes/faceswap.git
16
average_shortest_path_length
def average_shortest_path_length(G, weight=None, method=None):
    r
    single_source_methods = ["unweighted", "dijkstra", "bellman-ford"]
    all_pairs_methods = ["floyd-warshall", "floyd-warshall-numpy"]
    supported_methods = single_source_methods + all_pairs_methods

    if method is None:
        method = "unweighted" if weight is None else "dijkstra"
    if method not in supported_methods:
        raise ValueError(f"method not supported: {method}")

    n = len(G)
    # For the special case of the null graph, raise an exception, since
    # there are no paths in the null graph.
    if n == 0:
        msg = (
            "the null graph has no paths, thus there is no average"
            "shortest path length"
        )
        raise nx.NetworkXPointlessConcept(msg)
    # For the special case of the trivial graph, return zero immediately.
    if n == 1:
        return 0
    # Shortest path length is undefined if the graph is disconnected.
    if G.is_directed() and not nx.is_weakly_connected(G):
        raise nx.NetworkXError("Graph is not weakly connected.")
    if not G.is_directed() and not nx.is_connected(G):
        raise nx.NetworkXError("Graph is not connected.")

    # Compute all-pairs shortest paths.
b5d41847b8db0c82372faf69cd3a339d11da7ef0
11
generic.py
248
DOC: Update documentation to include callables for weight argument (#5307) Update docs to include functions as valid input for weight argument.
41,808
0
272
239
85
176,291
147
networkx
17
networkx/algorithms/shortest_paths/generic.py
Python
93
{ "docstring": "Returns the average shortest path length.\n\n The average shortest path length is\n\n .. math::\n\n a =\\sum_{s,t \\in V} \\frac{d(s, t)}{n(n-1)}\n\n where `V` is the set of nodes in `G`,\n `d(s, t)` is the shortest path from `s` to `t`,\n and `n` is the number of nodes in `G`.\n\n Parameters\n ----------\n G : NetworkX graph\n\n weight : None, string or function, optional (default = None)\n If None, every edge has weight/distance/cost 1.\n If a string, use this edge attribute as the edge weight.\n Any edge attribute not present defaults to 1.\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly\n three positional arguments: the two endpoints of an edge and\n the dictionary of edge attributes for that edge.\n The function must return a number.\n\n method : string, optional (default = 'unweighted' or 'djikstra')\n The algorithm to use to compute the path lengths.\n Supported options are 'unweighted', 'dijkstra', 'bellman-ford',\n 'floyd-warshall' and 'floyd-warshall-numpy'.\n Other method values produce a ValueError.\n The default method is 'unweighted' if `weight` is None,\n otherwise the default method is 'dijkstra'.\n\n Raises\n ------\n NetworkXPointlessConcept\n If `G` is the null graph (that is, the graph on zero nodes).\n\n NetworkXError\n If `G` is not connected (or not weakly connected, in the case\n of a directed graph).\n\n ValueError\n If `method` is not among the supported options.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> nx.average_shortest_path_length(G)\n 2.0\n\n For disconnected graphs, you can compute the average shortest path\n length for each component\n\n >>> G = nx.Graph([(1, 2), (3, 4)])\n >>> for C in (G.subgraph(c).copy() for c in nx.connected_components(G)):\n ... print(nx.average_shortest_path_length(C))\n 1.0\n 1.0\n\n ", "language": "en", "n_whitespaces": 489, "n_words": 269, "vocab_size": 156 }
https://github.com/networkx/networkx.git
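The commit above documents that `weight` may also be a callable. A small, hedged usage sketch of that through the public NetworkX API (the graph and edge weights are illustrative):

    import networkx as nx

    G = nx.Graph()
    G.add_edge("a", "b", dist=2.0)
    G.add_edge("b", "c", dist=3.0)

    # String attribute name ...
    print(nx.average_shortest_path_length(G, weight="dist"))

    # ... or a callable taking (u, v, edge_attrs) and returning a number.
    print(nx.average_shortest_path_length(G, weight=lambda u, v, d: d["dist"]))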
2
assertDisallows
def assertDisallows(self, func_name):
    try:
        with self.assertRaises(Exception):
            yield
    except Exception as e:  # pylint: disable=broad-except
        raise RuntimeError(
            f"Expected a transfer to be disallowed while running: {func_name}"
        ) from e
b7e1fec2500daec9e42e79c5983183c759e318ed
12
transfer_guard_test.py
63
Implement the JAX transfer guard API Adds `--jax_transfer_guard` flag and `jax.transfer_guard()` context manager that allows logging or disallowing unintended transfers. The API distinguishes between two types of transfers: * explicit transfers: `jax.device_put*()` and `jax.device_get()` calls. * implicit transfers: Other transfers (e.g., printing a `DeviceArray`). The transfer guard can take an action based on its guard level: * "allow": Silently allow all transfers (default; same as the previous behavior). * "log": Log and allow implicit transfers. Silently allow explicit transfers. * "disallow": Disallow implicit transfers. Silently allow explicit transfers. * "log_explicit": Log and allow all transfers. * "disallow_explicit": Disallow all transfers. The API also allows fine-control the transfer guard level of individual transfer directions. Their flag and context manager names are suffixed with the transfer direction: * "host_to_device": Converting a Python value into a `DeviceBuffer`. * "device_to_device": Copying a `DeviceBuffer` to a different device. * "device_to_host": Fetching the value of a `DeviceBuffer`. Example: ``` x = jnp.array(1) y = jnp.array(2) z = jnp.array(3) print(x) # No error with jax.transfer_guard("disallow"): print(x) # No error; x is already fetched print(jax.device_get(y)) # No error print(z) # Error! ``` PiperOrigin-RevId: 427562278
26,562
0
85
32
28
119,250
28
jax
7
tests/transfer_guard_test.py
Python
8
{ "docstring": "Asserts that a transfer in the context is disallowed.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/google/jax.git
1
onModuleEncounter
def onModuleEncounter(self, module_filename, module_name, module_kind):
    # Virtual method, pylint: disable=no-self-use,unused-argument
    return None
31e4a7edb61212d4dddafb281ea9c842e646e508
6
PluginBase.py
23
Cleanup, avoid "shlib" naming, and use "extension" instead. * This should make the code more clear for others to read.
42,744
0
33
14
12
178,559
12
Nuitka
5
nuitka/plugins/PluginBase.py
Python
2
{ "docstring": "Help decide whether to include a module.\n\n Args:\n module_filename: filename\n module_name: full module name\n module_kind: one of \"py\", \"extension\" (shared library)\n Returns:\n True or False\n ", "language": "en", "n_whitespaces": 90, "n_words": 25, "vocab_size": 25 }
https://github.com/Nuitka/Nuitka.git
4
copy_placeholders_and_check_results
def copy_placeholders_and_check_results(self, placeholders):
    for original_placeholder in placeholders:
        # get the plugins
        original_plugins = original_placeholder.get_plugins()
        # copy them to a new placeholder
        copied_placeholder = Placeholder.objects.create(slot=original_placeholder.slot)
        copy_plugins_to(
            original_placeholder.get_plugins(),
            copied_placeholder
        )
        copied_plugins = copied_placeholder.get_plugins()
        # we should find the same number of plugins in both placeholders
        self.assertEqual(
            original_plugins.count(),
            copied_plugins.count()
        )
        # quick check: make sure the two querysets match:
        for original, copy in zip(original_plugins, copied_plugins):
            self.assertEqual(
                Text.objects.get(id=original.id).body,
                Text.objects.get(id=copy.id).body
            )
        # Now build a *tree* of the plugins, and match those - it's not
        # enough just to compare querysets as above; we should *also* check
        # that when we build a tree, the various nodes are assembled as we
        # would expect. We will pump the trees into a pair of lists:
        original_plugins_list = []
        copied_plugins_list = []
        # This function builds the tree of plugins, starting from its roots.
        # In that respect it's like many of the plugin tree-building
        # routines elsewhere in the system.
c1290c9ff89cb00caa5469129fd527e9d82cd820
16
test_nested_plugins.py
204
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <cclauss@me.com> * ci: codespell config taken from #7292
17,385
0
527
321
103
82,410
154
django-cms
24
cms/tests/test_nested_plugins.py
Python
54
{ "docstring": "\n This function is not itself a test; rather, it can be used by any test\n that has created placeholders. It will check that whatever the plugin\n structure in the placeholder, it will be copied accurately when they are\n copied.\n\n placeholders is a list of placeholders\n ", "language": "en", "n_whitespaces": 88, "n_words": 45, "vocab_size": 37 }
https://github.com/django-cms/django-cms.git
1
test_sqlite_error_codes
def test_sqlite_error_codes(self, code):
    pyvalue = getattr(sqlite3, f"SQLITE_{code.name}")
    assert pyvalue == code.value
ee4d6e0396a6b570f4d5592a9c4c1a9fee1027b6
11
test_sql.py
45
sql: Add *all* primary sqlite result codes For three reasons: - There are only 31 of them, and we don't really expect any more to turn up (last happened in 2013, and we have a test for it happening) - It makes for nicer debug output - It always felt strange to only have a small subset in the enum
117,948
0
32
23
10
321,856
11
qutebrowser
8
tests/unit/misc/test_sql.py
Python
3
{ "docstring": "Cross check our error codes with the ones in Python 3.11+.\n\n See https://github.com/python/cpython/commit/86d8b465231\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 13 }
https://github.com/qutebrowser/qutebrowser.git
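The test above cross-checks qutebrowser's enum against the `sqlite3.SQLITE_*` constants that CPython added in 3.11. A hedged sketch of what that lookup looks like in isolation (requires Python 3.11+; the two constants shown are standard SQLite primary result codes):

    import sqlite3
    import sys

    assert sys.version_info >= (3, 11), "SQLITE_* result codes landed in 3.11"

    # Primary result codes are exposed as module-level integer constants.
    print(sqlite3.SQLITE_OK)    # 0
    print(sqlite3.SQLITE_BUSY)  # 5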
2
get_latest_revision_as_object
def get_latest_revision_as_object(self):
    latest_revision = self.get_latest_revision()
    if latest_revision:
        return latest_revision.as_object()
    return self
cf3cea9a5b171efff525baefe7d25df6f7cd2c60
9
__init__.py
43
Add docs for RevisionMixin
16,874
0
50
24
10
79,156
11
wagtail
5
wagtail/models/__init__.py
Python
5
{ "docstring": "\n Returns the latest revision of the object as an instance of the model.\n If no latest revision exists, returns the object itself.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 15 }
https://github.com/wagtail/wagtail.git
3
test_empty_objects
def test_empty_objects(call_ray_start_shared):
    objects = [0, b"", "", [], np.array(()), {}, set(), None]
    with ray_start_client_server_for_address(call_ray_start_shared) as ray:
        for obj in objects:
            ref = ray.put(obj)
            if isinstance(obj, np.ndarray):
                assert np.array_equal(ray.get(ref), obj)
            else:
                assert ray.get(ref) == obj
297341e107daee1ea3aff991ae8ea8c90993c683
15
test_client.py
147
[Test][Client] Only start ray once in client tests (#28835) It looks like we're frequently starting and shutting down Ray in this test because `ray_start_client_server` isn't connecting to the Ray created by `ray_start_regular_shared`, and is instead starting a new Ray head process every time it launches. Ray client tests are failing frequently with: ``` [2022-10-06 07:31:46,253 E 13235 13751] core_worker_process.cc:277: The core worker has already been shutdown. This happens when the language frontend accesses the Ray's worker after it is shutdown. The process will exit ``` Which is probably caused by having multiple ray clusters running simultaneous, with some shutting down asynchronously. This refactor forces all of the tests in the module to use the same Ray cluster. Also fixes two other sources of potential flakiness: * Joins the thread in test_client_thread_safe (seems like this has a bad interaction when the client server is cleaned up) * Calls ray.get in `test_stdout_log_stream`, to make sure that the remote function is done running before we try searching for its output Should also have the happy side effect of speeding up test_client. Ran the `Small & Client` tests (regular and external redis) twice each, no flakes, and windows version of test_client.
30,158
0
113
92
31
133,942
34
ray
15
python/ray/tests/test_client.py
Python
9
{ "docstring": "\n Tests that client works with \"empty\" objects. Sanity check, since put requests\n will fail if the serialized version of an object consists of zero bytes.\n ", "language": "en", "n_whitespaces": 35, "n_words": 25, "vocab_size": 24 }
https://github.com/ray-project/ray.git
6
parse_multiple_json
def parse_multiple_json(json_file, offset=None):
    json_info_list = []
    if not os.path.exists(json_file):
        return json_info_list

    try:
        with open(json_file, "r") as f:
            if offset:
                f.seek(offset)
            for line in f:
                if line[-1] != "\n":
                    # Incomplete line
                    break
                json_info = json.loads(line)
                json_info_list.append(json_info)
                offset += len(line)
    except BaseException as e:
        logging.error(e.message)

    return json_info_list, offset
d2f0c3b2f64b41f6541f6521e98cf3a37577c016
14
utils.py
173
Clean up docstyle in data, ml, and tune packages (#25188)
31,937
0
221
100
38
140,353
47
ray
21
python/ray/tune/automlboard/common/utils.py
Python
17
{ "docstring": "Parse multiple json records from the given file.\n\n Seek to the offset as the start point before parsing\n if offset set. return empty list if the json file does\n not exists or exception occurs.\n\n Args:\n json_file: File path to be parsed.\n offset: Initial seek position of the file.\n\n Returns:\n A dict of json info.\n New offset after parsing.\n\n ", "language": "en", "n_whitespaces": 104, "n_words": 58, "vocab_size": 46 }
https://github.com/ray-project/ray.git
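The utility above tails a JSON-lines file while remembering an offset so later calls can resume where the previous one stopped. A minimal standalone sketch of the same offset-resume pattern, independent of Ray (the function and file names are illustrative):

    import json
    import os


    def read_new_records(path, offset=0):
        """Return (records, new_offset) for complete JSON lines appended since offset."""
        records = []
        if not os.path.exists(path):
            return records, offset
        with open(path, "r") as f:
            f.seek(offset)
            for line in f:
                if not line.endswith("\n"):
                    break  # incomplete trailing line; pick it up on the next call
                records.append(json.loads(line))
                # Mirrors the original's text-mode offset arithmetic (fine for ASCII logs).
                offset += len(line)
        return records, offset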
1
test_worker_duty_configs
def test_worker_duty_configs(self) -> None:
    worker1_config = self._make_worker_config(
        worker_app="synapse.app.generic_worker",
        worker_name="worker1",
        extras={
            "notify_appservices_from_worker": "worker2",
            "update_user_directory_from_worker": "worker1",
        },
    )
    self.assertFalse(worker1_config.should_notify_appservices)
    self.assertTrue(worker1_config.should_update_user_directory)

    worker2_config = self._make_worker_config(
        worker_app="synapse.app.generic_worker",
        worker_name="worker2",
        extras={
            "notify_appservices_from_worker": "worker2",
            "update_user_directory_from_worker": "worker1",
        },
    )
    self.assertTrue(worker2_config.should_notify_appservices)
    self.assertFalse(worker2_config.should_update_user_directory)
699192fc1a1055a4bec2345bc80f120f28470c73
12
test_workers.py
170
Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. (#12654) Co-authored-by: Shay <hillerys@element.io>
72,159
0
243
96
22
248,221
32
synapse
12
tests/config/test_workers.py
Python
24
{ "docstring": "\n Additional tests for the worker duties\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/matrix-org/synapse.git
2
upgrade
def upgrade():
    try:
        with op.batch_alter_table('connection') as batch_op:
            batch_op.alter_column("conn_id", nullable=False, existing_type=sa.String(250, **COLLATION_ARGS))
            batch_op.create_unique_constraint(constraint_name="unique_conn_id", columns=["conn_id"])
    except sa.exc.IntegrityError:
        raise Exception("Make sure there are no duplicate connections with the same conn_id or null values")
69f6f9e01b6df76c3c8fa266d460324163957887
15
8d48763f6d53_add_unique_constraint_to_conn_id.py
117
Autogenerate migration reference doc (#21601) * document airflow version in each alembic migration module and use this to autogen the doc * update each migration module to have the same description used in migration ref (so it can be used in autogen)
8,603
0
75
65
29
45,476
30
airflow
16
airflow/migrations/versions/8d48763f6d53_add_unique_constraint_to_conn_id.py
Python
7
{ "docstring": "Apply Add unique constraint to ``conn_id`` and set it as non-nullable", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/apache/airflow.git
33
classify_pde
def classify_pde(eq, func=None, dict=False, *, prep=True, **kwargs): if func and len(func.args) != 2: raise NotImplementedError("Right now only partial " "differential equations of two variables are supported") if prep or func is None: prep, func_ = _preprocess(eq, func) if func is None: func = func_ if isinstance(eq, Equality): if eq.rhs != 0: return classify_pde(eq.lhs - eq.rhs, func) eq = eq.lhs f = func.func x = func.args[0] y = func.args[1] fx = f(x,y).diff(x) fy = f(x,y).diff(y) # TODO : For now pde.py uses support offered by the ode_order function # to find the order with respect to a multi-variable function. An # improvement could be to classify the order of the PDE on the basis of # individual variables. order = ode_order(eq, f(x,y)) # hint:matchdict or hint:(tuple of matchdicts) # Also will contain "default":<default hint> and "order":order items. matching_hints = {'order': order} if not order: if dict: matching_hints["default"] = None return matching_hints else: return () eq = expand(eq) a = Wild('a', exclude = [f(x,y)]) b = Wild('b', exclude = [f(x,y), fx, fy, x, y]) c = Wild('c', exclude = [f(x,y), fx, fy, x, y]) d = Wild('d', exclude = [f(x,y), fx, fy, x, y]) e = Wild('e', exclude = [f(x,y), fx, fy]) n = Wild('n', exclude = [x, y]) # Try removing the smallest power of f(x,y) # from the highest partial derivatives of f(x,y) reduced_eq = None if eq.is_Add: var = set(combinations_with_replacement((x,y), order)) dummyvar = var.copy() power = None for i in var: coeff = eq.coeff(f(x,y).diff(*i)) if coeff != 1: match = coeff.match(a*f(x,y)**n) if match and match[a]: power = match[n] dummyvar.remove(i) break dummyvar.remove(i) for i in dummyvar: coeff = eq.coeff(f(x,y).diff(*i)) if coeff != 1: match = coeff.match(a*f(x,y)**n) if match and match[a] and match[n] < power: power = match[n] if power: den = f(x,y)**power reduced_eq = Add(*[arg/den for arg in eq.args]) if not reduced_eq: reduced_eq = eq if order == 1: reduced_eq = collect(reduced_eq, f(x, y)) r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e) if r: if not r[e]: ## Linear first-order homogeneous partial-differential ## equation with constant coefficients r.update({'b': b, 'c': c, 'd': d}) matching_hints["1st_linear_constant_coeff_homogeneous"] = r else: if r[b]**2 + r[c]**2 != 0: ## Linear first-order general partial-differential ## equation with constant coefficients r.update({'b': b, 'c': c, 'd': d, 'e': e}) matching_hints["1st_linear_constant_coeff"] = r matching_hints[ "1st_linear_constant_coeff_Integral"] = r else: b = Wild('b', exclude=[f(x, y), fx, fy]) c = Wild('c', exclude=[f(x, y), fx, fy]) d = Wild('d', exclude=[f(x, y), fx, fy]) r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e) if r: r.update({'b': b, 'c': c, 'd': d, 'e': e}) matching_hints["1st_linear_variable_coeff"] = r # Order keys based on allhints. retlist = [i for i in allhints if i in matching_hints] if dict: # Dictionaries are ordered arbitrarily, so make note of which # hint would come first for pdsolve(). Use an ordered dict in Py 3. matching_hints["default"] = None matching_hints["ordered_hints"] = tuple(retlist) for i in allhints: if i in matching_hints: matching_hints["default"] = i break return matching_hints else: return tuple(retlist)
6a9ff86786f73ca07fa9004913037de6ba5cb155
19
pde.py
1,330
More list comprehensions
48,937
0
1,390
837
234
198,452
489
sympy
54
sympy/solvers/pde.py
Python
89
{ "docstring": "\n Returns a tuple of possible pdsolve() classifications for a PDE.\n\n The tuple is ordered so that first item is the classification that\n pdsolve() uses to solve the PDE by default. In general,\n classifications near the beginning of the list will produce\n better solutions faster than those near the end, though there are\n always exceptions. To make pdsolve use a different classification,\n use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()\n docstring for different meta-hints you can use.\n\n If ``dict`` is true, classify_pde() will return a dictionary of\n hint:match expression terms. This is intended for internal use by\n pdsolve(). Note that because dictionaries are ordered arbitrarily,\n this will most likely not be in the same order as the tuple.\n\n You can get help on different hints by doing help(pde.pde_hintname),\n where hintname is the name of the hint without \"_Integral\".\n\n See sympy.pde.allhints or the sympy.pde docstring for a list of all\n supported hints that can be returned from classify_pde.\n\n\n Examples\n ========\n\n >>> from sympy.solvers.pde import classify_pde\n >>> from sympy import Function, Eq\n >>> from sympy.abc import x, y\n >>> f = Function('f')\n >>> u = f(x, y)\n >>> ux = u.diff(x)\n >>> uy = u.diff(y)\n >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)\n >>> classify_pde(eq)\n ('1st_linear_constant_coeff_homogeneous',)\n ", "language": "en", "n_whitespaces": 296, "n_words": 204, "vocab_size": 136 }
https://github.com/sympy/sympy.git
1
getlength
def getlength(self, text, *args, **kwargs):
    width, height = self.font.getsize(text)
    return width


##
# Wrapper for FreeType fonts.  Application code should use the
# <b>truetype</b> factory function to create font objects.
c854bf8d1c05022bec4309fbf6b547e494db9373
9
ImageFont.py
48
add getbbox and getlength to basic ImageFont and update related tests
69,962
0
49
28
29
243,052
30
Pillow
9
src/PIL/ImageFont.py
Python
3
{ "docstring": "\n Returns length (in pixels) of given text.\n This is the amount by which following text should be offset.\n\n .. versionadded:: 9.2.0\n ", "language": "en", "n_whitespaces": 50, "n_words": 21, "vocab_size": 21 }
https://github.com/python-pillow/Pillow.git
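The commit above adds `getlength` to the basic bitmap `ImageFont`; it already existed on FreeType fonts. A hedged usage sketch (assumes Pillow 9.2+ for the bitmap-font variant; the text is illustrative):

    from PIL import ImageFont

    font = ImageFont.load_default()
    advance = font.getlength("Hello, world")
    print(advance)  # horizontal advance in pixels for the default bitmap font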
2
get_available_item_locations_for_batched_item
def get_available_item_locations_for_batched_item(item_code, from_warehouses, required_qty, company):
    warehouse_condition = 'and warehouse in %(warehouses)s' if from_warehouses else ''
    batch_locations = frappe.db.sql(.format(warehouse_condition=warehouse_condition), { #nosec
        'item_code': item_code,
        'company': company,
        'today': today(),
        'warehouses': from_warehouses
    }, as_dict=1)

    return batch_locations
517fbf1d1f0a7d44e817b3f22ae30142e7bdf4c8
12
pick_list.py
102
fix: Ambigous column in picklist query
13,659
0
23
61
29
64,544
32
erpnext
13
erpnext/stock/doctype/pick_list/pick_list.py
Python
30
{ "docstring": "\n\t\tSELECT\n\t\t\tsle.`warehouse`,\n\t\t\tsle.`batch_no`,\n\t\t\tSUM(sle.`actual_qty`) AS `qty`\n\t\tFROM\n\t\t\t`tabStock Ledger Entry` sle, `tabBatch` batch\n\t\tWHERE\n\t\t\tsle.batch_no = batch.name\n\t\t\tand sle.`item_code`=%(item_code)s\n\t\t\tand sle.`company` = %(company)s\n\t\t\tand batch.disabled = 0\n\t\t\tand sle.is_cancelled=0\n\t\t\tand IFNULL(batch.`expiry_date`, '2200-01-01') > %(today)s\n\t\t\t{warehouse_condition}\n\t\tGROUP BY\n\t\t\tsle.`warehouse`,\n\t\t\tsle.`batch_no`,\n\t\t\tsle.`item_code`\n\t\tHAVING `qty` > 0\n\t\tORDER BY IFNULL(batch.`expiry_date`, '2200-01-01'), batch.`creation`\n\t", "language": "en", "n_whitespaces": 29, "n_words": 49, "vocab_size": 36 }
https://github.com/frappe/erpnext.git
1
test_thermostat_hvac_modes
async def test_thermostat_hvac_modes(hass, hk_driver):
    entity_id = "climate.test"

    hass.states.async_set(
        entity_id, HVACMode.OFF, {ATTR_HVAC_MODES: [HVACMode.HEAT, HVACMode.OFF]}
    )
    await hass.async_block_till_done()
    acc = Thermostat(hass, hk_driver, "Climate", entity_id, 1, None)
    hk_driver.add_accessory(acc)
    await acc.run()
    await hass.async_block_till_done()
    hap = acc.char_target_heat_cool.to_HAP()
    assert hap["valid-values"] == [0, 1]
    assert acc.char_target_heat_cool.value == 0

    with pytest.raises(ValueError):
        acc.char_target_heat_cool.set_value(3)
    await hass.async_block_till_done()
    assert acc.char_target_heat_cool.value == 0

    acc.char_target_heat_cool.set_value(1)
    await hass.async_block_till_done()
    assert acc.char_target_heat_cool.value == 1

    with pytest.raises(ValueError):
        acc.char_target_heat_cool.set_value(2)
    await hass.async_block_till_done()
    assert acc.char_target_heat_cool.value == 1
f453726b1862d1d247f6aefdd5f23455b87c11cf
11
test_type_thermostats.py
307
Cleanup HVACAction and HVACMode in tests (#78813)
106,864
0
150
187
39
308,103
66
core
23
tests/components/homekit/test_type_thermostats.py
Python
24
{ "docstring": "Test if unsupported HVAC modes are deactivated in HomeKit.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
test_datetime_fractional_seconds
def test_datetime_fractional_seconds(all_parsers):
    parser = all_parsers
    data =
    result = parser.read_csv(
        StringIO(data),
        header=0,
        date_parser=lambda x: pd.to_datetime(x, format="%Y %m %d %H %M %S.%f"),
        parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]},
    )
    expected = DataFrame(
        [
            [datetime(2001, 1, 5, 10, 0, 0, microsecond=123456), 0.0, 10.0],
            [datetime(2001, 1, 5, 10, 0, 0, microsecond=500000), 1.0, 11.0],
        ],
        columns=["ymdHMS", "a", "b"],
    )
    tm.assert_frame_equal(result, expected)


@xfail_pyarrow
d2e972301c099967ef3050c9feda1d116e1dd85a
@xfail_pyarrow
14
test_parse_dates.py
202
DEP: Enforce deprecation of date converters for csv (#49086) * DEP: Enforce deprecation of date converters for csv * Add whatsnew * Add check
40,537
1
150
147
47
170,154
59
pandas
22
pandas/tests/io/parser/test_parse_dates.py
Python
21
{ "docstring": "\\\nyear,month,day,hour,minute,second,a,b\n2001,01,05,10,00,0.123456,0.0,10.\n2001,01,5,10,0,0.500000,1.,11.\n", "language": "en", "n_whitespaces": 0, "n_words": 4, "vocab_size": 4 }
https://github.com/pandas-dev/pandas.git
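The test above hinges on `%S.%f` parsing fractional seconds. The same behaviour can be observed directly with `pandas.to_datetime` (the sample value is illustrative):

    import pandas as pd

    ts = pd.to_datetime("2001 01 05 10 00 0.123456", format="%Y %m %d %H %M %S.%f")
    print(ts)              # 2001-01-05 10:00:00.123456
    print(ts.microsecond)  # 123456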
2
_get_html_response
def _get_html_response(url, session):
    # type: (str, PipSession) -> Response
    if is_archive_file(Link(url).filename):
        _ensure_html_response(url, session=session)

    logger.debug('Getting page %s', redact_auth_from_url(url))

    resp = session.get(
        url,
        headers={
            "Accept": "text/html",
            # We don't want to blindly returned cached data for
            # /simple/, because authors generally expecting that
            # twine upload && pip install will function, but if
            # they've done a pip install in the last ~10 minutes
            # it won't. Thus by setting this to zero we will not
            # blindly use any cached data, however the benefit of
            # using max-age=0 instead of no-cache, is that we will
            # still support conditional requests, so we will still
            # minimize traffic sent in cases where the page hasn't
            # changed at all, we will just always incur the round
            # trip for the conditional GET now instead of only
            # once per 10 minutes.
            # For more information, please see pypa/pip#5670.
            "Cache-Control": "max-age=0",
        },
    )
    raise_for_status(resp)

    # The check for archives above only works if the url ends with
    # something that looks like an archive. However that is not a
    # requirement of an url. Unless we issue a HEAD request on every
    # url we cannot know ahead of time for sure if something is HTML
    # or not. However we can check after we've downloaded it.
    _ensure_html_header(resp)

    return resp
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
12
collector.py
139
upd; format
12,260
0
452
70
149
60,719
217
transferlearning
15
.venv/lib/python3.8/site-packages/pip/_internal/index/collector.py
Python
14
{ "docstring": "Access an HTML page with GET, and return the response.\n\n This consists of three parts:\n\n 1. If the URL looks suspiciously like an archive, send a HEAD first to\n check the Content-Type is HTML, to avoid downloading a large file.\n Raise `_NotHTTP` if the content type cannot be determined, or\n `_NotHTML` if it is not HTML.\n 2. Actually perform the request. Raise HTTP exceptions on network failures.\n 3. Check the Content-Type header to make sure we got HTML, and raise\n `_NotHTML` otherwise.\n ", "language": "en", "n_whitespaces": 121, "n_words": 82, "vocab_size": 66 }
https://github.com/jindongwang/transferlearning.git
22
bcoo_update_layout
def bcoo_update_layout(mat, *, n_batch=None, n_dense=None, on_inefficient='error'): # TODO(jakevdp): allow specification of nse? # TODO(jakevdp): there is room for some improvements here: # - we could probably do better in the case of converting a dense dim to # a batch dim or vice-versa. Worth adding that special case? # - we could work to preserve broadcasted batch dimensions when possible. # - if indices are known to be unique, we can convert them to batch/dense # dimensions more efficiently. n_batch = mat.n_batch if n_batch is None else operator.index(n_batch) n_dense = mat.n_dense if n_dense is None else operator.index(n_dense) if (n_batch, n_dense) == (mat.n_batch, mat.n_dense): return mat n_sparse = mat.ndim - n_batch - n_dense if on_inefficient not in ['error', 'warn', None]: raise ValueError("on_inefficent={on_inefficient!r}; expected one of ['error', 'warn', None].") if n_batch < 0: raise ValueError(f"n_batch must be non-negative; got {n_batch}") if n_dense < 0: raise ValueError(f"n_dense must be non-negative; got {n_dense}") if n_sparse < 0: raise ValueError(f"sum of n_batch={n_batch} and n_dense={n_dense} " f"cannot be larger than mat.ndim={mat.ndim}.") def _maybe_err_or_warn(msg): if on_inefficient == 'error': msg += (" To disable this error, set the on_inefficient argument " "of bcoo_update_layout to 'warn' or None.") raise SparseEfficiencyError(msg) elif on_inefficient == 'warn': msg += (" To disable this warning, set the on_inefficient argument " "of bcoo_update_layout to None.") warnings.warn(msg, category=SparseEfficiencyWarning) # TODO(jakevdp): are efficiency warnings necessary when nse is 0 or 1? if (n_dense > mat.n_dense and any(d > 1 for d in mat.shape[-n_dense:mat.ndim - mat.n_dense])): _maybe_err_or_warn(f"For matrix of shape {mat.shape}, increasing n_dense from " f"{mat.n_dense} to {n_dense} results in inefficient storage.") if n_batch > mat.n_batch and any(d > 1 for d in mat.shape[mat.n_batch:n_batch]): _maybe_err_or_warn(f"For matrix of shape {mat.shape}, increasing n_batch from " f"{mat.n_batch} to {n_batch} results in inefficient storage.") new_data, new_indices = mat.data, mat.indices shape = mat.shape current_n_batch = mat.n_batch current_n_dense = mat.n_dense if n_dense < current_n_dense: n = current_n_dense - n_dense
bf4c3b64af43b86c3005c0f8bec450655ab47a8d
14
bcoo.py
531
[sparse] add bcoo_update_layout utility
26,831
0
480
492
162
120,400
306
jax
27
jax/experimental/sparse/bcoo.py
Python
62
{ "docstring": "Update the storage layout of a BCOO matrix.\n\n In general, increasing ``mat.n_batch`` or ``mat.n_dense`` will lead to very inefficient\n storage, with many explicitly-stored zeros, unless the new batch or dense dimensions have\n size 0 or 1. In such cases, ``bcoo_update_layout`` will raise a :class:`SparseEfficiencyError`.\n This can be silenced by specifying the ``on_inefficient`` argument.\n\n Args:\n mat : BCOO array\n n_batch : optional(int) the number of batch dimensions in the output matrix. If None,\n then n_batch = mat.n_batch.\n n_dense : optional(int) the number of dense dimensions in the output matrix. If None,\n then n_dense = mat.n_dense.\n on_inefficient : optional(string), one of ``['error', 'warn', None]``. Specify the\n behavior in case of an inefficient reconfiguration. This is defined as a reconfiguration\n where the size of the resulting representation is much larger than the size of the\n input representation.\n\n Returns:\n mat_out : BCOO array\n A BCOO array representing the same sparse array as the input, with the specified\n layout. ``mat_out.todense()`` will match ``mat.todense()`` up to appropriate precision.\n ", "language": "en", "n_whitespaces": 219, "n_words": 162, "vocab_size": 100 }
https://github.com/google/jax.git
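The docstring above describes relocating batch/dense dimensions of a BCOO matrix. A heavily hedged sketch of how that is typically invoked through `jax.experimental.sparse`, assuming `bcoo_update_layout` is exported there as the file path suggests (shapes and values are illustrative):

    import jax.numpy as jnp
    from jax.experimental import sparse

    # Two 3x3 matrices stored with a leading batch dimension.
    dense = jnp.stack([jnp.eye(3), 2 * jnp.eye(3)])
    mat = sparse.BCOO.fromdense(dense, n_batch=1)     # n_batch=1, n_dense=0
    flat = sparse.bcoo_update_layout(mat, n_batch=0)  # fold the batch dim into the sparse indices
    assert jnp.allclose(flat.todense(), dense)

Decreasing n_batch (as here) is always safe; increasing it on dimensions larger than 1 triggers the SparseEfficiencyError path described in the docstring unless on_inefficient is relaxed.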
1
test_s3_zip
def test_s3_zip(self):
    unzipped_paths = _unzip_if_needed([self.s3_path + "/enormous.zip"], "json")
    self.assertEqual(
        str(Path(unzipped_paths[0]).absolute()),
        str(Path("./").absolute() / "enormous.json"),
    )
569fe0109629048d08e1d9e023f7769f10bd2244
15
test_dataset_reader.py
96
[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)
27,738
0
64
54
14
124,998
14
ray
9
rllib/offline/tests/test_dataset_reader.py
Python
6
{ "docstring": "Tests whether the unzip_if_needed function works correctly on s3 zip\n files", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 11 }
https://github.com/ray-project/ray.git
1
test_occupancy_sensor_read_state
async def test_occupancy_sensor_read_state(hass, utcnow):
    helper = await setup_test_component(hass, create_occupancy_sensor_service)

    await helper.async_update(
        ServicesTypes.OCCUPANCY_SENSOR, {CharacteristicsTypes.OCCUPANCY_DETECTED: False}
    )
    state = await helper.poll_and_get_state()
    assert state.state == "off"

    await helper.async_update(
        ServicesTypes.OCCUPANCY_SENSOR, {CharacteristicsTypes.OCCUPANCY_DETECTED: True}
    )
    state = await helper.poll_and_get_state()
    assert state.state == "on"

    assert state.attributes["device_class"] == BinarySensorDeviceClass.OCCUPANCY
58b8c30221a6f6e5acbbe98b7e3298b03fb741f5
11
test_binary_sensor.py
150
Improve homekit_controller tests (#65266)
110,116
0
88
90
24
311,451
41
core
16
tests/components/homekit_controller/test_binary_sensor.py
Python
13
{ "docstring": "Test that we can read the state of a HomeKit occupancy sensor accessory.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/home-assistant/core.git
2
get_diff_with_remote_resource
def get_diff_with_remote_resource(self) -> str:
    if not self.was_created:
        raise NonExistingResourceError("Cannot compute diff with a non existing remote resource.")
    current_config = self.configuration
    remote_config = self.remote_resource.connection_configuration
    diff = compute_diff(remote_config, current_config)
    return diff.pretty()
706d7f16868f062d89e9d24e37ab059ae1a6d8b2
10
resources.py
77
🐙 octavia-cli: implement `apply` (#10703)
623
0
82
45
26
4,112
29
airbyte
13
octavia-cli/octavia_cli/apply/resources.py
Python
15
{ "docstring": "Compute the diff between current resource and the remote resource.\n\n Raises:\n NonExistingResourceError: Raised if the remote resource does not exist.\n\n Returns:\n str: The prettyfied diff.\n ", "language": "en", "n_whitespaces": 68, "n_words": 25, "vocab_size": 21 }
https://github.com/airbytehq/airbyte.git
1
test_i18n_language_non_english_default
def test_i18n_language_non_english_default(self):
    with self.settings(LANGUAGE_CODE="fr"), translation.override("en-us"):
        response = self.client.get(reverse("admin:jsi18n"))
        self.assertNotContains(response, "Choisir une heure")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
tests.py
83
Refs #33476 -- Reformatted code with Black.
52,010
0
48
44
12
207,585
12
django
11
tests/admin_views/tests.py
Python
4
{ "docstring": "\n Check if the JavaScript i18n view returns an empty language catalog\n if the default language is non-English but the selected language\n is English. See #13388 and #3594 for more details.\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 24 }
https://github.com/django/django.git
3
callbacks
def callbacks(self, callbacks_class) -> "AlgorithmConfig":
    if callbacks_class is None:
        callbacks_class = DefaultCallbacks
    # Check, whether given `callbacks` is a callable.
    if not callable(callbacks_class):
        raise ValueError(
            "`config.callbacks_class` must be a callable method that "
            "returns a subclass of DefaultCallbacks, got "
            f"{callbacks_class}!"
        )
    self.callbacks_class = callbacks_class

    return self
182744bbd151c166b8028355eae12a5da63fb3cc
12
algorithm_config.py
78
[RLlib] AlgorithmConfig: Next steps (volume 01); Algos, RolloutWorker, PolicyMap, WorkerSet use AlgorithmConfig objects under the hood. (#29395)
30,282
0
167
40
39
134,391
47
ray
6
rllib/algorithms/algorithm_config.py
Python
22
{ "docstring": "Sets the callbacks configuration.\n\n Args:\n callbacks_class: Callbacks class, whose methods will be run during\n various phases of training and environment sample collection.\n See the `DefaultCallbacks` class and\n `examples/custom_metrics_and_callbacks.py` for more usage information.\n\n Returns:\n This updated AlgorithmConfig object.\n ", "language": "en", "n_whitespaces": 125, "n_words": 37, "vocab_size": 35 }
https://github.com/ray-project/ray.git
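A hedged sketch of how the setter above is typically used when building an RLlib config; the callback subclass, environment, and algorithm choice below are illustrative rather than taken from the row:

    from ray.rllib.algorithms.callbacks import DefaultCallbacks
    from ray.rllib.algorithms.ppo import PPOConfig


    class EpisodeLogger(DefaultCallbacks):
        def on_episode_end(self, *, episode, **kwargs):
            print("episode return:", episode.total_reward)


    config = (
        PPOConfig()
        .environment("CartPole-v1")
        .callbacks(EpisodeLogger)  # the method shown above stores the class, not an instance
    )
    algo = config.build()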
2
test_fed_filtering
def test_fed_filtering(self): fed_hostname = self.hs.hostname + "2" subspace = "#subspace:" + fed_hostname # Create a few rooms which will have different properties. public_room = "#public:" + fed_hostname knock_room = "#knock:" + fed_hostname not_invited_room = "#not_invited:" + fed_hostname invited_room = "#invited:" + fed_hostname restricted_room = "#restricted:" + fed_hostname restricted_accessible_room = "#restricted_accessible:" + fed_hostname world_readable_room = "#world_readable:" + fed_hostname joined_room = self.helper.create_room_as(self.user, tok=self.token) # Poke an invite over federation into the database. self._poke_fed_invite(invited_room, "@remote:" + fed_hostname) # Note that these entries are brief, but should contain enough info. children_rooms = ( ( public_room, { "room_id": public_room, "world_readable": False, "join_rules": JoinRules.PUBLIC, }, ), ( knock_room, { "room_id": knock_room, "world_readable": False, "join_rules": JoinRules.KNOCK, }, ), ( not_invited_room, { "room_id": not_invited_room, "world_readable": False, "join_rules": JoinRules.INVITE, }, ), ( invited_room, { "room_id": invited_room, "world_readable": False, "join_rules": JoinRules.INVITE, }, ), ( restricted_room, { "room_id": restricted_room, "world_readable": False, "join_rules": JoinRules.RESTRICTED, "allowed_room_ids": [], }, ), ( restricted_accessible_room, { "room_id": restricted_accessible_room, "world_readable": False, "join_rules": JoinRules.RESTRICTED, "allowed_room_ids": [self.room], }, ), ( world_readable_room, { "room_id": world_readable_room, "world_readable": True, "join_rules": JoinRules.INVITE, }, ), ( joined_room, { "room_id": joined_room, "world_readable": False, "join_rules": JoinRules.INVITE, }, ), ) subspace_room_entry = _RoomEntry( subspace, { "room_id": subspace, "world_readable": True, }, # Place each room in the sub-space. [ { "type": EventTypes.SpaceChild, "room_id": subspace, "state_key": room_id, "content": {"via": [fed_hostname]}, } for room_id, _ in children_rooms ], )
7754af24ab163a3666bc04c7df409e59ace0d763
14
test_room_summary.py
544
Remove the unstable `/spaces` endpoint. (#12073) ...and various code supporting it. The /spaces endpoint was from an old version of MSC2946 and included both a Client-Server and Server-Server API. Note that the unstable /hierarchy endpoint (from the final version of MSC2946) is not yet removed.
71,495
0
1,598
484
104
247,085
218
synapse
33
tests/handlers/test_room_summary.py
Python
129
{ "docstring": "\n Rooms returned over federation should be properly filtered to only include\n rooms the user has access to.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
https://github.com/matrix-org/synapse.git
1
require_soundfile
def require_soundfile(test_case):
    return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
57e6464ac9a31156f1c93e59107323e6ec01309e
10
testing_utils.py
37
Update all require decorators to use skipUnless when possible (#16999)
6,816
0
13
20
7
37,511
7
transformers
5
src/transformers/testing_utils.py
Python
2
{ "docstring": "\n Decorator marking a test that requires soundfile\n\n These tests are skipped when soundfile isn't installed.\n\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 14 }
https://github.com/huggingface/transformers.git
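The decorator above is a one-liner around `unittest.skipUnless`. A generic, self-contained sketch of the same pattern for an optional dependency (the `importlib` availability check stands in for the library's own `is_soundfile_availble()` helper):

    import importlib.util
    import unittest


    def require_soundfile(test_case):
        """Skip the decorated test when the optional soundfile package is missing."""
        available = importlib.util.find_spec("soundfile") is not None
        return unittest.skipUnless(available, "test requires soundfile")(test_case)


    class AudioTests(unittest.TestCase):
        @require_soundfile
        def test_roundtrip(self):
            import soundfile  # safe: the decorator guarantees it is importable
            self.assertTrue(hasattr(soundfile, "read"))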
15
_resolve_project_threshold_config
def _resolve_project_threshold_config(self) -> SelectType: org_id = self.builder.params.get("organization_id") project_ids = self.builder.params.get("project_id") project_threshold_configs = ( ProjectTransactionThreshold.objects.filter( organization_id=org_id, project_id__in=project_ids, ) .order_by("project_id") .values_list("project_id", "threshold", "metric") ) transaction_threshold_configs = ( ProjectTransactionThresholdOverride.objects.filter( organization_id=org_id, project_id__in=project_ids, ) .order_by("project_id") .values_list("transaction", "project_id", "threshold", "metric") ) num_project_thresholds = project_threshold_configs.count() sentry_sdk.set_tag("project_threshold.count", num_project_thresholds) sentry_sdk.set_tag( "project_threshold.count.grouped", format_grouped_length(num_project_thresholds, [10, 100, 250, 500]), ) num_transaction_thresholds = transaction_threshold_configs.count() sentry_sdk.set_tag("txn_threshold.count", num_transaction_thresholds) sentry_sdk.set_tag( "txn_threshold.count.grouped", format_grouped_length(num_transaction_thresholds, [10, 100, 250, 500]), ) if ( num_project_thresholds + num_transaction_thresholds > constants.MAX_QUERYABLE_TRANSACTION_THRESHOLDS ): raise InvalidSearchQuery( f"Exceeded {constants.MAX_QUERYABLE_TRANSACTION_THRESHOLDS} configured transaction thresholds limit, try with fewer Projects." ) # Arrays need to have toUint64 casting because clickhouse will define the type as the narrowest possible type # that can store listed argument types, which means the comparison will fail because of mismatched types project_thresholds = {} project_threshold_config_keys = [] project_threshold_config_values = [] for project_id, threshold, metric in project_threshold_configs: metric = TRANSACTION_METRICS[metric] if ( threshold == constants.DEFAULT_PROJECT_THRESHOLD and metric == constants.DEFAULT_PROJECT_THRESHOLD_METRIC ): # small optimization, if the configuration is equal to the default, # we can skip it in the final query continue project_thresholds[project_id] = (metric, threshold) project_threshold_config_keys.append(Function("toUInt64", [project_id])) project_threshold_config_values.append((metric, threshold)) project_threshold_override_config_keys = [] project_threshold_override_config_values = [] for transaction, project_id, threshold, metric in transaction_threshold_configs: metric = TRANSACTION_METRICS[metric] if ( project_id in project_thresholds and threshold == project_thresholds[project_id][1] and metric == project_thresholds[project_id][0] ): # small optimization, if the configuration is equal to the project # configs, we can skip it in the final query continue elif ( project_id not in project_thresholds and threshold == constants.DEFAULT_PROJECT_THRESHOLD and metric == constants.DEFAULT_PROJECT_THRESHOLD_METRIC ): # small optimization, if the configuration is equal to the default # and no project configs were set, we can skip it in the final query continue transaction_id = self.resolve_tag_value(transaction) # Don't add to the config if we can't resolve it if transaction_id is None: continue project_threshold_override_config_keys.append( (Function("toUInt64", [project_id]), (Function("toUInt64", [transaction_id]))) ) project_threshold_override_config_values.append((metric, threshold)) project_threshold_config_index: SelectType = Function( "indexOf", [ project_threshold_config_keys, self.builder.column("project_id"), ], constants.PROJECT_THRESHOLD_CONFIG_INDEX_ALIAS, ) project_threshold_override_config_index: SelectType = 
Function( "indexOf", [ project_threshold_override_config_keys, (self.builder.column("project_id"), self.builder.column("transaction")), ], constants.PROJECT_THRESHOLD_OVERRIDE_CONFIG_INDEX_ALIAS, )
e1b25d625b185588fc7c2834dff5ea5bb3a98ce0
14
metrics.py
741
fix(mep): Use project thresholds for apdex calculation (#37256) - Currently apdex is always based on the satisfaction tags in the transaction.duration metrics. This updates the apdex function so we read the threshold config, and use that to determine which metric we should read the satisfaction tags from instead
19,036
0
1,399
513
165
93,967
318
sentry
48
src/sentry/search/events/datasets/metrics.py
Python
117
{ "docstring": "This is mostly duplicated code from the discover dataset version\n TODO: try to make this more DRY with the discover version\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 18 }
https://github.com/getsentry/sentry.git
1
test_validate_invalid_subscription_and_query
def test_validate_invalid_subscription_and_query():
    result = validate_subscription_query(TEST_INVALID_MULTIPLE_SUBSCRIPTION_AND_QUERY)
    assert result is False


TEST_INVALID_MULTIPLE_SUBSCRIPTION =
aca6418d6c36956bc1ab530e6ef7e146ec9df90c
8
test_create_deliveries_for_subscription.py
31
Add Webhook payload via graphql subscriptions (#9394) * Add PoC of webhook subscriptions * add async webhooks subscription payloads feature * remove unneeded file * add translations subscription handling, fixes after review * remove todo * add descriptions * add descriptions, move subsrciption_payloads.py * refactor * fix imports, add changelog * check_document_is_single_subscription refactor Co-authored-by: Maciej Korycinski <maciej@mirumee.com> Co-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>
5,024
0
16
14
9
26,497
11
saleor
5
saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py
Python
3
{ "docstring": "\nsubscription{\n event{\n ...on ProductUpdated{\n product{\n id\n }\n }\n }\n}\nsubscription{\n event{\n ...on ProductCreated{\n product{\n id\n }\n }\n }\n}\n", "language": "en", "n_whitespaces": 66, "n_words": 20, "vocab_size": 8 }
https://github.com/saleor/saleor.git
3
_get_semantics_within_frame
def _get_semantics_within_frame(self, vnframe):
    semantics_within_single_frame = []
    for pred in vnframe.findall("SEMANTICS/PRED"):
        arguments = [
            {"type": arg.get("type"), "value": arg.get("value")}
            for arg in pred.findall("ARGS/ARG")
        ]
        semantics_within_single_frame.append(
            {
                "predicate_value": pred.get("value"),
                "arguments": arguments,
                "negated": pred.get("bool") == "!",
            }
        )
    return semantics_within_single_frame
8b43b49b0cd8c12cae1d48df27edfdd98cf859fd
15
verbnet.py
158
Read 'bool' field from VerbNet
7,591
0
225
87
32
42,524
36
nltk
10
nltk/corpus/reader/verbnet.py
Python
15
{ "docstring": "Returns semantics within a single frame\n\n A utility function to retrieve semantics within a frame in VerbNet\n Members of the semantics dictionary:\n 1) Predicate value\n 2) Arguments\n\n :param vnframe: An ElementTree containing the xml contents of\n a VerbNet frame.\n :return: semantics: semantics dictionary\n ", "language": "en", "n_whitespaces": 103, "n_words": 43, "vocab_size": 33 }
https://github.com/nltk/nltk.git
3
doc_resample_reduce
def doc_resample_reduce(result, refer_to, params=None, compatibility_params=True):
    action = f"compute {result} for each group"

    params_substitution = (
        (
        )
        if compatibility_params
        else ""
    )

    if params:
        params_substitution = format_string(
            "{params}\n{params_substitution}",
            params=params,
            params_substitution=params_substitution,
        )

    build_rules = f

    return doc_resample(
        action=action,
        extra_params=params_substitution,
        build_rules=build_rules,
        refer_to=refer_to,
    )
58bbcc37477866d19c8b092a0e1974a4f0baa586
11
doc_utils.py
123
REFACTOR-#2656: Update modin to fit algebra (code only) (#3717) Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Co-authored-by: Vasily Litvinov <vasilij.n.litvinov@intel.com> Co-authored-by: Alexey Prutskov <alexey.prutskov@intel.com> Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Signed-off-by: Rehan Durrani <rehan@ponder.io>
35,243
0
180
73
32
153,059
41
modin
11
modin/core/storage_formats/base/doc_utils.py
Python
31
{ "docstring": "\n Build decorator which adds docstring for the resample reduce method.\n\n Parameters\n ----------\n result : str\n The result of the method.\n refer_to : str\n Method name in ``modin.pandas.base.Resampler`` module to refer to for\n more information about parameters and output format.\n params : str, optional\n Method parameters in the NumPy docstyle format to substitute\n to the docstring template.\n compatibility_params : bool, default: True\n Whether method takes `*args` and `**kwargs` that do not affect\n the result.\n\n Returns\n -------\n callable\n \n *args : iterable\n Serves the compatibility purpose. Does not affect the result.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n \n - Labels on the specified axis are the group names (time-stamps)\n - Labels on the opposit of specified axis are preserved.\n - Each element of QueryCompiler is the {result} for the\n corresponding group and column/row.", "language": "en", "n_whitespaces": 308, "n_words": 135, "vocab_size": 83 }
https://github.com/modin-project/modin.git
7
quote_name_unless_alias
def quote_name_unless_alias(self, name):
    if name in self.quote_cache:
        return self.quote_cache[name]
    if (
        (name in self.query.alias_map and name not in self.query.table_map)
        or name in self.query.extra_select
        or (
            self.query.external_aliases.get(name)
            and name not in self.query.table_map
        )
    ):
        self.quote_cache[name] = name
        return name
    r = self.connection.ops.quote_name(name)
    self.quote_cache[name] = r
    return r
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
compiler.py
163
Refs #33476 -- Reformatted code with Black.
51,230
0
202
106
24
205,831
46
django
14
django/db/models/sql/compiler.py
Python
16
{ "docstring": "\n A wrapper around connection.ops.quote_name that doesn't quote aliases\n for table names. This avoids problems with some SQL dialects that treat\n quoted strings specially (e.g. PostgreSQL).\n ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 24 }
https://github.com/django/django.git
11
formfield_for_manytomany
def formfield_for_manytomany(self, db_field, request, **kwargs): # If it uses an intermediary model that isn't auto created, don't show # a field in admin. if not db_field.remote_field.through._meta.auto_created: return None db = kwargs.get("using") if "widget" not in kwargs: autocomplete_fields = self.get_autocomplete_fields(request) if db_field.name in autocomplete_fields: kwargs["widget"] = AutocompleteSelectMultiple( db_field, self.admin_site, using=db, ) elif db_field.name in self.raw_id_fields: kwargs["widget"] = widgets.ManyToManyRawIdWidget( db_field.remote_field, self.admin_site, using=db, ) elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]: kwargs["widget"] = widgets.FilteredSelectMultiple( db_field.verbose_name, db_field.name in self.filter_vertical ) if "queryset" not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs["queryset"] = queryset form_field = db_field.formfield(**kwargs) if isinstance(form_field.widget, SelectMultiple) and not isinstance( form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple) ): msg = _( "Hold down “Control”, or “Command” on a Mac, to select more than one." ) help_text = form_field.help_text form_field.help_text = ( format_lazy("{} {}", help_text, msg) if help_text else msg ) return form_field
9c19aff7c7561e3a82978a272ecdaad40dda5c00
15
options.py
376
Refs #33476 -- Reformatted code with Black.
50,383
0
627
237
89
203,453
139
django
36
django/contrib/admin/options.py
Python
38
{ "docstring": "\n Get a form Field for a ManyToManyField.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 6 }
https://github.com/django/django.git
1
serving
def serving(self, inputs):
    output = self.call(inputs)
    return self.serving_output(output)


SWIN_START_DOCSTRING = r
SWIN_INPUTS_DOCSTRING = r
8e8384663d716d4b5a4f510070ff954fc0ba4a52
8
modeling_tf_swin.py
49
Update serving code to enable `saved_model=True` (#18153) * Add serving_output and serving methods to some vision models * Add serving outputs for DeiT * Don't convert hidden states - differing shapes * Make saveable * Fix up * Make swin saveable * Add in tests * Fix funnel tests (can't convert to tensor) * Fix numpy call * Tidy up a bit * Add in hidden states - resnet * Remove numpy * Fix failing tests - tensor shape and skipping tests * Remove duplicated function * PR comments - formatting and var names * PR comments Add suggestions made by Joao Gante: * Use tf.shape instead of shape_list * Use @tooslow decorator on tests * Simplify some of the logic * PR comments Address Yih-Dar Sheih comments - making tensor names consistent and make types float * Types consistent with docs; disable test on swin (slow) * CI trigger * Change input_features to float32 * Add serving_output for segformer * Fixup Co-authored-by: Amy Roberts <amyeroberts@users.noreply.github.com>
5,928
0
25
22
11
32,431
14
transformers
8
src/transformers/models/swin/modeling_tf_swin.py
Python
3
{ "docstring": "\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`SwinConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See\n [`AutoFeatureExtractor.__call__`] for details.\n head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 382, "n_words": 190, "vocab_size": 113 }
https://github.com/huggingface/transformers.git
2
task_group
def task_group(python_callable=None, **tg_kwargs):
    if callable(python_callable):
        return TaskGroupDecorator(function=python_callable, kwargs=tg_kwargs)
    return cast(Callable[[F], F], functools.partial(TaskGroupDecorator, kwargs=tg_kwargs))
8fe9783fcd813dced8de849c8130d0eb7f90bac3
10
task_group.py
78
Typing support for operator mapping functions (#21415)
8,287
0
29
51
12
44,543
13
airflow
12
airflow/decorators/task_group.py
Python
4
{ "docstring": "\n Python TaskGroup decorator.\n\n This wraps a function into an Airflow TaskGroup. When used as the\n ``@task_group()`` form, all arguments are forwarded to the underlying\n TaskGroup class. Can be used to parametrize TaskGroup.\n\n :param python_callable: Function to decorate.\n :param tg_kwargs: Keyword arguments for the TaskGroup object.\n ", "language": "en", "n_whitespaces": 67, "n_words": 45, "vocab_size": 35 }
https://github.com/apache/airflow.git
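As a rough usage sketch of the decorator documented above (not taken from the Airflow docs verbatim): the function body defines tasks, and keyword arguments such as the group id are forwarded to the underlying TaskGroup. The DAG id, task names, and start date below are made up.

from airflow.decorators import dag, task, task_group
import pendulum

@dag(start_date=pendulum.datetime(2022, 1, 1), schedule_interval=None, catchup=False)
def example_dag():

    @task
    def extract():
        return {"value": 1}

    @task
    def load(payload):
        print(payload)

    # kwargs given here are forwarded to the TaskGroup object
    @task_group(group_id="etl_group")
    def etl():
        load(extract())

    etl()

example_dag_instance = example_dag()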
3
get_tail
def get_tail(self, n=10, raw=True, output=False, include_latest=False):
    self.writeout_cache()
    if not include_latest:
        n += 1
    cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
                        (n,), raw=raw, output=output)
    if not include_latest:
        return reversed(list(cur)[1:])
    return reversed(list(cur))
ebea766ceb57ec0080300b246be0699f41957e26
12
history.py
124
Fix HistoryAccessor.get_tail bug (#13666) Current implementation of get_tail in HistoryAccessor assumes context present only in subclass , so it's moved there and the old implementation is restored.
52,500
0
128
79
29
208,746
33
ipython
11
IPython/core/history.py
Python
9
{ "docstring": "Get the last n lines from the history database.\n\n Parameters\n ----------\n n : int\n The number of lines to get\n raw, output : bool\n See :meth:`get_range`\n include_latest : bool\n If False (default), n+1 lines are fetched, and the latest one\n is discarded. This is intended to be used where the function\n is called by a user command, which it should not return.\n\n Returns\n -------\n Tuples as :meth:`get_range`\n ", "language": "en", "n_whitespaces": 185, "n_words": 67, "vocab_size": 54 }
https://github.com/ipython/ipython.git
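A hedged usage sketch of the accessor method documented above; it assumes the default history database location and the default raw=True, output=False mode, in which each row is a (session, line_number, input) tuple.

from IPython.core.history import HistoryAccessor

# Read-only access to the default IPython history database.
hist = HistoryAccessor()
for session, line_number, source in hist.get_tail(5, include_latest=True):
    print(f"[{session}:{line_number}] {source}")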
2
get_available_prefixes
def get_available_prefixes(self):
    params = {
        'prefix__net_contained': str(self.prefix)
    }
    if hasattr(self, 'vrf'):
        params['vrf'] = self.vrf

    child_prefixes = Prefix.objects.filter(**params).values_list('prefix', flat=True)
    return netaddr.IPSet(self.prefix) - netaddr.IPSet(child_prefixes)
7ba0b420f181cffac86a9de384a1ec9d8a9a07dd
11
ip.py
120
Fixes #10109: Fix available prefixes calculation for container prefixes in the global table
78,137
0
86
70
20
265,551
22
netbox
15
netbox/ipam/models/ip.py
Python
8
{ "docstring": "\n Return all available prefixes within this Aggregate or Prefix as an IPSet.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
https://github.com/netbox-community/netbox.git
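The availability computation above boils down to netaddr set arithmetic; a standalone sketch with made-up prefixes:

import netaddr

parent = netaddr.IPSet(["10.0.0.0/16"])
children = netaddr.IPSet(["10.0.0.0/24", "10.0.1.0/24"])

# Everything in the parent that is not covered by a child prefix
available = parent - children
print(list(available.iter_cidrs()))  # e.g. [IPNetwork('10.0.2.0/23'), ...]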
1
to_sql
def to_sql(cls, qc, **kwargs):
    # we first insert an empty DF in order to create the full table in the database
    # This also helps to validate the input against pandas
    # we would like to_sql() to complete only when all rows have been inserted into the database
    # since the mapping operation is non-blocking, each partition will return an empty DF
    # so at the end, the blocking operation will be this empty DF to_pandas
    empty_df = qc.getitem_row_array([0]).to_pandas().head(0)
    empty_df.to_sql(**kwargs)
    # so each partition will append its respective DF
    kwargs["if_exists"] = "append"
    columns = qc.columns
0faf4675140415e17d4112f9d0d37cfe87770b9e
13
io.py
89
REFACTOR-#3871: move related to pandas functionality into 'PandasOnRayIO' class (#3872) Signed-off-by: Anatoly Myachev <anatoly.myachev@intel.com>
35,219
0
172
77
65
152,977
95
modin
9
modin/core/execution/ray/implementations/pandas_on_ray/io/io.py
Python
8
{ "docstring": "\n Write records stored in the `qc` to a SQL database.\n\n Parameters\n ----------\n qc : BaseQueryCompiler\n The query compiler of the Modin dataframe that we want to run ``to_sql`` on.\n **kwargs : dict\n Parameters for ``pandas.to_sql(**kwargs)``.\n ", "language": "en", "n_whitespaces": 100, "n_words": 35, "vocab_size": 31 }
https://github.com/modin-project/modin.git
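A plain-pandas sketch of the trick described in the comments above (write an empty head() first to create and validate the table, then append the real rows). The SQLite URL and table name are placeholders.

import pandas as pd
import sqlalchemy

engine = sqlalchemy.create_engine("sqlite:///:memory:")  # placeholder database
df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

# Create the table (and validate dtypes) with zero rows first...
df.head(0).to_sql("my_table", engine, index=False)
# ...then append the actual data, mirroring kwargs["if_exists"] = "append" above.
df.to_sql("my_table", engine, index=False, if_exists="append")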
4
run_py
def run_py(executable, *code):
    if os.name == 'nt' and len(code) > 1:
        # Windows can't do newlines in arguments...
        oshandle, filename = tempfile.mkstemp()
        with os.fdopen(oshandle, 'w') as f:
            f.write('\n'.join(code))
        cmd = [executable, filename]
        try:
            ret = subprocess.run(cmd, text=True, check=True, stdout=subprocess.PIPE).stdout
        finally:
            os.remove(filename)
    else:
        cmd = [executable, '-c', '\n'.join(code)]
        ret = subprocess.run(cmd, text=True, check=True, stdout=subprocess.PIPE).stdout
    return ret.rstrip()
ab7a2ee55811c1d25dc482f4d5126eb4d7bbe714
15
link_pyqt.py
236
Switch to Python 3.7 subprocess API Follow-up for #6905
117,293
0
216
142
44
320,701
55
qutebrowser
24
scripts/link_pyqt.py
Python
16
{ "docstring": "Run the given python code with the given executable.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 7 }
https://github.com/qutebrowser/qutebrowser.git
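A hedged usage sketch of the helper, assuming run_py is importable from the script above; each extra positional argument is one line of Python, which the helper joins with newlines (working around Windows' lack of newline support in arguments).

import sys

version = run_py(
    sys.executable,
    "import sys",
    "print(sys.version_info[:2])",
)
print(version)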
2
fingerprint
def fingerprint(self) -> Text:
    if self.is_dense():
        f_as_text = self.features.tobytes()
    else:
        f_as_text = rasa.shared.nlu.training_data.util.sparse_matrix_to_string(
            self.features
        )

    return rasa.shared.utils.io.deep_container_fingerprint(
        [self.type, self.origin, self.attribute, f_as_text]
    )
4cdceaab5271a5b51463ec562c8eb55f96b771c5
15
features.py
113
Bump numpy from 1.19.5 to 1.21.6 (#11078) * Bump numpy from 1.19.5 to 1.21.6 Bumps [numpy](https://github.com/numpy/numpy) from 1.19.5 to 1.21.6. - [Release notes](https://github.com/numpy/numpy/releases) - [Changelog](https://github.com/numpy/numpy/blob/main/doc/HOWTO_RELEASE.rst.txt) - [Commits](https://github.com/numpy/numpy/compare/v1.19.5...v1.21.6) --- updated-dependencies: - dependency-name: numpy dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] <support@github.com> * fixed mypy errors for numpy 1.21.6 upgrade * removed duplicate np.array call Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Thomas Werkmeister <thomas@werkmeister.me> Co-authored-by: melindaloubser1 <melinda.loubser@gmail.com>
38,391
0
116
71
19
159,675
22
rasa
19
rasa/shared/nlu/training_data/features.py
Python
11
{ "docstring": "Calculate a stable string fingerprint for the features.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/RasaHQ/rasa.git
1
test_stop_by_max_time_mins
def test_stop_by_max_time_mins():
    tpot_obj = TPOTClassifier(config_dict='TPOT light')
    tpot_obj._start_datetime = datetime.now()
    sleep(0.11)
    tpot_obj.max_time_mins = 0.1/60.
    assert_raises(KeyboardInterrupt, tpot_obj._stop_by_max_time_mins)
388616b6247ca4ea8de4e2f340d6206aee523541
10
tpot_tests.py
72
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,511
0
33
46
13
181,724
15
tpot
12
tests/tpot_tests.py
Python
6
{ "docstring": "Assert that _stop_by_max_time_mins raises KeyboardInterrupt when maximum minutes have elapsed.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/EpistasisLab/tpot.git
2
get_requires_python
def get_requires_python(dist):
    # type: (pkg_resources.Distribution) -> Optional[str]
    pkg_info_dict = get_metadata(dist)
    requires_python = pkg_info_dict.get("Requires-Python")

    if requires_python is not None:
        # Convert to a str to satisfy the type checker, since requires_python
        # can be a Header object.
        requires_python = str(requires_python)

    return requires_python
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
packaging.py
63
upd; format
12,487
0
80
34
31
61,284
41
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_internal/utils/packaging.py
Python
6
{ "docstring": "\n Return the \"Requires-Python\" metadata for a distribution, or None\n if not present.\n ", "language": "en", "n_whitespaces": 22, "n_words": 12, "vocab_size": 12 }
https://github.com/jindongwang/transferlearning.git
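Outside of pip's internals, roughly the same "Requires-Python" metadata can be read with the standard library; this is a sketch using importlib.metadata (Python 3.8+), not part of the vendored code above.

from importlib.metadata import metadata

def requires_python_for(dist_name):
    # Returns the Requires-Python specifier string, or None if absent.
    return metadata(dist_name).get("Requires-Python")

print(requires_python_for("pip"))  # e.g. '>=3.7'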
1
test_remove_failure
def test_remove_failure():
    fileset_pkg_name = "info_fake"
    fileset_lslpp_error = 
    lslpp_call = MagicMock(return_value=fileset_lslpp_error)
    list_pkgs_mock = MagicMock(
        side_effect=[
            {"bos.net.tcp.tcpdump": "7.2.4.1"},
            {"bos.net.tcp.tcpdump": "7.2.4.1"},
        ]
    )
    with patch.dict(
        aixpkg.__salt__,
        {
            "cmd.run_all": lslpp_call,
            "config.get": MagicMock(return_value=False),
        },
    ), patch.object(aixpkg, "list_pkgs", list_pkgs_mock):
        expected = {
            "changes": {},
            "errors": [f"Fileset {fileset_pkg_name} not installed."],
        }
        with pytest.raises(CommandExecutionError) as exc_info:
            result = aixpkg.remove(fileset_pkg_name)
        assert exc_info.value.info == expected, exc_info.value.info
        assert lslpp_call.call_count == 1
        lslpp_call.assert_any_call(
            ["/usr/bin/lslpp", "-Lc", f"{fileset_pkg_name}"],
            python_shell=False,
        )
b2f8271fed3f05160431c55ad7c4e8f3e3e95c3e
14
test_aixpkg.py
266
Complete intial tests for AIX yum and dnf support
53,822
0
274
150
53
215,105
66
salt
25
tests/pytests/unit/modules/test_aixpkg.py
Python
31
{ "docstring": "\n Test remove package / fileset and experience failure\n #Package Name:Fileset:Level:State:PTF Id:Fix State:Type:Description:Destination Dir.:Uninstaller:Message Catalog:Message Set:Message Number:Parent:Automatic:EFIX Locked:Install Path:Build Date\nlslpp: Fileset info_fake not installed.\n", "language": "en", "n_whitespaces": 29, "n_words": 24, "vocab_size": 24 }
https://github.com/saltstack/salt.git
3
test_evaluated_individuals_
def test_evaluated_individuals_():
    tpot_obj = TPOTClassifier(
        random_state=42,
        population_size=2,
        offspring_size=4,
        generations=1,
        verbosity=0,
        config_dict='TPOT light'
    )
    tpot_obj.fit(training_features, training_target)
    assert isinstance(tpot_obj.evaluated_individuals_, dict)
    for pipeline_string in sorted(tpot_obj.evaluated_individuals_.keys()):
        deap_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
        sklearn_pipeline = tpot_obj._toolbox.compile(expr=deap_pipeline)
        operator_count = tpot_obj._operator_count(deap_pipeline)

        try:
            cv_scores = model_selection.cross_val_score(sklearn_pipeline, training_features, training_target, cv=5, scoring='accuracy', verbose=0)
            mean_cv_scores = np.mean(cv_scores)
        except Exception:
            mean_cv_scores = -float('inf')
        assert np.allclose(tpot_obj.evaluated_individuals_[pipeline_string]['internal_cv_score'], mean_cv_scores)
        assert np.allclose(tpot_obj.evaluated_individuals_[pipeline_string]['operator_count'], operator_count)
388616b6247ca4ea8de4e2f340d6206aee523541
15
tpot_tests.py
274
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,557
0
197
176
46
181,771
55
tpot
41
tests/tpot_tests.py
Python
22
{ "docstring": "Assert that evaluated_individuals_ stores current pipelines and their CV scores.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/EpistasisLab/tpot.git
1
user_can_delete_obj
def user_can_delete_obj(self, user, obj):
    perm_codename = self.get_perm_codename("delete")
    return self.user_has_specific_permission(user, perm_codename)
d10f15e55806c6944827d801cd9c2d53f5da4186
9
permission.py
45
Reformat with black
15,953
0
31
27
10
73,139
10
wagtail
7
wagtail/contrib/modeladmin/helpers/permission.py
Python
3
{ "docstring": "\n Return a boolean to indicate whether `user` is permitted to 'delete'\n a specific `self.model` instance.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 13 }
https://github.com/wagtail/wagtail.git
4
test_draw_edges_toggling_with_arrows_kwarg
def test_draw_edges_toggling_with_arrows_kwarg():
    import matplotlib.collections
    import matplotlib.patches

    UG = nx.path_graph(3)
    DG = nx.path_graph(3, create_using=nx.DiGraph)
    pos = {n: (n, n) for n in UG}

    # Use FancyArrowPatches when arrows=True, regardless of graph type
    for G in (UG, DG):
        edges = nx.draw_networkx_edges(G, pos, arrows=True)
        assert len(edges) == len(G.edges)
        assert isinstance(edges[0], mpl.patches.FancyArrowPatch)

    # Use LineCollection when arrows=False, regardless of graph type
    for G in (UG, DG):
        edges = nx.draw_networkx_edges(G, pos, arrows=False)
        assert isinstance(edges, mpl.collections.LineCollection)

    # Default behavior when arrows=None: FAPs for directed, LC's for undirected
    edges = nx.draw_networkx_edges(UG, pos)
    assert isinstance(edges, mpl.collections.LineCollection)
    edges = nx.draw_networkx_edges(DG, pos)
    assert len(edges) == len(G.edges)
    assert isinstance(edges[0], mpl.patches.FancyArrowPatch)


@pytest.mark.parametrize("drawing_func", (nx.draw, nx.draw_networkx))
5c0b11afb4c0882a070d522ef3fa41482ba935d3
@pytest.mark.parametrize("drawing_func", (nx.draw, nx.draw_networkx))
11
test_pylab.py
316
Use isort with pre-commit to enforce import guidelines (#5659) * Add isort to pre-commit * Run isort on all python files (except __init__.py ones)
42,097
1
184
190
59
176,781
102
networkx
26
networkx/drawing/tests/test_pylab.py
Python
18
{ "docstring": "\n The `arrows` keyword argument is used as a 3-way switch to select which\n type of object to use for drawing edges:\n - ``arrows=None`` -> default (FancyArrowPatches for directed, else LineCollection)\n - ``arrows=True`` -> FancyArrowPatches\n - ``arrows=False`` -> LineCollection\n ", "language": "en", "n_whitespaces": 63, "n_words": 38, "vocab_size": 32 }
https://github.com/networkx/networkx.git
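A minimal sketch of the three switch positions the test asserts on (return types noted in comments; no assertions here):

import matplotlib.pyplot as plt
import networkx as nx

G = nx.path_graph(3, create_using=nx.DiGraph)
pos = {n: (n, n) for n in G}

fancy = nx.draw_networkx_edges(G, pos, arrows=True)    # list of FancyArrowPatch
lines = nx.draw_networkx_edges(G, pos, arrows=False)   # LineCollection
default = nx.draw_networkx_edges(G, pos)               # FancyArrowPatches here, because G is directed
plt.close("all")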
1
test_unknown_filter
def test_unknown_filter(self):
    # Insert session metrics:
    self.store_session(self.build_session(project_id=self.project.id))
    response = self.get_response(
        self.organization.slug,
        field="sum(sentry.sessions.session)",
        statsPeriod="1h",
        interval="1h",
        datasource="snuba",
        query="foo:123",  # Unknown tag key
    )
    assert response.status_code == 400

    response = self.get_success_response(
        self.organization.slug,
        field="sum(sentry.sessions.session)",
        statsPeriod="1h",
        interval="1h",
        datasource="snuba",
        query="release:123",  # Unknown tag value is fine.
    )
    groups = response.data["groups"]
    assert len(groups) == 0
8f22ac2a9290cb173f1dcdcf7b680c7992c6d4ad
12
test_organization_metrics.py
187
fix: remove print statement (#31046)
19,204
0
252
116
33
95,400
48
sentry
21
tests/sentry/api/endpoints/test_organization_metrics.py
Python
22
{ "docstring": "Use a tag key/value in filter that does not exist in the indexer", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
https://github.com/getsentry/sentry.git
14
get_user_input
def get_user_input(): model_types = list(auto_module.configuration_auto.MODEL_NAMES_MAPPING.keys()) # Get old model type valid_model_type = False while not valid_model_type: old_model_type = input( "What is the model you would like to duplicate? Please provide the lowercase `model_type` (e.g. roberta): " ) if old_model_type in model_types: valid_model_type = True else: print(f"{old_model_type} is not a valid model type.") near_choices = difflib.get_close_matches(old_model_type, model_types) if len(near_choices) >= 1: if len(near_choices) > 1: near_choices = " or ".join(near_choices) print(f"Did you mean {near_choices}?") old_model_info = retrieve_info_for_model(old_model_type) old_tokenizer_class = old_model_info["model_patterns"].tokenizer_class old_feature_extractor_class = old_model_info["model_patterns"].feature_extractor_class old_processor_class = old_model_info["model_patterns"].processor_class old_frameworks = old_model_info["frameworks"] old_checkpoint = None if len(old_model_info["model_patterns"].checkpoint) == 0: old_checkpoint = get_user_field( "We couldn't find the name of the base checkpoint for that model, please enter it here." ) model_name = get_user_field( "What is the name (with no special casing) for your new model in the paper (e.g. RoBERTa)? " ) default_patterns = ModelPatterns(model_name, model_name) model_type = get_user_field( "What identifier would you like to use for the `model_type` of this model? ", default_value=default_patterns.model_type, ) model_lower_cased = get_user_field( "What lowercase name would you like to use for the module (folder) of this model? ", default_value=default_patterns.model_lower_cased, ) model_camel_cased = get_user_field( "What prefix (camel-cased) would you like to use for the model classes of this model (e.g. Roberta)? ", default_value=default_patterns.model_camel_cased, ) model_upper_cased = get_user_field( "What prefix (upper-cased) would you like to use for the constants relative to this model? ", default_value=default_patterns.model_upper_cased, ) config_class = get_user_field( "What will be the name of the config class for this model? ", default_value=f"{model_camel_cased}Config" ) checkpoint = get_user_field( "Please give a checkpoint identifier (on the model Hub) for this new model (e.g. facebook/roberta-base): " ) old_processing_classes = [ c for c in [old_feature_extractor_class, old_tokenizer_class, old_processor_class] if c is not None ] old_processing_classes = ", ".join(old_processing_classes) keep_processing = get_user_field( f"Will your new model use the same processing class as {old_model_type} ({old_processing_classes}) (yes/no)? ", convert_to=convert_to_bool, fallback_message="Please answer yes/no, y/n, true/false or 1/0. ", ) if keep_processing: feature_extractor_class = old_feature_extractor_class processor_class = old_processor_class tokenizer_class = old_tokenizer_class else: if old_tokenizer_class is not None: tokenizer_class = get_user_field( "What will be the name of the tokenizer class for this model? ", default_value=f"{model_camel_cased}Tokenizer", ) else: tokenizer_class = None if old_feature_extractor_class is not None: feature_extractor_class = get_user_field( "What will be the name of the feature extractor class for this model? ", default_value=f"{model_camel_cased}FeatureExtractor", ) else: feature_extractor_class = None if old_processor_class is not None: processor_class = get_user_field( "What will be the name of the processor class for this model? 
", default_value=f"{model_camel_cased}Processor", ) else: processor_class = None model_patterns = ModelPatterns( model_name, checkpoint, model_type=model_type, model_lower_cased=model_lower_cased, model_camel_cased=model_camel_cased, model_upper_cased=model_upper_cased, config_class=config_class, tokenizer_class=tokenizer_class, feature_extractor_class=feature_extractor_class, processor_class=processor_class, ) add_copied_from = get_user_field( "Should we add # Copied from statements when creating the new modeling file (yes/no)? ", convert_to=convert_to_bool, default_value="yes", fallback_message="Please answer yes/no, y/n, true/false or 1/0.", ) all_frameworks = get_user_field( "Should we add a version of your new model in all the frameworks implemented by" f" {old_model_type} ({old_frameworks}) (yes/no)? ", convert_to=convert_to_bool, default_value="yes", fallback_message="Please answer yes/no, y/n, true/false or 1/0.", ) if all_frameworks: frameworks = None else: frameworks = get_user_field( "Please enter the list of framworks you want (pt, tf, flax) separated by spaces", is_valid_answer=lambda x: all(p in ["pt", "tf", "flax"] for p in x.split(" ")), ) frameworks = list(set(frameworks.split(" "))) return (old_model_type, model_patterns, add_copied_from, frameworks, old_checkpoint)
fc21c9be62483d06adae6239ebe6ca77c2cb6269
19
add_new_model_like.py
870
[CookieCutter] Clarify questions (#18959) * Clarify cookiecutter questions * Update first question Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
6,143
0
1,351
503
224
33,727
525
transformers
53
src/transformers/commands/add_new_model_like.py
Python
121
{ "docstring": "\n Ask the user for the necessary inputs to add the new model.\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 10 }
https://github.com/huggingface/transformers.git
2
__enter__
def __enter__(self):
    return_value = super().__enter__()
    try:
        os.makedirs(self.settings.value_of(PREFECT_HOME), exist_ok=True)
    except OSError:
        warnings.warn(
            "Failed to create the Prefect home directory at "
            f"{self.settings.value_of(PREFECT_HOME)}",
            stacklevel=2,
        )
    return return_value
4adc737611ffa284d9952779ba2f68174a7e73cc
16
context.py
104
Squash issues with tests
11,265
0
138
52
24
55,188
25
prefect
14
src/prefect/context.py
Python
11
{ "docstring": "\n Upon initialization, we can create the home directory contained in the settings and\n configure logging. These steps are optional. Logging can only be set up once per\n process and later attempts to configure logging will fail.\n ", "language": "en", "n_whitespaces": 65, "n_words": 36, "vocab_size": 32 }
https://github.com/PrefectHQ/prefect.git
7
get_name
def get_name(self):
    r
    if self._name:
        return self._name
    elif self._parent:
        par = self._parent()
f3166e673fe8d40277b804d35d77dcdb760fc3b3
11
results.py
47
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,468
0
54
103
12
20,651
12
pipenv
5
pipenv/patched/notpip/_vendor/pyparsing/results.py
Python
39
{ "docstring": "\n Returns the results name for this token expression. Useful when several\n different expressions might match at a particular location.\n\n Example::\n\n integer = Word(nums)\n ssn_expr = Regex(r\"\\d\\d\\d-\\d\\d-\\d\\d\\d\\d\")\n house_number_expr = Suppress('#') + Word(nums, alphanums)\n user_data = (Group(house_number_expr)(\"house_number\")\n | Group(ssn_expr)(\"ssn\")\n | Group(integer)(\"age\"))\n user_info = OneOrMore(user_data)\n\n result = user_info.parse_string(\"22 111-22-3333 #221B\")\n for item in result:\n print(item.get_name(), ':', item[0])\n\n prints::\n\n age : 22\n ssn : 111-22-3333\n house_number : 221B\n ", "language": "en", "n_whitespaces": 271, "n_words": 64, "vocab_size": 54 }
https://github.com/pypa/pipenv.git
2
get_party_shipping_address
def get_party_shipping_address(doctype, name):
    out = frappe.db.sql(
        "SELECT dl.parent "
        "from `tabDynamic Link` dl join `tabAddress` ta on dl.parent=ta.name "
        "where "
        "dl.link_doctype=%s "
        "and dl.link_name=%s "
        'and dl.parenttype="Address" '
        "and ifnull(ta.disabled, 0) = 0 and"
        '(ta.address_type="Shipping" or ta.is_shipping_address=1) '
        "order by ta.is_shipping_address desc, ta.address_type desc limit 1",
        (doctype, name),
    )
    if out:
        return out[0][0]
    else:
        return ""
494bd9ef78313436f0424b918f200dab8fc7c20b
10
party.py
91
style: format code with black
13,797
0
40
48
49
65,123
57
erpnext
7
erpnext/accounts/party.py
Python
17
{ "docstring": "\n\tReturns an Address name (best guess) for the given doctype and name for which `address_type == 'Shipping'` is true.\n\tand/or `is_shipping_address = 1`.\n\n\tIt returns an empty string if there is no matching record.\n\n\t:param doctype: Party Doctype\n\t:param name: Party name\n\t:return: String\n\t", "language": "en", "n_whitespaces": 38, "n_words": 44, "vocab_size": 37 }
https://github.com/frappe/erpnext.git
5
_trainable_name
def _trainable_name(self, include_trial_id=False):
    if self.custom_trial_name:
        return self.custom_trial_name

    if "env" in self.config:
        env = self.config["env"]
        if isinstance(env, type):
            env = env.__name__
        identifier = "{}_{}".format(self.trainable_name, env)
    else:
        identifier = self.trainable_name
    if include_trial_id:
        identifier += "_" + self.trial_id
    return identifier.replace("/", "_")
8a2f6bda62378c07a66169ee49504cc3703f7d35
11
trial.py
146
[tune/structure] Introduce experiment package (#26033) Experiment, Trial, and config parsing moves into an `experiment` package. Notably, the new public facing APIs will be ``` from ray.tune.experiment import Experiment from ray.tune.experiment import Trial ```
32,846
0
161
85
28
142,916
38
ray
14
python/ray/tune/experiment/trial.py
Python
13
{ "docstring": "Combines ``env`` with ``trainable_name`` and ``trial_id``.\n\n Can be overridden with a custom string creator.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 13 }
https://github.com/ray-project/ray.git
9
class_distribution
def class_distribution(y, sample_weight=None): classes = [] n_classes = [] class_prior = [] n_samples, n_outputs = y.shape if sample_weight is not None: sample_weight = np.asarray(sample_weight) if issparse(y): y = y.tocsc() y_nnz = np.diff(y.indptr) for k in range(n_outputs): col_nonzero = y.indices[y.indptr[k] : y.indptr[k + 1]] # separate sample weights for zero and non-zero elements if sample_weight is not None: nz_samp_weight = sample_weight[col_nonzero] zeros_samp_weight_sum = np.sum(sample_weight) - np.sum(nz_samp_weight) else: nz_samp_weight = None zeros_samp_weight_sum = y.shape[0] - y_nnz[k] classes_k, y_k = np.unique( y.data[y.indptr[k] : y.indptr[k + 1]], return_inverse=True ) class_prior_k = np.bincount(y_k, weights=nz_samp_weight) # An explicit zero was found, combine its weight with the weight # of the implicit zeros if 0 in classes_k: class_prior_k[classes_k == 0] += zeros_samp_weight_sum # If an there is an implicit zero and it is not in classes and # class_prior, make an entry for it if 0 not in classes_k and y_nnz[k] < y.shape[0]: classes_k = np.insert(classes_k, 0, 0) class_prior_k = np.insert(class_prior_k, 0, zeros_samp_weight_sum) classes.append(classes_k) n_classes.append(classes_k.shape[0]) class_prior.append(class_prior_k / class_prior_k.sum()) else: for k in range(n_outputs): classes_k, y_k = np.unique(y[:, k], return_inverse=True) classes.append(classes_k) n_classes.append(classes_k.shape[0]) class_prior_k = np.bincount(y_k, weights=sample_weight) class_prior.append(class_prior_k / class_prior_k.sum()) return (classes, n_classes, class_prior)
e4a7edc4aec597cba8b2bbce704772c7872e55f8
16
multiclass.py
541
DOC ensures sklearn.utils.multiclass.class_distribution passes numpydoc validation (#24452)
76,590
0
598
348
106
260,959
185
scikit-learn
33
sklearn/utils/multiclass.py
Python
38
{ "docstring": "Compute class priors from multioutput-multiclass target data.\n\n Parameters\n ----------\n y : {array-like, sparse matrix} of size (n_samples, n_outputs)\n The labels for each example.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n classes : list of size n_outputs of ndarray of size (n_classes,)\n List of classes for each column.\n\n n_classes : list of int of size n_outputs\n Number of classes in each column.\n\n class_prior : list of size n_outputs of ndarray of size (n_classes,)\n Class distribution of each column.\n ", "language": "en", "n_whitespaces": 146, "n_words": 81, "vocab_size": 46 }
https://github.com/scikit-learn/scikit-learn.git
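A small illustrative call (values chosen arbitrarily) showing the shape of the three lists described in that docstring:

import numpy as np
from sklearn.utils.multiclass import class_distribution

# Two output columns, three samples
y = np.array([[1, 0],
              [2, 0],
              [1, 1]])
classes, n_classes, class_prior = class_distribution(y)
print(classes)      # [array([1, 2]), array([0, 1])]
print(n_classes)    # [2, 2]
print(class_prior)  # per-column priors, e.g. [array([0.667, 0.333]), array([0.667, 0.333])]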
1
evaluate
def evaluate(self) -> float | None | Tuple[float, Any] | Tuple[None, Any]:
    # Note that the first item of the returned value will be used as the default metric used by NNI.
    raise NotImplementedError
5a3d82e842906dc8f695fafe52434fde781615be
7
evaluator.py
41
[Compression] lightning & legacy evaluator - step 1 (#4950)
24,843
0
55
26
29
113,152
34
nni
6
nni/algorithms/compression/v2/pytorch/utils/evaluator.py
Python
9
{ "docstring": "\n NNI assume the evaluation function user passed in should return a float number or a dict as metric.\n If the evaluation function returned a dict, take the value with dict key ``default`` as the first element of ``evaluate`` returned value,\n and put the dict as the second element of the returned value.\n For any other type of the metric returned by evaluation function, ``evaluate`` will directly returned\n (it should be a float, but NNI does not prevent other types from being returned, this will handle by the object calling ``evaluate``).\n ", "language": "en", "n_whitespaces": 133, "n_words": 90, "vocab_size": 59 }
https://github.com/microsoft/nni.git
1
_get_gradients
def _get_gradients(self, tape, loss, var_list, grad_loss=None):
    grads = tape.gradient(loss, var_list, grad_loss)
    return list(zip(grads, var_list))
84afc5193d38057e2e2badf9c889ea87d80d8fbf
9
optimizer_v2.py
56
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,418
0
35
38
13
275,528
14
keras
10
keras/optimizers/optimizer_v2/optimizer_v2.py
Python
3
{ "docstring": "Called in `minimize` to compute gradients from loss.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/keras-team/keras.git
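For orientation, a self-contained sketch of the tape-based pattern that method wraps (toy variables rather than a real Keras model):

import tensorflow as tf

w = tf.Variable(3.0)
b = tf.Variable(1.0)

with tf.GradientTape() as tape:
    loss = (w * 2.0 + b - 5.0) ** 2

grads = tape.gradient(loss, [w, b])
grads_and_vars = list(zip(grads, [w, b]))  # same (gradient, variable) structure _get_gradients returns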
3
p_mean_variance
def p_mean_variance(self, model, x, t, transformer_out, clip_denoised=True, model_kwargs=None):
    if model_kwargs is None:
        model_kwargs = {}

    B, C = x.shape[:2]
    assert t.shape == (B,)
    model_output = model(x, t, transformer_out)

    assert model_output.shape == (B, C * 2, *x.shape[2:])
    model_output, model_var_values = torch.split(model_output, C, dim=1)
    min_log = _extract_into_tensor(self.noise_scheduler.posterior_log_variance_clipped, t, x.shape)
    max_log = _extract_into_tensor(np.log(self.noise_scheduler.betas), t, x.shape)
    # The model_var_values is [-1, 1] for [min_var, max_var].
    frac = (model_var_values + 1) / 2
    model_log_variance = frac * max_log + (1 - frac) * min_log
    model_variance = torch.exp(model_log_variance)

    pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
    if clip_denoised:
        pred_xstart = pred_xstart.clamp(-1, 1)
    model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)

    assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
    return model_mean, model_variance, model_log_variance, pred_xstart
1e21f061601dda0aa9740e88bfce68bf4aac4acd
12
modeling_glide.py
356
Classifier-free guidance scheduler + GLIDe pipeline
120,634
0
261
243
77
334,468
113
diffusers
37
models/vision/glide/modeling_glide.py
Python
19
{ "docstring": "\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n ", "language": "en", "n_whitespaces": 276, "n_words": 116, "vocab_size": 76 }
https://github.com/huggingface/diffusers.git
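The learned-variance interpolation in the middle of that function, pulled out as a standalone numeric sketch (all values below are made up): the model's extra output channel in [-1, 1] is mapped to a fraction and used to interpolate between the posterior's minimum and maximum log-variance.

import numpy as np

model_var_values = np.array([-0.5, 0.0, 0.9])     # raw variance channel in [-1, 1]
min_log = np.log(np.array([1e-4, 2e-4, 3e-4]))    # posterior_log_variance_clipped at t
max_log = np.log(np.array([5e-4, 6e-4, 7e-4]))    # log(betas) at t

frac = (model_var_values + 1) / 2                 # map to [0, 1]
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = np.exp(model_log_variance)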
1
button_platform_only
def button_platform_only():
    with patch(
        "homeassistant.components.zha.PLATFORMS",
        (
            Platform.BINARY_SENSOR,
            Platform.BUTTON,
            Platform.DEVICE_TRACKER,
            Platform.NUMBER,
            Platform.SELECT,
            Platform.SENSOR,
        ),
    ):
        yield


@pytest.fixture
4bc5d7bfed07c20d6f3438ab91c734a620505a33
@pytest.fixture
11
test_button.py
71
Speed up zha tests (#73627)
112,578
1
118
40
16
313,967
16
core
11
tests/components/zha/test_button.py
Python
13
{ "docstring": "Only setup the button and required base platforms to speed up tests.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/home-assistant/core.git
1
get_blocks_with_metadata
def get_blocks_with_metadata(self) -> List[Tuple[ObjectRef[Block], BlockMetadata]]:
    self.get_blocks()  # Force bulk evaluation in LazyBlockList.
    return list(self.iter_blocks_with_metadata())
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
9
block_list.py
55
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,329
0
36
33
14
130,603
14
ray
10
python/ray/data/impl/block_list.py
Python
8
{ "docstring": "Bulk version of iter_blocks_with_metadata().\n\n Prefer calling this instead of the iter form for performance if you\n don't need lazy evaluation.\n ", "language": "en", "n_whitespaces": 41, "n_words": 20, "vocab_size": 19 }
https://github.com/ray-project/ray.git
6
get_setters
def get_setters(self):
    setters = []
    for name in dir(self.o):
        if not name.startswith('set_'):
            continue
        func = getattr(self.o, name)
        if (not callable(func)
                or self.number_of_parameters(func) < 2
                or self.is_alias(func)):
            continue
        setters.append(name[4:])
    return setters
87197156eb299c1f37ac23c16c83ecb000e9872b
13
artist.py
130
improve matplotlib import time by caching ArtistInspector
23,628
0
170
78
25
109,533
30
matplotlib
13
lib/matplotlib/artist.py
Python
12
{ "docstring": "\n Get the attribute strings with setters for object.\n\n For example, for a line, return ``['markerfacecolor', 'linewidth',\n ....]``.\n ", "language": "en", "n_whitespaces": 46, "n_words": 17, "vocab_size": 16 }
https://github.com/matplotlib/matplotlib.git
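A hedged usage sketch of the inspector this method belongs to:

import matplotlib.pyplot as plt
from matplotlib.artist import ArtistInspector

line, = plt.plot([0, 1], [0, 1])
inspector = ArtistInspector(line)
# Property names whose set_* methods exist on the artist, e.g. 'alpha', 'color', ...
print(inspector.get_setters()[:5])
plt.close("all")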
1
test_cable_cannot_have_the_same_terminination_on_both_ends
def test_cable_cannot_have_the_same_terminination_on_both_ends(self):
    cable = Cable(a_terminations=[self.interface1], b_terminations=[self.interface1])
    with self.assertRaises(ValidationError):
        cable.clean()
3a461d02793e6f9d41c2b1a92647e691de1abaac
11
test_models.py
67
Update Cable instantiations to match new signature
77,900
0
41
39
9
264,889
9
netbox
10
netbox/dcim/tests/test_models.py
Python
4
{ "docstring": "\n A cable cannot be made with the same A and B side terminations\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
https://github.com/netbox-community/netbox.git
3
_parse_img_level_ann
def _parse_img_level_ann(self, image_level_ann_file):
    item_lists = defaultdict(list)
    with self.file_client.get_local_path(
            image_level_ann_file) as local_path:
        with open(local_path, 'r') as f:
            reader = csv.reader(f)
            i = -1
            for line in reader:
                i += 1
                if i == 0:
                    continue
                else:
                    img_id = line[0]
                    label_id = line[1]
                    assert label_id in self.label_id_mapping
                    image_level_label = int(
                        self.label_id_mapping[label_id])
                    confidence = float(line[2])
                    item_lists[img_id].append(
                        dict(
                            image_level_label=image_level_label,
                            confidence=confidence))
    return item_lists
36c1f477b273cb2fb0dea3c921ec267db877c039
19
openimages.py
201
Refactor OpenImages.
70,677
0
491
122
45
245,152
58
mmdetection
24
mmdet/datasets/openimages.py
Python
23
{ "docstring": "Parse image level annotations from csv style ann_file.\n\n Args:\n image_level_ann_file (str): CSV style image level annotation\n file path.\n\n Returns:\n defaultdict[list[dict]]: Annotations where item of the defaultdict\n indicates an image, each of which has (n) dicts.\n Keys of dicts are:\n\n - `image_level_label` (int): of shape 1.\n - `confidence` (float): of shape 1.\n ", "language": "en", "n_whitespaces": 161, "n_words": 51, "vocab_size": 41 }
https://github.com/open-mmlab/mmdetection.git
3
stride_pool_pos
def stride_pool_pos(self, pos_id, block_index):
    if self.separate_cls:
        # Under separate <cls>, we treat the <cls> as the first token in
        # the previous block of the 1st real block. Since the 1st real
        # block always has position 1, the position of the previous block
        # will be at `1 - 2 ** block_index`.
        cls_pos = tf.constant([-(2**block_index) + 1], dtype=pos_id.dtype)
        pooled_pos_id = pos_id[1:-1] if self.truncate_seq else pos_id[1:]
        return tf.concat([cls_pos, pooled_pos_id[::2]], 0)
    else:
        return pos_id[::2]
7732d0fe7a759c9844215920e9f1c5540eafb1a6
15
modeling_tf_funnel.py
131
Upgrade black to version ~=22.0 (#15565) * Upgrade black to version ~=22.0 * Check copies * Fix code
6,387
0
182
82
54
35,045
73
transformers
12
src/transformers/models/funnel/modeling_tf_funnel.py
Python
7
{ "docstring": "\n Pool `pos_id` while keeping the cls token separate (if `self.separate_cls=True`).\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/huggingface/transformers.git
3
_mark_func_graph_as_unsaveable
def _mark_func_graph_as_unsaveable(graph, learning_phase):
    if graph.building_function and is_placeholder(learning_phase):
        graph.mark_as_unsaveable(
            "The keras learning phase placeholder was used inside a function. "
            "Exporting placeholders is not supported when saving out a SavedModel. "
            "Please call `tf.keras.backend.set_learning_phase(0)` in the function "
            "to set the learning phase to a constant value."
        )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
11
backend.py
53
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,234
0
111
27
40
269,615
47
keras
6
keras/backend.py
Python
8
{ "docstring": "Mark func graph as unsaveable due to use of symbolic keras learning phase.\n\n Functions that capture the symbolic learning phase cannot be exported to\n SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised\n if it is exported.\n\n Args:\n graph: Graph or FuncGraph object.\n learning_phase: Learning phase placeholder or int defined in the graph.\n ", "language": "en", "n_whitespaces": 82, "n_words": 57, "vocab_size": 46 }
https://github.com/keras-team/keras.git
5
maybe_fsdp_wrap
def maybe_fsdp_wrap(opt):
    if not should_use_fsdp(opt):
        # make a no-op
        yield
        return

    # zero3 not supported at this time. Throw an exception
    if opt['ddp_backend'] == 'zero3':
        raise NotImplementedError(
            '--ddp-backend zero3 is not supported at this time. For details, see '
            'https://github.com/facebookresearch/ParlAI/issues/3753.'
        )

    reshard_after_forward = opt['ddp_backend'] == 'zero3'
    compute_dtype = torch.float16 if opt['fp16'] else torch.float32
    mixed_precision = opt['fp16'] and opt['fp16_impl'] == 'safe'
    fsdp_args = dict(
        reshard_after_forward=reshard_after_forward,
        mixed_precision=mixed_precision,
        compute_dtype=compute_dtype,
        state_dict_device=torch.device('cpu'),
        flatten_parameters=False,
        process_group=get_dist_group(),
    )
    with fairscale_enable_wrap(wrapper_cls=FSDP, **fsdp_args):
        yield
5322cd4f5821e339bf1edab98d93b5a008b97a2b
12
fsdp.py
200
[circle] Fixing broken unit tests (#4343)
47,093
0
210
112
55
194,805
74
ParlAI
20
parlai/utils/fsdp.py
Python
22
{ "docstring": "\n Context manager for enabling wrapping in FullyShardedDataParallel.\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
https://github.com/facebookresearch/ParlAI.git
2
apply
def apply(self, X):
    X = self._validate_X_predict(X)
    results = Parallel(
        n_jobs=self.n_jobs,
        verbose=self.verbose,
        prefer="threads",
    )(delayed(tree.apply)(X, check_input=False) for tree in self.estimators_)

    return np.array(results).T
5f75acdd12d77b973471961ad716367c6199d01c
12
_forest.py
105
MNT Bump joblib version dependency to 1.0.0 (#22365)
75,393
0
88
67
19
258,733
20
scikit-learn
16
sklearn/ensemble/_forest.py
Python
8
{ "docstring": "\n Apply trees in the forest to X, return leaf indices.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n\n Returns\n -------\n X_leaves : ndarray of shape (n_samples, n_estimators)\n For each datapoint x in X and for each tree in the forest,\n return the index of the leaf x ends up in.\n ", "language": "en", "n_whitespaces": 190, "n_words": 78, "vocab_size": 56 }
https://github.com/scikit-learn/scikit-learn.git
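A short usage example of the leaf-index API the docstring describes, on synthetic data:

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=20, n_features=4, random_state=0)
forest = RandomForestClassifier(n_estimators=3, random_state=0).fit(X, y)

leaves = forest.apply(X)
print(leaves.shape)  # (20, 3): one leaf index per sample per tree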
5
_get_all_ret_events_after_time
def _get_all_ret_events_after_time(masters, minions, event_listener, start_time):
    minion_pattern = "salt/job/*/ret/{}"
    events = []

    for minion in minions:
        tag = minion_pattern.format(minion.id)
        matchers = [(master.id, tag) for master in masters]
        ret_events = event_listener.get_events(matchers, after_time=start_time)
        events.append([event for event in ret_events if event.data["fun"] == "test.echo"])

    return tuple(events)
fbecbe82483ffed562c0d2673d5a5e792553aec2
14
test_multimaster.py
138
fix multimaster tests failing
54,075
0
84
87
32
215,653
41
salt
20
tests/pytests/scenarios/multimaster/test_multimaster.py
Python
9
{ "docstring": "\n Get all the ret events that happened after `start_time`\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/saltstack/salt.git
3
test_not_recorded_if_not_used
def test_not_recorded_if_not_used(self, dag_maker, xcom_value):
    with dag_maker(dag_id="test_not_recorded_for_unused") as dag:
197cff3194e855b9207c3c0da8ae093a0d5dda55
12
test_taskinstance.py
38
Ensure TaskMap only checks "relevant" dependencies (#23053) When looking for "mapped dependants" of a task, we only want a task if it not only is a direct downstream of the task, but also it actually "uses" the task's pushed XCom for task mapping. So we need to peek into the mapped downstream task's expansion kwargs, and only count it as a mapped dependant if the upstream is referenced there.
9,249
0
22
86
8
47,760
8
airflow
6
tests/models/test_taskinstance.py
Python
10
{ "docstring": "Return value should not be recorded if no downstreams are mapped.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/apache/airflow.git
44
get_active_users
def get_active_users(browser, username, posts, boundary, logger): user_link = "https://www.instagram.com/{}/".format(username) # check URL of the webpage, if it already is user's profile page, # then do not navigate to it again web_address_navigator(browser, user_link) try: total_posts = browser.execute_script( "return window._sharedData.entry_data." "ProfilePage[0].graphql.user.edge_owner_to_timeline_media.count" ) except WebDriverException: try: topCount_elements = browser.find_elements( By.XPATH, read_xpath(get_active_users.__name__, "topCount_elements") ) if topCount_elements: # prevent an empty string scenario total_posts = format_number(topCount_elements[0].text) else: logger.info("Failed to get posts count on your profile! ~empty string") total_posts = None except NoSuchElementException: logger.info("Failed to get posts count on your profile!") total_posts = None # if posts > total user posts, assume total posts posts = ( posts if total_posts is None else total_posts if posts > total_posts else posts ) active_users = [] sc_rolled = 0 start_time = time.time() too_many_requests = 0 # helps to prevent misbehaviours when requests # list of active users repeatedly within less than 10 min of breaks message = ( "~collecting the entire usernames from posts without a boundary!\n" if boundary is None else "~collecting only the visible usernames from posts without scrolling " "at the boundary of zero..\n" if boundary == 0 else "~collecting the usernames from posts with the boundary of {}" "\n".format(boundary) ) # posts argument is the number of posts to collect usernames logger.info( "Getting active users who liked the latest {} posts:\n {}".format( posts, message ) ) count = 1 checked_posts = 0 user_list = [] while count <= posts: # load next post try: latest_post = browser.find_element( By.XPATH, read_xpath(get_active_users.__name__, "profile_posts").format(count), ) # avoid no posts if latest_post: click_element(browser, latest_post) except (NoSuchElementException, WebDriverException): logger.warning("Failed to click on the latest post to grab active likers!") return [] try: checked_posts += 1 sleep_actual(2) try: likers_count = browser.find_element( By.XPATH, read_xpath(get_active_users.__name__, "likers_count") ).text if likers_count: # prevent an empty string scenarios likers_count = format_number(likers_count) # liked by 'username' AND 165 others (166 in total) likers_count += 1 else: logger.info( "Failed to get likers count on your post {} " "~empty string".format(count) ) likers_count = None except NoSuchElementException: logger.info("Failed to get likers count on your post {}".format(count)) likers_count = None try: likes_button = browser.find_elements( By.XPATH, read_xpath(get_active_users.__name__, "likes_button") ) if likes_button != []: if likes_button[1] is not None: likes_button = likes_button[1] else: likes_button = likes_button[0] click_element(browser, likes_button) sleep_actual(3) else: raise NoSuchElementException except (IndexError, NoSuchElementException): # Video have no likes button / no posts in page logger.info("Video found, try next post until we run out of posts") # edge case of account having only videos, or last post is a video. 
if checked_posts >= total_posts: break # if not reached posts(parameter) value, continue (but load next post) browser.back() # go to next post count += 1 continue # get a reference to the 'Likes' dialog box dialog = browser.find_element( By.XPATH, read_xpath("class_selectors", "likes_dialog_body_xpath") ) scroll_it = True try_again = 0 start_time = time.time() if likers_count: amount = ( likers_count if boundary is None else None if boundary == 0 else (boundary if boundary < likers_count else likers_count) ) else: amount = None tmp_scroll_height = 0 user_list_len = -1 while scroll_it is not False and boundary != 0: scroll_height = browser.execute_script( ) # check if it should keep scrolling down or exit if ( scroll_height >= tmp_scroll_height and len(user_list) > user_list_len ): tmp_scroll_height = scroll_height user_list_len = len(user_list) scroll_it = True else: scroll_it = False if scroll_it is True: scroll_it = browser.execute_script("window.scrollBy(0, 1000)") update_activity(browser, state=None) if sc_rolled > 91 or too_many_requests > 1: # old value 100 print("\n") logger.info("Too Many Requests sent! ~will sleep some :>\n") sleep_actual(600) sc_rolled = 0 too_many_requests = ( 0 if too_many_requests >= 1 else too_many_requests ) else: sleep_actual(1.2) # old value 5.6 sc_rolled += 1 user_list = get_users_from_dialog(user_list, dialog, logger) # write & update records at Progress Tracker if amount: progress_tracker(len(user_list), amount, start_time, None) print("\n") if boundary is not None: if len(user_list) >= boundary: break if ( scroll_it is False and likers_count and likers_count - 1 > len(user_list) ): if ( boundary is not None and likers_count - 1 > boundary ) or boundary is None: if try_again <= 1: # can increase the amount of tries logger.info( "Failed to get the desired amount of " "usernames but trying again..." "\t|> post: {} |> attempt: {}\n".format( posts, try_again + 1 ) ) try_again += 1 too_many_requests += 1 scroll_it = True nap_it = 4 if try_again == 0 else 7 sleep_actual(nap_it) user_list = get_users_from_dialog(user_list, dialog, logger) logger.info( "Post {} | Likers: found {}, catched {}\n\n".format( count, likers_count, len(user_list) ) ) except NoSuchElementException as exc: logger.error( "Ku-ku! There is an error searching active users" "~\t{}\n\n".format(str(exc).encode("utf-8")) ) for user in user_list: active_users.append(user) sleep_actual(1) # if not reached posts(parameter) value, continue if count != posts + 1: try: # click close button close_dialog_box(browser) browser.back() except Exception: logger.error("Unable to go to next profile post") count += 1 real_time = time.time() diff_in_minutes = int((real_time - start_time) / 60) diff_in_seconds = int((real_time - start_time) % 60) # delete duplicated users active_users = list(set(active_users)) logger.info( "Gathered total of {} unique active followers from the latest {} " "posts in {} minutes and {} seconds".format( len(active_users), posts, diff_in_minutes, diff_in_seconds ) ) return active_users
2a157d452611d37cf50ccb7d56ff1a06e9790ecb
23
util.py
1,504
PR - Fix `extract_text_from_element()`and `find_element*()` to `find_element()` (#6438) * Updated getUserData() and find_element* Signed-off-by: elulcao <elulcao@icloud.com> Thanks @breuerfelix for reviewing, 🚀 People in this thread please let me know if something is not OK, IG changed a lot these days. 🤗 @her
844
0
3,763
882
340
5,822
836
InstaPy
68
instapy/util.py
Python
203
{ "docstring": "Returns a list with usernames who liked the latest n posts\n let main = document.getElementsByTagName('main')\n return main[0].scrollHeight\n ", "language": "en", "n_whitespaces": 74, "n_words": 17, "vocab_size": 17 }
https://github.com/InstaPy/InstaPy.git
1
test_complex_pipeline_with_shared_prompt_model_yaml
def test_complex_pipeline_with_shared_prompt_model_yaml(tmp_path):
    with open(tmp_path / "tmp_config.yml", "w") as tmp_file:
        tmp_file.write(
            f
        )
    pipeline = Pipeline.load_from_yaml(path=tmp_path / "tmp_config.yml")
    result = pipeline.run(query="not relevant", documents=[Document("Berlin is an amazing city.")])
    assert "Berlin" in result["results"][0]
    assert len(result["meta"]["invocation_context"]) > 0
9ebf164cfdfb320503b7161493420c1b0ec577a3
13
test_prompt_node.py
141
feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667) Co-authored-by: ZanSara <sarazanzo94@gmail.com>
75,230
0
73
78
31
258,375
34
haystack
15
test/nodes/test_prompt_node.py
Python
34
{ "docstring": "\n version: ignore\n components:\n - name: pmodel\n type: PromptModel\n - name: p1\n params:\n model_name_or_path: pmodel\n default_prompt_template: question-generation\n output_variable: questions\n type: PromptNode\n - name: p2\n params:\n model_name_or_path: pmodel\n default_prompt_template: question-answering\n type: PromptNode\n pipelines:\n - name: query\n nodes:\n - name: p1\n inputs:\n - Query\n - name: p2\n inputs:\n - p1\n ", "language": "en", "n_whitespaces": 371, "n_words": 47, "vocab_size": 23 }
https://github.com/deepset-ai/haystack.git
1
test_new_export
def test_new_export(self):
    payload = self.make_payload("issue")
    with self.feature("organizations:discover-query"):
        response = self.get_success_response(self.org.slug, status_code=201, **payload)
    data_export = ExportedData.objects.get(id=response.data["id"])
    assert response.data == {
        "id": data_export.id,
        "user": {
            "id": str(self.user.id),
            "email": self.user.email,
            "username": self.user.username,
        },
        "dateCreated": data_export.date_added,
        "dateFinished": None,
        "dateExpired": None,
        "query": {
            "type": payload["query_type"],
            "info": payload["query_info"],
        },
        "status": ExportStatus.Early,
        "checksum": None,
        "fileName": None,
    }
096b5511e244eecd8799b2a0324655207ce8985e
14
test_data_export.py
258
ref(tests): Remove `get_valid_response()` (#34822)
19,772
0
299
150
41
100,179
50
sentry
23
tests/sentry/data_export/endpoints/test_data_export.py
Python
23
{ "docstring": "\n Ensures that a request to this endpoint returns a 201 status code\n and an appropriate response object\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 16 }
https://github.com/getsentry/sentry.git
2
test_masked_unmasked_combinations
def test_masked_unmasked_combinations(self):
    cases = [
        (TEST_SECRET, TEST_SECRET, None),
        (TEST_SECRET, MASKED_TEST_SECRET2, None),
        (TEST_SECRET, None, TEST_SECRET),
        (TEST_SECRET, None, MASKED_TEST_SECRET2),
        (MASKED_TEST_SECRET1, TEST_SECRET, None),
        (MASKED_TEST_SECRET1, MASKED_TEST_SECRET2, None),
        (MASKED_TEST_SECRET1, None, TEST_SECRET),
        (MASKED_TEST_SECRET1, None, MASKED_TEST_SECRET2),
    ]
    for args in cases:
        with self.subTest(args=args):
            cookie, post_token, meta_token = args
            req = self._get_POST_csrf_cookie_request(
                cookie=cookie,
                post_token=post_token,
                meta_token=meta_token,
            )
            mw = CsrfViewMiddleware(token_view)
            mw.process_request(req)
            resp = mw.process_view(req, token_view, (), {})
            self.assertIsNone(resp)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
tests.py
207
Refs #33476 -- Reformatted code with Black.
50,085
0
348
149
38
202,367
59
django
20
tests/csrf_tests/tests.py
Python
23
{ "docstring": "\n All combinations are allowed of (1) masked and unmasked cookies,\n (2) masked and unmasked tokens, and (3) tokens provided via POST and\n the X-CSRFToken header.\n ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 20 }
https://github.com/django/django.git
1
mode
def mode(self, **kwargs):  # noqa: PR02
    return DataFrameDefault.register(pandas.DataFrame.mode)(self, **kwargs)
57e29bc5d82348006c5170ef9ac0a9eedcd9acf9
10
query_compiler.py
44
REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514) Co-authored-by: Rehan Sohail Durrani <rdurrani@berkeley.edu> Signed-off-by: jeffreykennethli <jkli@ponder.io>
35,631
0
24
26
9
153,816
9
modin
7
modin/core/storage_formats/base/query_compiler.py
Python
2
{ "docstring": "\n Get the modes for every column or row.\n\n Parameters\n ----------\n axis : {0, 1}\n numeric_only : bool\n dropna : bool\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with modes calculated along given axis.\n ", "language": "en", "n_whitespaces": 143, "n_words": 43, "vocab_size": 36 }
https://github.com/modin-project/modin.git
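A minimal usage sketch of the behaviour this query-compiler method defers to (illustrative only: the method simply registers pandas.DataFrame.mode, so the plain-pandas call below shows the expected result):

import pandas as pd

df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 3, 3]})
print(df.mode())        # most frequent value(s) per column
print(df.mode(axis=1))  # most frequent value(s) per row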
5
reduce_using_automaton
def reduce_using_automaton(self, word):
    # Modify the automaton if new rules are found.
    if self._new_rules:
        self._add_to_automaton(self._new_rules)
        self._new_rules = {}

    flag = 1
    while flag:
        flag = 0
        current_state = self.reduction_automaton.states['start']
        for i, s in enumerate(word.letter_form_elm):
            next_state_name = current_state.transitions[s]
            next_state = self.reduction_automaton.states[next_state_name]
            if next_state.state_type == 'd':
                subst = next_state.rh_rule
                word = word.substituted_word(i - len(next_state_name) + 1, i+1, subst)
                flag = 1
                break
            current_state = next_state

    return word
7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c
18
rewritingsystem.py
191
Cleanup loops and ranges
48,884
0
298
118
48
198,364
65
sympy
21
sympy/combinatorics/rewritingsystem.py
Python
18
{ "docstring": "\n Reduce a word using an automaton.\n\n Summary:\n All the symbols of the word are stored in an array and are given as the input to the automaton.\n If the automaton reaches a dead state that subword is replaced and the automaton is run from the beginning.\n The complete word has to be replaced when the word is read and the automaton reaches a dead state.\n So, this process is repeated until the word is read completely and the automaton reaches the accept state.\n\n Arguments:\n word (instance of FreeGroupElement) -- Word that needs to be reduced.\n\n ", "language": "en", "n_whitespaces": 163, "n_words": 95, "vocab_size": 53 }
https://github.com/sympy/sympy.git
8
_format
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):  # noqa
    if val is None:
        return missingval

    if valtype in [int, _text_type]:
        return "{0}".format(val)
    elif valtype is _binary_type:
        try:
            return _text_type(val, "ascii")
        except TypeError:
            return _text_type(val)
    elif valtype is float:
        is_a_colored_number = has_invisible and isinstance(
            val, (_text_type, _binary_type)
        )
        if is_a_colored_number:
            raw_val = _strip_invisible(val)
            formatted_val = format(float(raw_val), floatfmt)
            return val.replace(raw_val, formatted_val)
        else:
            return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
adf24bfa9723b0621183bb27f0c889b813c06e8a
15
tabulate.py
251
[State Observability] Use a table format by default (#26159) NOTE: tabulate is copied/pasted to the codebase for table formatting. This PR changes the default layout to be the table format for both summary and list APIs.
27,798
0
224
132
47
125,184
65
ray
18
python/ray/_private/thirdparty/tabulate/tabulate.py
Python
22
{ "docstring": "Format a value according to its type.\n\n Unicode is supported:\n\n >>> hrow = ['\\u0431\\u0443\\u043a\\u0432\\u0430', '\\u0446\\u0438\\u0444\\u0440\\u0430'] ; \\\n tbl = [['\\u0430\\u0437', 2], ['\\u0431\\u0443\\u043a\\u0438', 4]] ; \\\n good_result = '\\\\u0431\\\\u0443\\\\u043a\\\\u0432\\\\u0430 \\\\u0446\\\\u0438\\\\u0444\\\\u0440\\\\u0430\\\\n------- -------\\\\n\\\\u0430\\\\u0437 2\\\\n\\\\u0431\\\\u0443\\\\u043a\\\\u0438 4' ; \\\n tabulate(tbl, headers=hrow) == good_result\n True\n\n ", "language": "en", "n_whitespaces": 100, "n_words": 39, "vocab_size": 32 }
https://github.com/ray-project/ray.git
3
test_single_ignore_dates_set
def test_single_ignore_dates_set(self):
    test_cases = [
        ("1985-05-01", [datetime.date(1985, 5, 1)]),
        (
            "1985-05-01,1991-12-05",
            [datetime.date(1985, 5, 1), datetime.date(1991, 12, 5)],
        ),
        ("2010-12-13", [datetime.date(2010, 12, 13)]),
    ]
    for env_str, expected_dates in test_cases:
        expected_date_set = set()
        for expected_date in expected_dates:
            expected_date_set.add(expected_date)
        self.assertSetEqual(
            _parse_ignore_dates(env_str),
            expected_date_set,
        )
8a6aaf4e2d05021a14adc681c66dff6a815aa2a0
11
test_settings.py
161
Adds additional testing for both date parsing and consumed document created date
116,958
0
231
108
34
319,518
40
paperless-ngx
13
src/paperless/tests/test_settings.py
Python
17
{ "docstring": "\n GIVEN:\n - Ignore dates are set per certain inputs\n THEN:\n - All ignore dates are parsed\n ", "language": "en", "n_whitespaces": 60, "n_words": 16, "vocab_size": 13 }
https://github.com/paperless-ngx/paperless-ngx.git
5
get_version
def get_version(name, members):
    # the expression ending for versions must start as
    # '.so.[0-9]', i.e., *.so.[at least one digit]
    # while multiple, more specific expressions could be specified
    # to search for .so.X, .so.X.Y and .so.X.Y.Z
    # after the first required 'dot' digit
    # any combination of additional 'dot' digits pairs are accepted
    # anything more than libFOO.so.digits.digits.digits
    # should be seen as a member name outside normal expectations
    exprs = [rf'lib{name}\.so\.[0-9]+[0-9.]*', rf'lib{name}_?64\.so\.[0-9]+[0-9.]*']
    for expr in exprs:
        versions = []
        for line in members:
            m = re.search(expr, line)
            if m:
                versions.append(m.group(0))
        if versions:
            return _last_version(versions, '.')
    return None
8198943edd73a363c266633e1aa5b2a9e9c9f526
15
_aix.py
124
add python 3.10.4 for windows
56,521
0
210
67
77
221,805
98
XX-Net
13
python3.10.4/Lib/ctypes/_aix.py
Python
12
{ "docstring": "\n Sort list of members and return highest numbered version - if it exists.\n This function is called when an unversioned libFOO.a(libFOO.so) has\n not been found.\n\n Versioning for the member name is expected to follow\n GNU LIBTOOL conventions: the highest version (x, then X.y, then X.Y.z)\n * find [libFoo.so.X]\n * find [libFoo.so.X.Y]\n * find [libFoo.so.X.Y.Z]\n\n Before the GNU convention became the standard scheme regardless of\n binary size AIX packagers used GNU convention \"as-is\" for 32-bit\n archive members but used an \"distinguishing\" name for 64-bit members.\n This scheme inserted either 64 or _64 between libFOO and .so\n - generally libFOO_64.so, but occasionally libFOO64.so\n ", "language": "en", "n_whitespaces": 147, "n_words": 101, "vocab_size": 75 }
https://github.com/XX-net/XX-Net.git
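A hedged sketch of how the helper above picks the highest-versioned archive member; the member names below are made up for illustration, and get_version/_last_version are internal helpers in ctypes/_aix.py:

members = ["libssl.so", "libcrypto.so.1.0.2", "libcrypto.so.3"]
print(get_version("crypto", members))  # 'libcrypto.so.3' (3 sorts above 1.0.2)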
4
result_list
def result_list(context):
    view = context["view"]
    object_list = context["object_list"]
    headers = list(result_headers(view))
    num_sorted_fields = 0
    for h in headers:
        if h["sortable"] and h["sorted"]:
            num_sorted_fields += 1
    context.update(
        {
            "result_headers": headers,
            "num_sorted_fields": num_sorted_fields,
            "results": list(results(view, object_list, context["request"])),
        }
    )
    return context


@register.simple_tag
d10f15e55806c6944827d801cd9c2d53f5da4186
@register.simple_tag
15
modeladmin_tags.py
150
Reformat with black
15,981
1
131
83
36
73,191
40
wagtail
13
wagtail/contrib/modeladmin/templatetags/modeladmin_tags.py
Python
16
{ "docstring": "\n Displays the headers and data list together\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
https://github.com/wagtail/wagtail.git
1
test_wrapped_bleak_client_raises_device_missing
async def test_wrapped_bleak_client_raises_device_missing(hass, enable_bluetooth):
    switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
    client = HaBleakClientWrapper(switchbot_device)
    assert client.is_connected is False
    with pytest.raises(bleak.BleakError):
        await client.connect()
    assert client.is_connected is False
    await client.disconnect()
1b144c0e4dd683e3b47668a89da5eb6da4ae5e08
10
test_models.py
100
Update to bleak 0.18.0 (#79008)
86,939
0
53
56
19
287,751
25
core
14
tests/components/bluetooth/test_models.py
Python
8
{ "docstring": "Test wrapped bleak client dispatches calls as expected.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
1
add_exomw
def add_exomw(self):
    from nltk.corpus import extended_omw

    self._exomw_reader = extended_omw
    self.add_provs(self._exomw_reader)
8ffd0d8190552d45f8b92e18da3fc41639e5185d
8
wordnet.py
43
Initialize empty provenance for default English
7,547
0
38
25
9
42,454
10
nltk
7
nltk/corpus/reader/wordnet.py
Python
4
{ "docstring": "\n Add languages from Extended OMW\n\n >>> import nltk\n >>> from nltk.corpus import wordnet as wn\n >>> wn.add_exomw()\n >>> print(wn.synset('intrinsically.r.01').lemmas(lang=\"eng_wikt\"))\n [Lemma('intrinsically.r.01.per_se'), Lemma('intrinsically.r.01.as_such')]\n ", "language": "en", "n_whitespaces": 71, "n_words": 21, "vocab_size": 16 }
https://github.com/nltk/nltk.git
7
log_status_change_thread
def log_status_change_thread(log_queue, request_iterator):
    std_handler = StdStreamHandler(log_queue)
    current_handler = None
    root_logger = logging.getLogger("ray")
    default_level = root_logger.getEffectiveLevel()
    try:
        for req in request_iterator:
            if current_handler is not None:
                root_logger.setLevel(default_level)
                root_logger.removeHandler(current_handler)
                std_handler.unregister_global()
            if not req.enabled:
                current_handler = None
                continue
            current_handler = LogstreamHandler(log_queue, req.loglevel)
            std_handler.register_global()
            root_logger.addHandler(current_handler)
            root_logger.setLevel(req.loglevel)
    except grpc.RpcError as e:
        logger.debug(f"closing log thread " f"grpc error reading request_iterator: {e}")
    finally:
        if current_handler is not None:
            root_logger.setLevel(default_level)
            root_logger.removeHandler(current_handler)
            std_handler.unregister_global()
        log_queue.put(None)
608276bb96b5b49769cd8816414c280c5431d843
13
logservicer.py
254
Simplify logging configuration. (#30863)
31,203
0
291
148
45
137,621
65
ray
26
python/ray/util/client/server/logservicer.py
Python
26
{ "docstring": "This is run in a separate thread and therefore needs a separate logging\n configuration outside of the default ray logging configuration.\n ", "language": "en", "n_whitespaces": 27, "n_words": 21, "vocab_size": 18 }
https://github.com/ray-project/ray.git
3
set_to_context
def set_to_context(self, name):
    attribute = self.fattributes[name]
    if isinstance(attribute, NonInheritableFieldAttribute):
        # setting to sentinel will trigger 'default/default()' on getter
        setattr(self, name, Sentinel)
    else:
        try:
            setattr(self, name, self._get_parent_attribute(name, omit=True))
        except AttributeError:
            # mostly playcontext as only tasks/handlers/blocks really resolve parent
            setattr(self, name, Sentinel)
ff6e4da36addccb06001f7b05b1a9c04ae1d7984
15
base.py
100
fixes to FA inheritance (#78990) finalized applies to all field attributes fix getting parent value also remove unused/needed extend/prepend signature moar testing
79,551
0
158
64
35
268,567
41
ansible
12
lib/ansible/playbook/base.py
Python
9
{ "docstring": " set to parent inherited value or Sentinel as appropriate", "language": "en", "n_whitespaces": 9, "n_words": 9, "vocab_size": 9 }
https://github.com/ansible/ansible.git
1
device2_info
def device2_info() -> str:
    return load_fixture("soundtouch/device2_info.xml")


@pytest.fixture(scope="session")
efbd47c828c6c2e1cd967df2a4cefd2b00c60c25
@pytest.fixture(scope="session")
8
conftest.py
42
Rewrite SoundTouch tests to use mocked payloads (#72984)
113,386
1
12
12
7
314,785
7
core
6
tests/components/soundtouch/conftest.py
Python
3
{ "docstring": "Load SoundTouch device 2 info response and return it.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
autoscale
def autoscale(self, A):
    A = np.asanyarray(A)
    self.halfrange = max(self._vcenter-A.min(), A.max()-self._vcenter)
84def85c848a172afab987298eb402bd39aceeaa
11
colors.py
69
FIX: CenteredNorm use vmin/vmax for halfrange This changes CenteredNorm to use vmin and vmax to represent the halfrange rather than storing it separately and needing to update the vmin/vmax in all of the methods. Additionally, if you now change vcenter, the halfrange does not automatically update.
23,974
0
59
42
9
110,209
10
matplotlib
9
lib/matplotlib/colors.py
Python
4
{ "docstring": "\n Set *halfrange* to ``max(abs(A-vcenter))``, then set *vmin* and *vmax*.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
https://github.com/matplotlib/matplotlib.git
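An illustrative sketch of the autoscale behaviour above, assuming matplotlib >= 3.4 where CenteredNorm is available; after autoscaling, vmin/vmax sit symmetrically around vcenter:

import numpy as np
import matplotlib.colors as mcolors

data = np.array([-1.0, 0.5, 3.0])
norm = mcolors.CenteredNorm(vcenter=0)
norm.autoscale(data)         # halfrange becomes max(|data - vcenter|) == 3.0
print(norm.vmin, norm.vmax)  # -3.0 3.0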
1
test_pss_evaluate_metric_batch_counter
def test_pss_evaluate_metric_batch_counter(self):
    strategy = tf.distribute.ParameterServerStrategy(
        self.cluster_resolver,
        variable_partitioner=None,
    )
062073cfc4a5fe4c24ed3e326c673951c040982f
9
parameter_server_training_metric_test.py
38
Use Model metrics as logs in `fit` and `evaluate` instead of last worker train or test step result Currently the model evaluate returns the last scheduled worker metrics. This is troublesome when using distributed workers as the last one could fail. in Parameter Server Strategy, the last worker may finish sooner than earlier scheduled worker resulting in incorrect metrics being returned. So always rely on current model metrics. PiperOrigin-RevId: 471137058
83,101
0
51
171
8
279,716
8
keras
8
keras/integration_test/parameter_server_training_metric_test.py
Python
21
{ "docstring": "Verify that metric data is complete during evaluate when using\n ParameterServerStrategy\n ", "language": "en", "n_whitespaces": 25, "n_words": 11, "vocab_size": 11 }
https://github.com/keras-team/keras.git
7
_handle_mouse_press
def _handle_mouse_press(self, e):
    is_rocker_gesture = (config.val.input.mouse.rocker_gestures and
                         e.buttons() == Qt.MouseButton.LeftButton | Qt.MouseButton.RightButton)

    if e.button() in [Qt.MouseButton.XButton1, Qt.MouseButton.XButton2] or is_rocker_gesture:
        self._mousepress_backforward(e)
        return True

    self._ignore_wheel_event = True

    pos = e.pos()
    if pos.x() < 0 or pos.y() < 0:
        log.mouse.warning("Ignoring invalid click at {}".format(pos))
        return False

    if e.button() != Qt.MouseButton.NoButton:
        self._tab.elements.find_at_pos(pos, self._mousepress_insertmode_cb)

    return False
0877fb0d78635692e481c8bde224fac5ad0dd430
12
eventfilter.py
230
Run scripts/dev/rewrite_enums.py
117,521
0
190
143
40
321,087
51
qutebrowser
30
qutebrowser/browser/eventfilter.py
Python
14
{ "docstring": "Handle pressing of a mouse button.\n\n Args:\n e: The QMouseEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n ", "language": "en", "n_whitespaces": 63, "n_words": 20, "vocab_size": 20 }
https://github.com/qutebrowser/qutebrowser.git
2
_calc_open_trade_value
def _calc_open_trade_value(self) -> float:
    open_trade = Decimal(self.amount) * Decimal(self.open_rate)
    fees = open_trade * Decimal(self.fee_open)
    if self.is_short:
        return float(open_trade - fees)
    else:
        return float(open_trade + fees)
b58e811b1486ae62e835cbea3e40cf88128243a0
11
trade_model.py
90
Move trade/order Models to their own class
34,492
0
82
54
19
149,702
25
freqtrade
10
freqtrade/persistence/trade_model.py
Python
11
{ "docstring": "\n Calculate the open_rate including open_fee.\n :return: Price in of the open trade incl. Fees\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
https://github.com/freqtrade/freqtrade.git
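A worked example of the fee arithmetic above with assumed, illustrative values (2 units at rate 100 and a 0.1% entry fee):

from decimal import Decimal

open_trade = Decimal("2") * Decimal("100")   # 200
fees = open_trade * Decimal("0.001")         # 0.2
print(float(open_trade + fees))  # 200.2 -> open value of a long trade
print(float(open_trade - fees))  # 199.8 -> open value of a short trade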
1
test_file_upload_authed
def test_file_upload_authed(self) -> None:
    self.login("hamlet")
    fp = StringIO("zulip!")
    fp.name = "zulip.txt"

    result = self.client_post("/json/user_uploads", {"file": fp})
    self.assert_json_success(result)
    self.assertIn("uri", result.json())

    uri = result.json()["uri"]
    base = "/user_uploads/"
    self.assertEqual(base, uri[: len(base)])

    # In the future, local file requests will follow the same style as S3
    # requests; they will be first authenthicated and redirected
    self.assert_streaming_content(self.client_get(uri), b"zulip!")

    # check if DB has attachment marked as unclaimed
    entry = Attachment.objects.get(file_name="zulip.txt")
    self.assertEqual(entry.is_claimed(), False)

    self.subscribe(self.example_user("hamlet"), "Denmark")
    body = "First message ...[zulip.txt](http://localhost:9991" + uri + ")"
    self.send_stream_message(self.example_user("hamlet"), "Denmark", body, "test")

    # Now try the endpoint that's supposed to return a temporary URL for access
    # to the file.
    result = self.client_get("/json" + uri)
    self.assert_json_success(result)
    data = result.json()
    url_only_url = data["url"]
    # Ensure this is different from the original uri:
    self.assertNotEqual(url_only_url, uri)
    self.assertIn("user_uploads/temporary/", url_only_url)
    self.assertTrue(url_only_url.endswith("zulip.txt"))

    # The generated URL has a token authorizing the requestor to access the file
    # without being logged in.
    self.logout()
    self.assert_streaming_content(self.client_get(url_only_url), b"zulip!")

    # The original uri shouldn't work when logged out:
    result = self.client_get(uri)
    self.assertEqual(result.status_code, 401)
ba7ea7cc809ace3e8ecf25311e54d78f62b0d0c8
11
test_upload.py
450
test_classes: Extract assert_streaming_content helper. This also fixes a warning from RealmExportTest.test_endpoint_local_uploads: “ResourceWarning: unclosed file <_io.BufferedReader name='/srv/zulip/var/…/test-export.tar.gz'>”. Signed-off-by: Anders Kaseorg <anders@zulip.com>
17,592
0
414
253
117
83,069
162
zulip
34
zerver/tests/test_upload.py
Python
32
{ "docstring": "\n A call to /json/user_uploads should return a uri and actually create an\n entry in the database. This entry will be marked unclaimed till a message\n refers it.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 25 }
https://github.com/zulip/zulip.git
2
get_current_branch
def get_current_branch(cls, location):
    # type: (str) -> Optional[str]
    # git-symbolic-ref exits with empty stdout if "HEAD" is a detached
    # HEAD rather than a symbolic ref. In addition, the -q causes the
    # command to exit with status code 1 instead of 128 in this case
    # and to suppress the message to stderr.
    args = ['symbolic-ref', '-q', 'HEAD']
    output = cls.run_command(
        args,
        extra_ok_returncodes=(1, ),
        show_stdout=False,
        stdout_only=True,
        cwd=location,
    )
    ref = output.strip()
    if ref.startswith('refs/heads/'):
        return ref[len('refs/heads/'):]
    return None
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
12
git.py
122
upd; format
12,532
0
229
71
64
61,366
78
transferlearning
14
.venv/lib/python3.8/site-packages/pip/_internal/vcs/git.py
Python
13
{ "docstring": "\n Return the current branch, or None if HEAD isn't at a branch\n (e.g. detached HEAD).\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
https://github.com/jindongwang/transferlearning.git
4
localize_tag
def localize_tag(parser, token):
    use_l10n = None
    bits = list(token.split_contents())
    if len(bits) == 1:
        use_l10n = True
    elif len(bits) > 2 or bits[1] not in ("on", "off"):
        raise TemplateSyntaxError("%r argument should be 'on' or 'off'" % bits[0])
    else:
        use_l10n = bits[1] == "on"
    nodelist = parser.parse(("endlocalize",))
    parser.delete_first_token()
    return LocalizeNode(nodelist, use_l10n)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
l10n.py
161
Refs #33476 -- Reformatted code with Black.
51,487
0
97
95
39
206,326
49
django
13
django/templatetags/l10n.py
Python
12
{ "docstring": "\n Force or prevents localization of values, regardless of the value of\n `settings.USE_L10N`.\n\n Sample usage::\n\n {% localize off %}\n var pi = {{ 3.1415 }};\n {% endlocalize %}\n ", "language": "en", "n_whitespaces": 65, "n_words": 27, "vocab_size": 23 }
https://github.com/django/django.git
1
flush
def flush(self) -> None:
    self._write_to_log_file()
    self._write_to_devtools()
    self._buffer.clear()
3b40eb828b5217fd023c07694a02f148664b9588
8
redirect_output.py
46
Redirecting stdout to both devtools and logfile
44,020
0
35
25
7
182,967
7
textual
6
src/textual/devtools/redirect_output.py
Python
8
{ "docstring": "Flush the buffer. This will send all buffered log messages to\n the devtools server and the log file. In the case of the devtools,\n where possible, log messages will be batched and sent as one.\n ", "language": "en", "n_whitespaces": 56, "n_words": 35, "vocab_size": 26 }
https://github.com/Textualize/textual.git
6
get_execution_info
def get_execution_info(self, job_id, function_descriptor):
    function_id = function_descriptor.function_id
    # If the function has already been loaded,
    # There's no need to load again
    if function_id in self._function_execution_info:
        return self._function_execution_info[function_id]
    if self._worker.load_code_from_local:
        # Load function from local code.
        if not function_descriptor.is_actor_method():
            # If the function is not able to be loaded,
            # try to load it from GCS,
            # even if load_code_from_local is set True
            if self._load_function_from_local(function_descriptor) is True:
                return self._function_execution_info[function_id]
    # Load function from GCS.
    # Wait until the function to be executed has actually been
    # registered on this worker. We will push warnings to the user if
    # we spend too long in this loop.
    # The driver function may not be found in sys.path. Try to load
    # the function from GCS.
    with profiling.profile("wait_for_function"):
        self._wait_for_function(function_descriptor, job_id)
    try:
        function_id = function_descriptor.function_id
        info = self._function_execution_info[function_id]
    except KeyError as e:
        message = (
            "Error occurs in get_execution_info: "
            "job_id: %s, function_descriptor: %s. Message: %s"
            % (job_id, function_descriptor, e)
        )
        raise KeyError(message)
    return info
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
13
function_manager.py
206
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,113
0
497
118
99
130,122
162
ray
17
python/ray/_private/function_manager.py
Python
21
{ "docstring": "Get the FunctionExecutionInfo of a remote function.\n Args:\n job_id: ID of the job that the function belongs to.\n function_descriptor: The FunctionDescriptor of the function to get.\n Returns:\n A FunctionExecutionInfo object.\n ", "language": "en", "n_whitespaces": 84, "n_words": 30, "vocab_size": 23 }
https://github.com/ray-project/ray.git
2
_enable_task_listeners
def _enable_task_listeners():
    if get_listener_manager().has_listeners:
        register_task_instance_state_events()
dba00ce6a32b7f50153887c6974f62985ca8023f
9
local_task_job.py
30
Add Listener Plugin API that tracks TaskInstance state changes (#20443) This adds new Plugin API - "listeners". It enables plugin authors to write [pluggy hook implementation][1] that will be called on certain formalized extension points. To differentiate between current Airflow extension points, like plugins, and current Airflow hooks, implementations of those hooks are called listeners. The API is ment to be called across all dags, and all operators - in contrast to current on_success_callback, pre_execute and related family which are meant to provide callbacks for particular dag authors, or operator creators. pluggy mechanism enables us to execute multiple, or none, listeners that implement particular extension point, so that users can use multiple listeners seamlessly. In this PR, three such extension points are added. When TaskInstance's state is changed to RUNNING, on_task_instance_running hook is called. On change toSUCCESS on_task_instance_success is called, similarly on FAILED on_task_instance_failed is called. Actual notification mechanism is be implemented using [SQLAlchemy’s events mechanism][2]. This ensures that plugins will get every change of state, regardless of where in the codebase it happened, and not require manual annotation of TI state changes across the codebase. To make sure that this change is not affecting performance, running this mechanism on scheduler is disabled by default. The SQLAlchemy event mechanism is also not affected by default - the event listener is only added if we have any plugin which actually provides any listener. [1]: https://pluggy.readthedocs.io/en/stable/ [2]: https://docs.sqlalchemy.org/en/13/orm/session_events.html#after-flush Signed-off-by: Maciej Obuchowski <obuchowski.maciej@gmail.com>
8,086
0
30
15
5
43,885
5
airflow
4
airflow/jobs/local_task_job.py
Python
3
{ "docstring": "\n Check if we have any registered listeners, then register sqlalchemy hooks for\n TI state change if we do.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
https://github.com/apache/airflow.git
2
get_binance_available_quotes_for_each_coin
def get_binance_available_quotes_for_each_coin() -> dict:
    trading_pairs = _get_trading_pairs()
    results = defaultdict(list)
    for pair in trading_pairs:
        results[pair["baseAsset"]].append(pair["quoteAsset"])
    return results


@log_start_end(log=logger)
@check_api_key(["API_BINANCE_KEY", "API_BINANCE_SECRET"])
59d8b36bb0467a1a99513b10e8b8471afaa56fd6
@log_start_end(log=logger) @check_api_key(["API_BINANCE_KEY", "API_BINANCE_SECRET"])
12
binance_model.py
99
[IMPROVE] Fix Docstring formatting/Fix missing, incomplete type hints (#3412) * Fixes * Update stocks_helper.py * update git-actions set-output to new format * Update stocks_helper.py * Update terminal_helper.py * removed LineAnnotateDrawer from qa_view * lint * few changes * updates * sdk auto gen modules done * Update stocks_helper.py * updates to changed imports, and remove first sdk_modules * Update generate_sdk.py * Update generate_sdk.py * pylint * revert stocks_helper * Update generate_sdk.py * Update sdk.py * Update generate_sdk.py * full auto generation, added sdk.py/controllers creation * missed enable forecasting * added running black in subprocess after sdk files generation completes * removed deleted sdk_arg_logger * comment out tests * property doc fix * clean up * Update generate_sdk.py * make trailmap classes useable for doc generation * Update generate_sdk.py * added lineon to trailmap class for linking to func in markdown * changed lineon to dict * added full_path to trailmap for linking in docs * updated portfolio * feat: initial files * feat: added meta head * feat: added funcdef * added func_def to trailmap attributes for markdown in docs, added missing type hints to covid functions * feat: added view and merged with jaun * Update generate_sdk.py * Update generate_sdk.py * Update generate_sdk.py * Update generate_sdk.py * init * fix returns * fix: random stuff * fix: random * fixed encoding issue on windows * fix: generate tabs * update * Update generate_sdk_markdown.py * Create .pydocstyle.ini * added type hint classes for views * fixes * alt, ba * alt-economy * Update finviz_compare_model.py * fixs * Update substack_model.py * Update generate_sdk.py * last of my section * porfolio * po * Update optimizer_model.py * fixing more things * few more * keys done * update * fixes * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * mypy forecast fix * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * Update generate_sdk_markdown.py * fixes * forecast fixes * one more fix * Update coinbase_model.py * Update generate_sdk_markdown.py Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: James Maslek <jmaslek11@gmail.com> Co-authored-by: jose-donato <zmcdonato@gmail.com> Co-authored-by: andrewkenreich <andrew.kenreich@gmail.com>
85,880
1
40
40
18
286,564
20
OpenBBTerminal
13
openbb_terminal/cryptocurrency/due_diligence/binance_model.py
Python
15
{ "docstring": "Helper methods that for every coin available on Binance add all quote assets. [Source: Binance]\n\n Returns\n -------\n dict\n All quote assets for given coin\n {'ETH' : ['BTC', 'USDT' ...], 'UNI' : ['ETH', 'BTC','BUSD', ...]\n\n ", "language": "en", "n_whitespaces": 60, "n_words": 34, "vocab_size": 30 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
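A hedged usage sketch of the helper above; it needs network access to Binance (and, via the decorators shown, Binance API keys configured in OpenBB), so the printed quotes are only indicative:

quotes = get_binance_available_quotes_for_each_coin()
print(quotes.get("ETH", []))  # e.g. ['BTC', 'USDT', ...]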
1
nunique_approx
def nunique_approx(self, split_every=None):
    from dask.dataframe import hyperloglog  # here to avoid circular import issues

    return aca(
        [self],
        chunk=hyperloglog.compute_hll_array,
        combine=hyperloglog.reduce_state,
        aggregate=hyperloglog.estimate_count,
        split_every=split_every,
        b=16,
        meta=float,
    )
cccb9d8d8e33a891396b1275c2448c352ef40c27
9
core.py
79
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
36,555
0
130
54
23
156,098
24
dask
16
dask/dataframe/core.py
Python
11
{ "docstring": "Approximate number of unique rows.\n\n This method uses the HyperLogLog algorithm for cardinality\n estimation to compute the approximate number of unique rows.\n The approximate error is 0.406%.\n\n Parameters\n ----------\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used.\n Default is 8.\n\n Returns\n -------\n a float representing the approximate number of elements\n ", "language": "en", "n_whitespaces": 169, "n_words": 66, "vocab_size": 52 }
https://github.com/dask/dask.git
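A minimal sketch of nunique_approx in use, assuming a local dask.dataframe session; the result is a float estimate, not an exact count:

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": [1, 2, 2, 3, 3, 3]}), npartitions=2)
print(ddf["x"].nunique_approx().compute())  # ~3.0, within roughly 0.4% error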
3
fromisoformat
def fromisoformat(cls, date_string):
    if not isinstance(date_string, str):
        raise TypeError('fromisoformat: argument must be str')

    try:
        assert len(date_string) == 10
        return cls(*_parse_isoformat_date(date_string))
    except Exception:
        raise ValueError(f'Invalid isoformat string: {date_string!r}')
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
datetime.py
89
add python 3.10.4 for windows
56,569
0
99
49
26
222,412
27
XX-Net
10
python3.10.4/Lib/datetime.py
Python
8
{ "docstring": "Construct a date from the output of date.isoformat().", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/XX-net/XX-Net.git
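A short usage sketch of date.fromisoformat, the standard-library constructor shown above:

from datetime import date

d = date.fromisoformat("2012-09-27")
assert d == date(2012, 9, 27)
assert d.isoformat() == "2012-09-27"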