Dataset columns (name, dtype, observed min/max; for string columns the min/max are string lengths):

    column           dtype     min    max
    complexity       int64     1      139
    fun_name         string    1      80
    code             string    101    62.2k
    commit_id        string    40     40
    ast_errors       string    0      3.11k
    ast_levels       int64     6      36
    file_name        string    5      79
    n_ast_nodes      int64     17     19.2k
    commit_message   string    3      15.3k
    d_id             int64     12     121k
    n_ast_errors     int64     0      9
    n_whitespaces    int64     4      10.8k
    token_counts     int64     5      3.06k
    vocab_size       int64     4      1.11k
    id               int64     20     338k
    n_words          int64     4      4.82k
    repo             string    3      22
    n_identifiers    int64     2      176
    path             string    7      134
    language         string    1 class (Python)
    nloc             int64     1      413
    documentation    dict
    url              string    31     59
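Each record below lists its field values one per line, in the same column order as the schema above (complexity, fun_name, code, commit_id, ast_errors, ast_levels, file_name, n_ast_nodes, commit_message, d_id, n_ast_errors, n_whitespaces, token_counts, vocab_size, id, n_words, repo, n_identifiers, path, language, nloc, documentation, url). As a minimal sketch of how rows with this schema could be inspected, assuming the split is exported as a JSON Lines file (the file name is hypothetical; this dump does not name the source file or its hosting):

import json

# "train.jsonl" is a hypothetical export of the rows shown below.
with open("train.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # Each record pairs a parsed Python function with commit metadata.
        print(record["repo"], record["path"], record["fun_name"])
        print("complexity:", record["complexity"], "nloc:", record["nloc"])
        print(record["code"][:200])  # first characters of the stored source
        break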
1
preprocess_input
def preprocess_input(x, data_format=None):
    return x


@keras_export("keras.applications.efficientnet_v2.decode_predictions")
3613c3defc39c236fb1592c4f7ba1a9cc887343a
@keras_export("keras.applications.efficientnet_v2.decode_predictions")
7
efficientnet_v2.py
32
Remove pylint comments. PiperOrigin-RevId: 452353044
82,629
1
11
12
6
278,616
6
keras
4
keras/applications/efficientnet_v2.py
Python
2
{ "docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the EfficientNetV2 model\n implementation. Users are no longer required to call this method to\n normalize the input data. This method does nothing and only kept as a\n placeholder to align the API surface between old and new version of model.\n\n Args:\n x: A floating point `numpy.array` or a `tf.Tensor`.\n data_format: Optional data format of the image tensor/array. Defaults to\n None, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it,\n it defaults to \"channels_last\").{mode}\n\n Returns:\n Unchanged `numpy.array` or `tf.Tensor`.\n ", "language": "en", "n_whitespaces": 152, "n_words": 95, "vocab_size": 76 }
https://github.com/keras-team/keras.git
2
make_increasing_ohlc
def make_increasing_ohlc(open, high, low, close, dates, **kwargs):
    (flat_increase_x, flat_increase_y, text_increase) = _OHLC(
        open, high, low, close, dates
    ).get_increase()

    if "name" in kwargs:
        showlegend = True
    else:
        kwargs.setdefault("name", "Increasing")
        showlegend = False

    kwargs.setdefault("line", dict(color=_DEFAULT_INCREASING_COLOR, width=1))
    kwargs.setdefault("text", text_increase)

    ohlc_incr = dict(
        type="scatter",
        x=flat_increase_x,
        y=flat_increase_y,
        mode="lines",
        showlegend=showlegend,
        **kwargs,
    )
    return ohlc_incr
43e3a4011080911901176aab919c0ecf5046ddd3
11
_ohlc.py
183
switch to black .22
57,818
0
148
117
39
226,144
48
plotly.py
23
packages/python/plotly/plotly/figure_factory/_ohlc.py
Python
20
{ "docstring": "\n Makes increasing ohlc sticks\n\n _make_increasing_ohlc() and _make_decreasing_ohlc separate the\n increasing trace from the decreasing trace so kwargs (such as\n color) can be passed separately to increasing or decreasing traces\n when direction is set to 'increasing' or 'decreasing' in\n FigureFactory.create_candlestick()\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (trace) ohlc_incr_data: Scatter trace of all increasing ohlc\n sticks.\n ", "language": "en", "n_whitespaces": 146, "n_words": 89, "vocab_size": 59 }
https://github.com/plotly/plotly.py.git
1
test_tags_help_text_no_spaces_allowed
def test_tags_help_text_no_spaces_allowed(self):
    widget = widgets.AdminTagWidget()
    help_text = widget.get_context(None, None, {})["widget"]["help_text"]

    html = widget.render("tags", None, {})
    help_text_html_element = self.get_help_text_html_element(html)

    self.assertEqual(
        help_text, "Tags can only consist of a single word, no spaces allowed."
    )

    self.assertHTMLEqual(
        help_text_html_element,
        % help_text,
    )
1822d7eee23cf5fceff8b1f58f3ca2f0a32c6e34
11
test_widgets.py
122
display help text message for tag field - resolves #1874 - ensure message is dynamic based on the setting TAG_SPACES_ALLOWED - Update wagtail/admin/templates/wagtailadmin/widgets/tag_widget.html
16,621
0
134
72
31
77,079
37
wagtail
13
wagtail/admin/tests/test_widgets.py
Python
12
{ "docstring": "Checks that the tags help text html element content is correct when TAG_SPACES_ALLOWED is False<p class=\"help\">%s</p>", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 15 }
https://github.com/wagtail/wagtail.git
3
handle_template_exception
def handle_template_exception(ex, field):
    if ex.args and ex.args[0].startswith("UndefinedError: 'None' has no attribute"):
        # Common during HA startup - so just a warning
        _LOGGER.warning(ex)
        return
    _LOGGER.error("Error parsing template for field %s", field, exc_info=ex)
73a368c24246b081cdb98923ca3180937d436c3b
10
helpers.py
75
Refactor history_stats to minimize database access (part 2) (#70255)
95,827
0
85
44
31
296,853
31
core
9
homeassistant/components/history_stats/helpers.py
Python
5
{ "docstring": "Log an error nicely if the template cannot be interpreted.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
3
find_profile_dir_by_name
def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
    dirname = u'profile_' + name
    paths = [ipython_dir]
    for p in paths:
        profile_dir = os.path.join(p, dirname)
        if os.path.isdir(profile_dir):
            return cls(location=profile_dir, config=config)
    else:
        raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
1ec91ebf328bdf3450130de4b4604c79dc1e19d9
12
profiledir.py
119
FIX CVE-2022-21699 See https://github.com/ipython/ipython/security/advisories/GHSA-pq7m-3gw7-gq5x
52,335
0
120
75
32
208,471
37
ipython
15
IPython/core/profiledir.py
Python
9
{ "docstring": "Find an existing profile dir by profile name, return its ProfileDir.\n\n This searches through a sequence of paths for a profile dir. If it\n is not found, a :class:`ProfileDirError` exception will be raised.\n\n The search path algorithm is:\n 1. ``os.getcwd()`` # removed for security reason.\n 2. ``ipython_dir``\n\n Parameters\n ----------\n ipython_dir : unicode or str\n The IPython directory to use.\n name : unicode or str\n The name of the profile. The name of the profile directory\n will be \"profile_<profile>\".\n ", "language": "en", "n_whitespaces": 183, "n_words": 78, "vocab_size": 57 }
https://github.com/ipython/ipython.git
11
add_dep_paths
def add_dep_paths():
    paths = []
    if old_deps is not None:
        for importer, modname, ispkg in pkgutil.iter_modules(
                old_deps.__path__):
            if not ispkg:
                continue
            try:
                mod = importer.find_module(modname).load_module(modname)
            except ImportError as e:
                logging.warning(f"deps: Error importing dependency: {e}")
                continue

            if hasattr(mod, 'dep_bins'):
                paths.extend(mod.dep_bins)
    sys.path.extend(paths)

    if kivy_deps is None:
        return

    paths = []
    for importer, modname, ispkg in pkgutil.iter_modules(kivy_deps.__path__):
        if not ispkg:
            continue
        try:
            mod = importer.find_module(modname).load_module(modname)
        except ImportError as e:
            logging.warning(f"deps: Error importing dependency: {e}")
            continue

        if hasattr(mod, 'dep_bins'):
            paths.extend(mod.dep_bins)
    sys.path.extend(paths)
e6c144b5423dada62fd13034c2d40bf48a2bc423
16
__init__.py
293
Replace deprecated logging.warn with logging.warning (#7906)
47,008
0
332
171
38
194,585
77
kivy
22
kivy/tools/packaging/pyinstaller_hooks/__init__.py
Python
29
{ "docstring": "Should be called by the hook. It adds the paths with the binary\n dependencies to the system path so that pyinstaller can find the binaries\n during its crawling stage.\n ", "language": "en", "n_whitespaces": 38, "n_words": 29, "vocab_size": 25 }
https://github.com/kivy/kivy.git
4
_ln_exp_bound
def _ln_exp_bound(self):
    # for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
    adj = self._exp + len(self._int) - 1
    if adj >= 1:
        # argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
        return len(str(adj*23//10)) - 1
    if adj <= -2:
        # argument <= 0.1
        return len(str((-1-adj)*23//10)) - 1

    op = _WorkRep(self)
    c, e = op.int, op.exp
    if adj == 0:
        # 1 < self < 10
        num = str(c-10**-e)
        den = str(c)
        return len(num) - len(den) - (num < den)
    # adj == -1, 0.1 <= self < 1
    return e + len(str(10**-e - c)) - 1
8198943edd73a363c266633e1aa5b2a9e9c9f526
17
_pydecimal.py
225
add python 3.10.4 for windows
55,682
0
267
126
59
219,652
109
XX-Net
15
python3.10.4/Lib/_pydecimal.py
Python
13
{ "docstring": "Compute a lower bound for the adjusted exponent of self.ln().\n In other words, compute r such that self.ln() >= 10**r. Assumes\n that self is finite and positive and that self != 1.\n ", "language": "en", "n_whitespaces": 54, "n_words": 32, "vocab_size": 28 }
https://github.com/XX-net/XX-Net.git
1
test_orderline_query
def test_orderline_query(staff_api_client, permission_manage_orders, fulfilled_order):
    order = fulfilled_order
    query = 
    line = order.lines.first()

    metadata_key = "md key"
    metadata_value = "md value"

    line.store_value_in_private_metadata({metadata_key: metadata_value})
    line.store_value_in_metadata({metadata_key: metadata_value})
    line.save()

    staff_api_client.user.user_permissions.add(permission_manage_orders)
    response = staff_api_client.post_graphql(query)
    content = get_graphql_content(response)
    order_data = content["data"]["orders"]["edges"][0]["node"]
    first_order_data_line = order_data["lines"][0]
    variant_id = graphene.Node.to_global_id("ProductVariant", line.variant.pk)

    assert first_order_data_line["thumbnail"] is None
    assert first_order_data_line["variant"]["id"] == variant_id
    assert first_order_data_line["quantity"] == line.quantity
    assert first_order_data_line["unitPrice"]["currency"] == line.unit_price.currency
    assert first_order_data_line["metadata"] == [
        {"key": metadata_key, "value": metadata_value}
    ]
    assert first_order_data_line["privateMetadata"] == [
        {"key": metadata_key, "value": metadata_value}
    ]

    expected_unit_price = Money(
        amount=str(first_order_data_line["unitPrice"]["gross"]["amount"]),
        currency="USD",
    )
    assert first_order_data_line["totalPrice"]["currency"] == line.unit_price.currency
    assert expected_unit_price == line.unit_price.gross

    expected_total_price = Money(
        amount=str(first_order_data_line["totalPrice"]["gross"]["amount"]),
        currency="USD",
    )
    assert expected_total_price == line.unit_price.gross * line.quantity

    allocation = line.allocations.first()
    allocation_id = graphene.Node.to_global_id("Allocation", allocation.pk)
    warehouse_id = graphene.Node.to_global_id(
        "Warehouse", allocation.stock.warehouse.pk
    )
    assert first_order_data_line["allocations"] == [
        {
            "id": allocation_id,
            "quantity": allocation.quantity_allocated,
            "warehouse": {"id": warehouse_id},
        }
    ]
a68553e1a55e3a1bd32826cdce294d27f74175e9
15
test_order.py
595
Metadata added to checkout and order lines (#10040) * Metadata added to checkout and order lines * CHANGELOG.md update * Missing tests added
5,130
0
330
349
78
27,800
129
saleor
45
saleor/graphql/order/tests/test_order.py
Python
93
{ "docstring": "\n query OrdersQuery {\n orders(first: 1) {\n edges {\n node {\n lines {\n thumbnail(size: 540) {\n url\n }\n variant {\n id\n }\n quantity\n allocations {\n id\n quantity\n warehouse {\n id\n }\n }\n unitPrice {\n currency\n gross {\n amount\n }\n }\n totalPrice {\n currency\n gross {\n amount\n }\n }\n metadata {\n key\n value\n }\n privateMetadata {\n key\n value\n }\n }\n }\n }\n }\n }\n ", "language": "en", "n_whitespaces": 1222, "n_words": 62, "vocab_size": 26 }
https://github.com/saleor/saleor.git
1
test_login_view
def test_login_view(self):
    # Get login page
    response = self.client.get(reverse("wagtailadmin_login"))

    # Check that the user received a login page
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "wagtailadmin/login.html")
d10f15e55806c6944827d801cd9c2d53f5da4186
11
test_account_management.py
67
Reformat with black
15,736
0
64
37
19
71,769
22
wagtail
9
wagtail/admin/tests/test_account_management.py
Python
4
{ "docstring": "\n This tests that the login view responds with a login page\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
https://github.com/wagtail/wagtail.git
8
construct_change_message
def construct_change_message(form, formsets, add):
    # Evaluating `form.changed_data` prior to disabling translations is required
    # to avoid fields affected by localization from being included incorrectly,
    # e.g. where date formats differ such as MM/DD/YYYY vs DD/MM/YYYY.
    changed_data = form.changed_data
    with translation_override(None):
        # Deactivate translations while fetching verbose_name for form
        # field labels and using `field_name`, if verbose_name is not provided.
        # Translations will happen later on LogEntry access.
        changed_field_labels = _get_changed_field_labels_from_form(form, changed_data)

    change_message = []
    if add:
        change_message.append({"added": {}})
    elif form.changed_data:
        change_message.append({"changed": {"fields": changed_field_labels}})

    if formsets:
        with translation_override(None):
            for formset in formsets:
                for added_object in formset.new_objects:
                    change_message.append(
                        {
                            "added": {
                                "name": str(added_object._meta.verbose_name),
                                "object": str(added_object),
                            }
                        }
                    )
                for changed_object, changed_fields in formset.changed_objects:
                    change_message.append(
                        {
                            "changed": {
                                "name": str(changed_object._meta.verbose_name),
                                "object": str(changed_object),
                                "fields": _get_changed_field_labels_from_form(
                                    formset.forms[0], changed_fields
                                ),
                            }
                        }
                    )
                for deleted_object in formset.deleted_objects:
                    change_message.append(
                        {
                            "deleted": {
                                "name": str(deleted_object._meta.verbose_name),
                                "object": str(deleted_object),
                            }
                        }
                    )
    return change_message
9c19aff7c7561e3a82978a272ecdaad40dda5c00
23
utils.py
352
Refs #33476 -- Reformatted code with Black.
50,436
0
979
206
101
203,539
144
django
22
django/contrib/admin/utils.py
Python
43
{ "docstring": "\n Construct a JSON structure describing changes from a changed object.\n Translations are deactivated so that strings are stored untranslated.\n Translation happens later on LogEntry access.\n ", "language": "en", "n_whitespaces": 38, "n_words": 25, "vocab_size": 23 }
https://github.com/django/django.git
9
split_header_words
def split_header_words(header_values):
    r
    assert not isinstance(header_values, str)
    result = []
    for text in header_values:
        orig_text = text
        pairs = []
        while text:
            m = HEADER_TOKEN_RE.search(text)
            if m:
                text = unmatched(m)
                name = m.group(1)
                m = HEADER_QUOTED_VALUE_RE.search(text)
                if m:  # quoted value
                    text = unmatched(m)
                    value = m.group(1)
                    value = HEADER_ESCAPE_RE.sub(r"\1", value)
                else:
                    m = HEADER_VALUE_RE.search(text)
                    if m:  # unquoted value
                        text = unmatched(m)
                        value = m.group(1)
                        value = value.rstrip()
                    else:
                        # no value, a lone token
                        value = None
                pairs.append((name, value))
            elif text.lstrip().startswith(","):
                # concatenated headers, as per RFC 2616 section 4.2
                text = text.lstrip()[1:]
                if pairs: result.append(pairs)
                pairs = []
            else:
                # skip junk
                non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text)
                assert nr_junk_chars > 0, (
                    "split_header_words bug: '%s', '%s', %s" %
                    (orig_text, text, pairs))
                text = non_junk
        if pairs:
            result.append(pairs)
    return result

HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")
8198943edd73a363c266633e1aa5b2a9e9c9f526
19
cookiejar.py
392
add python 3.10.4 for windows
54,932
0
700
227
78
217,784
136
XX-Net
29
python3.10.4/Lib/http/cookiejar.py
Python
81
{ "docstring": "Parse header values into a list of lists containing key,value pairs.\n\n The function knows how to deal with \",\", \";\" and \"=\" as well as quoted\n values after \"=\". A list of space separated tokens are parsed as if they\n were separated by \";\".\n\n If the header_values passed as argument contains multiple values, then they\n are treated as if they were a single value separated by comma \",\".\n\n This means that this function is useful for parsing header fields that\n follow this syntax (BNF as from the HTTP/1.1 specification, but we relax\n the requirement for tokens).\n\n headers = #header\n header = (token | parameter) *( [\";\"] (token | parameter))\n\n token = 1*<any CHAR except CTLs or separators>\n separators = \"(\" | \")\" | \"<\" | \">\" | \"@\"\n | \",\" | \";\" | \":\" | \"\\\" | <\">\n | \"/\" | \"[\" | \"]\" | \"?\" | \"=\"\n | \"{\" | \"}\" | SP | HT\n\n quoted-string = ( <\"> *(qdtext | quoted-pair ) <\"> )\n qdtext = <any TEXT except <\">>\n quoted-pair = \"\\\" CHAR\n\n parameter = attribute \"=\" value\n attribute = token\n value = token | quoted-string\n\n Each header is represented by a list of key/value pairs. The value for a\n simple token (not part of a parameter) is None. Syntactically incorrect\n headers will not necessarily be parsed as you would want.\n\n This is easier to describe with some examples:\n\n >>> split_header_words(['foo=\"bar\"; port=\"80,81\"; discard, bar=baz'])\n [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]\n >>> split_header_words(['text/html; charset=\"iso-8859-1\"'])\n [[('text/html', None), ('charset', 'iso-8859-1')]]\n >>> split_header_words([r'Basic realm=\"\\\"foo\\bar\\\"\"'])\n [[('Basic', None), ('realm', '\"foobar\"')]]\n\n ", "language": "en", "n_whitespaces": 527, "n_words": 259, "vocab_size": 161 }
https://github.com/XX-net/XX-Net.git
2
reparse
def reparse(self) -> None:
    # Do this in a fresh Stylesheet so if there are errors we don't break self.
    stylesheet = Stylesheet(variables=self.variables)
    for css, path in self.source:
        stylesheet.parse(css, path=path)
    self._clone(stylesheet)
e8636d0d86e596690647564b84a68d5e6d107dd0
10
stylesheet.py
71
css reparse
43,963
0
77
43
30
182,783
31
textual
10
src/textual/css/stylesheet.py
Python
12
{ "docstring": "Re-parse source, applying new variables.\n\n Raises:\n StylesheetError: If the CSS could not be read.\n StylesheetParseError: If the CSS is invalid.\n\n ", "language": "en", "n_whitespaces": 56, "n_words": 20, "vocab_size": 17 }
https://github.com/Textualize/textual.git
3
_get_feature_encoder_or_decoder
def _get_feature_encoder_or_decoder(feature):
    if DECODER in feature:
        return feature[DECODER]
    elif ENCODER in feature:
        return feature[ENCODER]
    else:
        feature[ENCODER] = {}
        return feature[ENCODER]
60197fe851aadfa51d18c16dd42b49f728ed7eaa
10
dataset_synthesizer.py
65
Adds registry to organize backward compatibility updates around versions and config sections (#2335) * First pass implementation of VersionTransformation * Remove test main. * Refactors backward_compatibility.py to use version registration system * Changed sort order to process outer first. * Moves test_deprecated_field_aliases from test_defaults.py to test_backward_compatibility.py * s/prefix/prefixes in test_version_transformation.py * Removes comment, print statements. * Adds docstrings. * typo fix. * Removes unused import. * Small cleanup to backward_compatibility.py, removed redundant keys. * Assume version 0.4 if no version present in the config. * Updates dataset synthesis to work with nested encoder/decoders. * Fixes test_server.py * nesting image feature params in test_ray * _get_feature_encoder_or_decoder in generate_category. * oops, forgot random.choice. Co-authored-by: Daniel Treiman <daniel@predibase.com>
1,274
0
60
40
14
7,812
20
ludwig
4
ludwig/data/dataset_synthesizer.py
Python
8
{ "docstring": "Returns the nested decoder or encoder dictionary for a feature.\n\n If neither encoder nor decoder is present, creates an empty encoder dict and returns it.\n ", "language": "en", "n_whitespaces": 31, "n_words": 25, "vocab_size": 22 }
https://github.com/ludwig-ai/ludwig.git
1
test_ContinuousSelector_4
def test_ContinuousSelector_4():
    cs = ContinuousSelector()
    assert_raises(ValueError, cs.transform, iris_data[0:10,:])
388616b6247ca4ea8de4e2f340d6206aee523541
9
feature_transformers_tests.py
45
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,430
0
17
27
8
181,642
8
tpot
7
tests/feature_transformers_tests.py
Python
3
{ "docstring": "Assert that ContinuousSelector rasies ValueError without categorical features.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/EpistasisLab/tpot.git
6
dpm_solver_first_update
def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
    ns = self.noise_schedule
    dims = x.dim()
    lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
    h = lambda_t - lambda_s
    log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
    sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
    alpha_t = torch.exp(log_alpha_t)

    if self.predict_x0:
        phi_1 = torch.expm1(-h)
        if model_s is None:
            model_s = self.model_fn(x, s)
        x_t = (
            expand_dims(sigma_t / sigma_s, dims) * x
            - expand_dims(alpha_t * phi_1, dims) * model_s
        )
        if return_intermediate:
            return x_t, {'model_s': model_s}
        else:
            return x_t
    else:
        phi_1 = torch.expm1(h)
        if model_s is None:
            model_s = self.model_fn(x, s)
        x_t = (
            expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
            - expand_dims(sigma_t * phi_1, dims) * model_s
        )
        if return_intermediate:
            return x_t, {'model_s': model_s}
        else:
            return x_t
ca86da3a30c4e080d4db8c25fca73de843663cb4
17
dpm_solver.py
371
release more models
36,909
0
481
235
57
157,369
113
stablediffusion
30
ldm/models/diffusion/dpm_solver/dpm_solver.py
Python
32
{ "docstring": "\n DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (x.shape[0],).\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n ", "language": "en", "n_whitespaces": 205, "n_words": 95, "vocab_size": 54 }
https://github.com/Stability-AI/stablediffusion.git
3
_asarray_with_order
def _asarray_with_order(array, dtype=None, order=None, copy=None, xp=None):
    if xp is None:
        xp, _ = get_namespace(array)
    if xp.__name__ in {"numpy", "numpy.array_api"}:
        # Use NumPy API to support order
        array = numpy.asarray(array, order=order, dtype=dtype)
        return xp.asarray(array, copy=copy)
    else:
        return xp.asarray(array, dtype=dtype, copy=copy)
2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b
11
_array_api.py
139
ENH Adds Array API support to LinearDiscriminantAnalysis (#22554) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
76,628
0
86
90
34
261,023
39
scikit-learn
11
sklearn/utils/_array_api.py
Python
8
{ "docstring": "Helper to support the order kwarg only for NumPy-backed arrays\n\n Memory layout parameter `order` is not exposed in the Array API standard,\n however some input validation code in scikit-learn needs to work both\n for classes and functions that will leverage Array API only operations\n and for code that inherently relies on NumPy backed data containers with\n specific memory layout constraints (e.g. our own Cython code). The\n purpose of this helper is to make it possible to share code for data\n container validation without memory copies for both downstream use cases:\n the `order` parameter is only enforced if the input array implementation\n is NumPy based, otherwise `order` is just silently ignored.\n ", "language": "en", "n_whitespaces": 140, "n_words": 110, "vocab_size": 77 }
https://github.com/scikit-learn/scikit-learn.git
1
test_bound_blocks_are_available_on_template
def test_bound_blocks_are_available_on_template(self):
    block = SectionBlock()
    value = block.to_python({"title": "Hello", "body": "<i>italic</i> world"})
    result = block.render(value)
    self.assertEqual(result, )
d10f15e55806c6944827d801cd9c2d53f5da4186
11
test_blocks.py
81
Reformat with black
16,226
0
52
43
15
74,164
17
wagtail
9
wagtail/core/tests/test_blocks.py
Python
5
{ "docstring": "\n Test that we are able to use value.bound_blocks within templates\n to access a child block's own HTML rendering\n <h1>Hello</h1><i>italic</i> world", "language": "en", "n_whitespaces": 41, "n_words": 20, "vocab_size": 19 }
https://github.com/wagtail/wagtail.git
2
_genName
def _genName(cls, name):
    if not name:
        name = "frame_" + str(uuid.uuid4()).replace("-", "")
    # TODO: reword name in case of caller's mistake
    return name
1c0935c1bc0856d43f69c1e32498636ee24ebc85
15
base_worker.py
62
FEAT-#4913: Enabling pyhdk (#4900) Co-authored-by: ienkovich <ilya.enkovich@intel.com> Signed-off-by: izamyati <igor.zamyatin@intel.com>
35,956
0
62
33
21
154,395
23
modin
7
modin/experimental/core/execution/native/implementations/omnisci_on_native/base_worker.py
Python
4
{ "docstring": "\n Generate or mangle a table name.\n\n Parameters\n ----------\n name : str or None\n Table name to mangle or None to generate a unique\n table name.\n\n Returns\n -------\n str\n Table name.\n ", "language": "en", "n_whitespaces": 120, "n_words": 30, "vocab_size": 18 }
https://github.com/modin-project/modin.git
11
sort_graph_by_row_values
def sort_graph_by_row_values(graph, copy=False, warn_when_not_sorted=True):
    if not issparse(graph):
        raise TypeError(f"Input graph must be a sparse matrix, got {graph!r} instead.")

    if graph.format == "csr" and _is_sorted_by_data(graph):
        return graph

    if warn_when_not_sorted:
        warnings.warn(
            "Precomputed sparse input was not sorted by row values. Use the function"
            " sklearn.neighbors.sort_graph_by_row_values to sort the input by row"
            " values, with warn_when_not_sorted=False to remove this warning.",
            EfficiencyWarning,
        )

    if graph.format not in ("csr", "csc", "coo", "lil"):
        raise TypeError(
            f"Sparse matrix in {graph.format!r} format is not supported due to "
            "its handling of explicit zeros"
        )
    elif graph.format != "csr":
        if not copy:
            raise ValueError(
                "The input graph is not in CSR format. Use copy=True to allow "
                "the conversion to CSR format."
            )
        graph = graph.asformat("csr")
    elif copy:  # csr format with copy=True
        graph = graph.copy()

    row_nnz = np.diff(graph.indptr)
    if row_nnz.max() == row_nnz.min():
        # if each sample has the same number of provided neighbors
        n_samples = graph.shape[0]
        distances = graph.data.reshape(n_samples, -1)

        order = np.argsort(distances, kind="mergesort")
        order += np.arange(n_samples)[:, None] * row_nnz[0]
        order = order.ravel()
        graph.data = graph.data[order]
        graph.indices = graph.indices[order]
    else:
        for start, stop in zip(graph.indptr, graph.indptr[1:]):
            order = np.argsort(graph.data[start:stop], kind="mergesort")
            graph.data[start:stop] = graph.data[start:stop][order]
            graph.indices[start:stop] = graph.indices[start:stop][order]
    return graph
b94bc5ea6821607d1e9826ce2d084c76379820ba
15
_base.py
503
ENH add new function sort_graph_by_row_values (#23139) Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
75,959
0
501
297
125
259,865
190
scikit-learn
33
sklearn/neighbors/_base.py
Python
41
{ "docstring": "Sort a sparse graph such that each row is stored with increasing values.\n\n .. versionadded:: 1.2\n\n Parameters\n ----------\n graph : sparse matrix of shape (n_samples, n_samples)\n Distance matrix to other samples, where only non-zero elements are\n considered neighbors. Matrix is converted to CSR format if not already.\n\n copy : bool, default=False\n If True, the graph is copied before sorting. If False, the sorting is\n performed inplace. If the graph is not of CSR format, `copy` must be\n True to allow the conversion to CSR format, otherwise an error is\n raised.\n\n warn_when_not_sorted : bool, default=True\n If True, a :class:`~sklearn.exceptions.EfficiencyWarning` is raised\n when the input graph is not sorted by row values.\n\n Returns\n -------\n graph : sparse matrix of shape (n_samples, n_samples)\n Distance matrix to other samples, where only non-zero elements are\n considered neighbors. Matrix is in CSR format.\n ", "language": "en", "n_whitespaces": 237, "n_words": 137, "vocab_size": 78 }
https://github.com/scikit-learn/scikit-learn.git
1
add_and_show_greeks
def add_and_show_greeks(price, implied_volatility, strike, days, side):
    # Add in hedge option
    delta, gamma, vega = hedge_model.add_hedge_option(
        price, implied_volatility, strike, days, side
    )

    # Show the added delta, gamma and vega positions. Next to that, also show the inputted
    # implied volatility and strike
    positions = pd.DataFrame(
        [delta, gamma, vega, implied_volatility, strike],
        index=["Delta", "Gamma", "Vega", "Implied Volatility", "Strike Price"],
        columns=["Positions"],
    )

    # Show table
    print_rich_table(positions, show_index=True, headers=list(positions.columns))
    console.print()

    return delta, gamma, vega
54a1b6f545a0016c576e9e00eef5c003d229dacf
11
hedge_view.py
154
Feature/hedge (#1768) * [Bug] Incorrect log for reddit keys. #1733 fix * Create new feature-hedge * Significantly improve code of hedge menu * More robust * Robustness * Fix tests * Fix can't multiply sequence by non-int of type 'numpy.float64' error * Temporary fix of singular matrix error. Return first feasible solution * Update Hugo Documentation * Combining menus and cleaning up code * Tidy up call_exp * Update tests Round 1 * Update tests Round 2 * Fix linting error * Fix linting? * Fixed glitch Co-authored-by: JerBouma <jer.bouma@gmail.com> Co-authored-by: James Maslek <jmaslek11@gmail.com> Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: colin99d <colin99delahunty@gmail.com> Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt>
84,766
0
135
101
53
284,500
71
OpenBBTerminal
22
openbb_terminal/stocks/options/hedge/hedge_view.py
Python
12
{ "docstring": "Determine the delta, gamma and vega value of the portfolio and/or options and show them.\n\n Parameters\n ----------\n price: int\n The price.\n implied_volatility: float\n The implied volatility.\n strike: float\n The strike price.\n days: float\n The amount of days until expiration. Use annual notation thus a month would be 30 / 360.\n sign: int\n Whether you have a long (1) or short (-1) position\n\n Returns\n -------\n delta: float\n gamma: float\n vega: float\n ", "language": "en", "n_whitespaces": 144, "n_words": 70, "vocab_size": 56 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
test_acknowledged_new
def test_acknowledged_new(self) -> None:
    expected_topic = "Test policy name (1234)"
    expected_message = .strip()
    self.check_webhook(
        "incident_acknowledged_new",
        expected_topic,
        expected_message,
        content_type="application/json",
    )
bfd9fc86223c2446e8b38d2cdd5876caed50bfda
9
tests.py
57
integration: Fix integration with newrelic. Newrelic updated the payload that's sent via the webhook incoming call causing a bug in the newrelic webhook endpoint. This fixes the bug by updating the endpoint to respect the new format of the payload as well as the old format. This should be updated once the old format is EOLed. Fixes #22338.
17,831
0
90
32
18
84,412
19
zulip
7
zerver/webhooks/newrelic/tests.py
Python
11
{ "docstring": "\n[Incident](https://alerts.newrelic.com/accounts/2941966/incidents/1234) **acknowledged** by **Alice** for condition: **Server Down**\n", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/zulip/zulip.git
2
_get_vhost_block
def _get_vhost_block(self, vhost):
    try:
        span_val = self.parser.aug.span(vhost.path)
    except ValueError:
        logger.critical("Error while reading the VirtualHost %s from "
                        "file %s", vhost.name, vhost.filep, exc_info=True)
        raise errors.PluginError("Unable to read VirtualHost from file")
    span_filep = span_val[0]
    span_start = span_val[5]
    span_end = span_val[6]
    with open(span_filep, 'r') as fh:
        fh.seek(span_start)
        vh_contents = fh.read(span_end-span_start).split("\n")
    self._remove_closing_vhost_tag(vh_contents)
    return vh_contents
eeca208c8f57304590ac1af80b496e61021aaa45
13
configurator.py
187
Various clean-ups in certbot-apache. Use f-strings. (#9132) * Various clean-ups in certbot-apache. Use f-strings. * Smaller tweaks
45,465
0
195
110
43
186,369
50
certbot
26
certbot-apache/certbot_apache/_internal/configurator.py
Python
15
{ "docstring": " Helper method to get VirtualHost contents from the original file.\n This is done with help of augeas span, which returns the span start and\n end positions\n\n :returns: `list` of VirtualHost block content lines without closing tag\n ", "language": "en", "n_whitespaces": 65, "n_words": 36, "vocab_size": 33 }
https://github.com/certbot/certbot.git
3
__call__
def __call__(self, string):
    texts = []
    floats = []
    for i, part in enumerate(self._FLOAT_RE.split(string)):
        if i % 2 == 0:
            texts.append(part)
        else:
            floats.append(float(part))

    return texts, np.array(floats)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
14
keras_doctest_lib.py
109
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,626
0
113
66
24
276,313
26
keras
14
keras/testing_infra/keras_doctest_lib.py
Python
9
{ "docstring": "Extracts floats from a string.\n\n >>> text_parts, floats = _FloatExtractor()(\"Text 1.0 Text\")\n >>> text_parts\n ['Text ', ' Text']\n >>> floats\n array([1.])\n\n Args:\n string: the string to extract floats from.\n\n Returns:\n A (string, array) pair, where `string` has each float replaced by \"...\"\n and `array` is a `float32` `numpy.array` containing the extracted floats.\n ", "language": "en", "n_whitespaces": 135, "n_words": 52, "vocab_size": 45 }
https://github.com/keras-team/keras.git
7
parse_boundary_stream
def parse_boundary_stream(stream, max_header_size):
    # Stream at beginning of header, look for end of header
    # and parse it if found. The header must fit within one
    # chunk.
    chunk = stream.read(max_header_size)
    # 'find' returns the top of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
    header_end = chunk.find(b"\r\n\r\n")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
multipartparser.py
53
Refs #33476 -- Reformatted code with Black.
51,350
0
86
149
47
206,065
59
django
7
django/http/multipartparser.py
Python
24
{ "docstring": "\n Parse one and exactly one stream that encapsulates a boundary.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 9 }
https://github.com/django/django.git
12
slice_with_int_dask_array
def slice_with_int_dask_array(x, index):
    from dask.array.core import Array

    assert len(index) == x.ndim
    fancy_indexes = [
        isinstance(idx, (tuple, list))
        or (isinstance(idx, (np.ndarray, Array)) and idx.ndim > 0)
        for idx in index
    ]
    if sum(fancy_indexes) > 1:
        raise NotImplementedError("Don't yet support nd fancy indexing")

    out_index = []
    dropped_axis_cnt = 0
    for in_axis, idx in enumerate(index):
        out_axis = in_axis - dropped_axis_cnt
        if isinstance(idx, Array) and idx.dtype.kind in "iu":
            if idx.ndim == 0:
                idx = idx[np.newaxis]
                x = slice_with_int_dask_array_on_axis(x, idx, out_axis)
                x = x[tuple(0 if i == out_axis else slice(None) for i in range(x.ndim))]
                dropped_axis_cnt += 1
            elif idx.ndim == 1:
                x = slice_with_int_dask_array_on_axis(x, idx, out_axis)
                out_index.append(slice(None))
            else:
                raise NotImplementedError(
                    "Slicing with dask.array of ints only permitted when "
                    "the indexer has zero or one dimensions"
                )
        else:
            out_index.append(idx)
    return x, tuple(out_index)
cccb9d8d8e33a891396b1275c2448c352ef40c27
19
slicing.py
344
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
36,524
0
408
219
89
156,059
127
dask
31
dask/array/slicing.py
Python
31
{ "docstring": "Slice x with at most one 1D dask arrays of ints.\n\n This is a helper function of :meth:`Array.__getitem__`.\n\n Parameters\n ----------\n x: Array\n index: tuple with as many elements as x.ndim, among which there are\n one or more Array's with dtype=int\n\n Returns\n -------\n tuple of (sliced x, new index)\n\n where the new index is the same as the input, but with slice(None)\n replaced to the original slicer where a 1D filter has been applied and\n one less element where a zero-dimensional filter has been applied.\n ", "language": "en", "n_whitespaces": 130, "n_words": 84, "vocab_size": 61 }
https://github.com/dask/dask.git
1
get_edit_upload_response_data
def get_edit_upload_response_data(self):
    return {
        "success": True,
        self.context_upload_id_name: self.upload_object.id,
        "form": render_to_string(
            self.edit_form_template_name,
            self.get_edit_upload_form_context_data(),
            request=self.request,
        ),
    }
d10f15e55806c6944827d801cd9c2d53f5da4186
11
multiple_upload.py
72
Reformat with black
15,889
0
125
45
15
72,418
15
wagtail
9
wagtail/admin/views/generic/multiple_upload.py
Python
10
{ "docstring": "\n Return the JSON response data for an object that has been uploaded to an\n upload object and now needs extra metadata to become a final object\n ", "language": "en", "n_whitespaces": 48, "n_words": 26, "vocab_size": 22 }
https://github.com/wagtail/wagtail.git
1
peek
def peek(self, n=0):
    self._check_can_read()
    # Relies on the undocumented fact that BufferedReader.peek()
    # always returns at least one byte (except at EOF), independent
    # of the value of n
    return self._buffer.peek(n)
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
bz2.py
44
add python 3.10.4 for windows
56,263
0
73
24
26
221,193
31
XX-Net
5
python3.10.4/Lib/bz2.py
Python
3
{ "docstring": "Return buffered data without advancing the file position.\n\n Always returns at least one byte of data, unless at EOF.\n The exact number of bytes returned is unspecified.\n ", "language": "en", "n_whitespaces": 48, "n_words": 27, "vocab_size": 25 }
https://github.com/XX-net/XX-Net.git
3
_get_boot_time_aix
def _get_boot_time_aix():
    res = __salt__["cmd.run_all"]("ps -o etime= -p 1")
    if res["retcode"] > 0:
        raise CommandExecutionError("Unable to find boot_time for pid 1.")
    bt_time = res["stdout"]
    match = re.match(r"\s*(?:(\d+)-)?(?:(\d\d):)?(\d\d):(\d\d)\s*", bt_time)
    if not match:
        raise CommandExecutionError("Unexpected time format.")
    groups = match.groups(default="00")
    boot_secs = (
        _number(groups[0]) * 86400
        + _number(groups[1]) * 3600
        + _number(groups[2]) * 60
        + _number(groups[3])
    )
    return boot_secs
7fabb22f3e361bfff3fdbca01c12659591d15109
14
status.py
182
Fix uptime on AIX systems when less than 24 hours
54,364
0
129
106
46
216,058
57
salt
11
salt/modules/status.py
Python
16
{ "docstring": "\n Return the number of seconds since boot time on AIX\n\n t=$(LC_ALL=POSIX ps -o etime= -p 1)\n d=0 h=0\n case $t in *-*) d=${t%%-*}; t=${t#*-};; esac\n case $t in *:*:*) h=${t%%:*}; t=${t#*:};; esac\n s=$((d*86400 + h*3600 + ${t%%:*}*60 + ${t#*:}))\n ", "language": "en", "n_whitespaces": 61, "n_words": 39, "vocab_size": 33 }
https://github.com/saltstack/salt.git
3
_matches_get_other_nodes
def _matches_get_other_nodes(dictionary, nodes, node_ind):
    ind_node = nodes[node_ind]
    return [ind for ind in dictionary if nodes[ind] == ind_node]
9d58006fc0a23afcba38f641c9472917c436428a
9
mul.py
47
Code cleanup
48,956
0
38
31
17
198,477
17
sympy
6
sympy/core/mul.py
Python
3
{ "docstring": "Find other wildcards that may have already been matched.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/sympy/sympy.git
1
test_uncancellable_disconnect
def test_uncancellable_disconnect(self) -> None:
    channel = make_request(
        self.reactor, self.site, "POST", "/sleep", await_result=False
    )
    self._test_disconnect(
        self.reactor,
        channel,
        expect_cancellation=False,
        expected_body={"result": True},
    )
dffecade7df8a88caced2a7707c51e2de3407c0d
11
test_server.py
81
Respect the `@cancellable` flag for `DirectServe{Html,Json}Resource`s (#12698) `DirectServeHtmlResource` and `DirectServeJsonResource` both inherit from `_AsyncResource`. These classes expect to be subclassed with `_async_render_*` methods. This commit has no effect on `JsonResource`, despite inheriting from `_AsyncResource`. `JsonResource` has its own `_async_render` override which will need to be updated separately. Signed-off-by: Sean Quah <seanq@element.io>
72,162
0
110
51
18
248,229
20
synapse
10
tests/test_server.py
Python
11
{ "docstring": "Test that handlers without the `@cancellable` flag cannot be cancelled.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/matrix-org/synapse.git
1
test_del_store_not_found
def test_del_store_not_found(certutil, cert_file):
    with pytest.raises(salt.exceptions.CommandExecutionError) as exc:
        certutil.del_store(
            source=str(cert_file.parent / "absent.cer"),
            store="TrustedPublisher",
        )
    assert "cert_file not found" in exc.value.message
a8d2d1e1397cdc79b2c5f1ad7f6e3b729dcf8857
14
test_win_certutil.py
88
Add tests, fix state module
54,234
0
64
50
19
215,899
19
salt
16
tests/pytests/functional/modules/test_win_certutil.py
Python
7
{ "docstring": "\n Test del_store with a missing certificate\n ", "language": "en", "n_whitespaces": 13, "n_words": 6, "vocab_size": 6 }
https://github.com/saltstack/salt.git
2
__next__
def __next__(self):
    line = self.readline()
    if line:
        return line
    raise StopIteration
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
codecs.py
36
add python 3.10.4 for windows
56,375
0
50
20
10
221,360
11
XX-Net
5
python3.10.4/Lib/codecs.py
Python
5
{ "docstring": " Return the next decoded line from the input stream.", "language": "en", "n_whitespaces": 9, "n_words": 9, "vocab_size": 8 }
https://github.com/XX-net/XX-Net.git
1
datetime_cast_date_sql
def datetime_cast_date_sql(self, field_name, tzname):
    raise NotImplementedError(
        "subclasses of BaseDatabaseOperations may require a "
        "datetime_cast_date_sql() method."
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
operations.py
31
Refs #33476 -- Reformatted code with Black.
50,949
0
59
16
16
204,876
16
django
5
django/db/backends/base/operations.py
Python
5
{ "docstring": "\n Return the SQL to cast a datetime value to date value.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
https://github.com/django/django.git
1
test_empty
def test_empty(self) -> None:
    id_gen = self._create_id_generator()

    # The table is empty so we expect an empty map for positions
    self.assertEqual(id_gen.get_positions(), {})
9d21ecf7ceab55bc19c4457b8b07401b0b1623a7
9
test_id_generators.py
50
Add type hints to tests files. (#12256)
71,933
0
50
28
21
247,800
22
synapse
6
tests/storage/test_id_generators.py
Python
6
{ "docstring": "Test an ID generator against an empty database gives sensible\n current positions.\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 11 }
https://github.com/matrix-org/synapse.git
3
nodata_value
def nodata_value(self, value):
    if value is None:
        capi.delete_band_nodata_value(self._ptr)
    elif not isinstance(value, (int, float)):
        raise ValueError("Nodata value must be numeric or None.")
    else:
        capi.set_band_nodata_value(self._ptr, value)
    self._flush()
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
band.py
93
Refs #33476 -- Reformatted code with Black.
50,605
0
93
56
24
204,001
25
django
12
django/contrib/gis/gdal/raster/band.py
Python
8
{ "docstring": "\n Set the nodata value for this band.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/django/django.git
1
test_has_module_permission
def test_has_module_permission(self):
    self.client.force_login(self.superuser)
    response = self.client.get(self.index_url)
    self.assertContains(response, "admin_views")
    self.assertContains(response, "Articles")
    self.client.logout()

    self.client.force_login(self.viewuser)
    response = self.client.get(self.index_url)
    self.assertContains(response, "admin_views")
    self.assertContains(response, "Articles")
    self.client.logout()

    self.client.force_login(self.adduser)
    response = self.client.get(self.index_url)
    self.assertContains(response, "admin_views")
    self.assertContains(response, "Articles")
    self.client.logout()

    self.client.force_login(self.changeuser)
    response = self.client.get(self.index_url)
    self.assertContains(response, "admin_views")
    self.assertContains(response, "Articles")
    self.client.logout()

    self.client.force_login(self.deleteuser)
    response = self.client.get(self.index_url)
    self.assertContains(response, "admin_views")
    self.assertContains(response, "Articles")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
tests.py
376
Refs #33476 -- Reformatted code with Black.
52,077
0
221
224
14
207,737
46
django
14
tests/admin_views/tests.py
Python
25
{ "docstring": "\n has_module_permission() returns True for all users who\n have any permission for that module (add, change, or delete), so that\n the module is displayed on the admin index page.\n ", "language": "en", "n_whitespaces": 57, "n_words": 28, "vocab_size": 24 }
https://github.com/django/django.git
2
_supports_universal_builds
def _supports_universal_builds():
    # As an approximation, we assume that if we are running on 10.4 or above,
    # then we are running with an Xcode environment that supports universal
    # builds, in particular -isysroot and -arch arguments to the compiler. This
    # is in support of allowing 10.4 universal builds to run on 10.3.x systems.

    osx_version = _get_system_version_tuple()
    return bool(osx_version >= (10, 4)) if osx_version else False
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
_osx_support.py
46
add python 3.10.4 for windows
55,636
0
88
25
51
219,598
67
XX-Net
4
python3.10.4/Lib/_osx_support.py
Python
3
{ "docstring": "Returns True if universal builds are supported on this system", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/XX-net/XX-Net.git
1
mock_smile_adam_2
def mock_smile_adam_2() -> Generator[None, MagicMock, None]:
    chosen_env = "m_adam_heating"

    with patch(
        "homeassistant.components.plugwise.gateway.Smile", autospec=True
    ) as smile_mock:
        smile = smile_mock.return_value

        smile.gateway_id = "da224107914542988a88561b4452b0f6"
        smile.heater_id = "056ee145a816487eaa69243c3280f8bf"
        smile.smile_version = "3.6.4"
        smile.smile_type = "thermostat"
        smile.smile_hostname = "smile98765"
        smile.smile_name = "Adam"
        smile.connect.return_value = True
        smile.notifications = _read_json(chosen_env, "notifications")
        smile.async_update.return_value = _read_json(chosen_env, "all_data")

        yield smile


@pytest.fixture
2667f0b792b1f936aeb5958cc40d5dee26350bf6
@pytest.fixture
11
conftest.py
180
Bump plugwise to v0.21.3, add related new features (#76610) Co-authored-by: Franck Nijhof <frenck@frenck.nl>
87,135
1
146
95
39
287,952
51
core
21
tests/components/plugwise/conftest.py
Python
17
{ "docstring": "Create a 2nd Mock Adam environment for testing exceptions.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
get_tables
def get_tables(self) -> StatusResponse:
    query = 
    result = self.native_query(query)
    df = result.data_frame
    df = df[['TABLE_NAME' 'TABLE_TYPE']]
    result.data_frame = df.rename(columns={'TABLE_NAME': 'table_name', 'TABLE_TYPE': 'table_type'})
    return result
9a0e918bba3439959112a7fd8e5210276b5ac255
12
druid_handler.py
103
implemented the get_tables() and get_columns() methods
25,698
0
74
55
17
116,214
24
mindsdb
10
mindsdb/integrations/handlers/druid_handler/druid_handler.py
Python
15
{ "docstring": "\n Return list of entities that will be accessible as tables.\n Returns:\n HandlerResponse\n \n SELECT *\n FROM INFORMATION_SCHEMA.TABLES\n ", "language": "en", "n_whitespaces": 79, "n_words": 16, "vocab_size": 16 }
https://github.com/mindsdb/mindsdb.git
4
_add_unique_metric_name
def _add_unique_metric_name(self, metric_name, metric_fn, output_index):
    # For multi-output models, prepend the output names to the metric name.
    if len(self.output_names) > 1:
        # If we're loading from an already-serialized model, we've already
        # prepended the output name, and we don't want to do it again.
        #
        # Alternatively, we may be receiving a stateless metric (e.g. the
        # string "accuracy") rather than a `Metric` object, in which case we
        # want to prepend the output name even if we are loading a
        # serialized model.
        if not getattr(metric_fn, "_from_serialized", False):
            metric_name = "%s_%s" % (
                self.output_names[output_index],
                metric_name,
            )

    j = 1
    base_metric_name = metric_name
    while metric_name in self.metrics_names:
        metric_name = "%s_%d" % (base_metric_name, j)
        j += 1

    return metric_name
fa6d9107a498f7c2403ff28c7b389a1a0c5cc083
14
training_v1.py
128
reduct too long lines
82,051
0
345
75
80
277,470
118
keras
11
keras/engine/training_v1.py
Python
13
{ "docstring": "Makes the metric name unique.\n\n If there are multiple outputs for which the metrics are calculated,\n the metric names have to be made unique by appending an integer.\n\n Args:\n metric_name: Metric name that corresponds to the metric specified by\n the user. For example: 'acc'.\n metric_fn: The Metric object.\n output_index: The index of the model output for which the metric name\n is being added.\n\n Returns:\n string, name of the model's unique metric name\n ", "language": "en", "n_whitespaces": 169, "n_words": 72, "vocab_size": 48 }
https://github.com/keras-team/keras.git
1
test_statistics_duplicated
def test_statistics_duplicated(hass_recorder, caplog):
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})
    zero, four, states = record_states(hass)
    hist = history.get_significant_states(hass, zero, four)
    assert dict(states) == dict(hist)

    wait_recording_done(hass)
    assert "Compiling statistics for" not in caplog.text
    assert "Statistics already compiled" not in caplog.text

    with patch(
        "homeassistant.components.sensor.recorder.compile_statistics",
        return_value=statistics.PlatformCompiledStatistics([], {}),
    ) as compile_statistics:
        recorder.do_adhoc_statistics(start=zero)
        wait_recording_done(hass)
        assert compile_statistics.called
        compile_statistics.reset_mock()
        assert "Compiling statistics for" in caplog.text
        assert "Statistics already compiled" not in caplog.text
        caplog.clear()

        recorder.do_adhoc_statistics(start=zero)
        wait_recording_done(hass)
        assert not compile_statistics.called
        compile_statistics.reset_mock()
        assert "Compiling statistics for" not in caplog.text
        assert "Statistics already compiled" in caplog.text
        caplog.clear()
3737b58e85d834728fe27fefa05e8a0526e66d12
14
test_statistics.py
304
Avoid fetching metadata multiple times during stat compile (#70397)
97,291
0
236
181
44
298,347
88
core
28
tests/components/recorder/test_statistics.py
Python
28
{ "docstring": "Test statistics with same start time is not compiled.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
2
test_push
def test_push(self):
    filename = "/saltines/test.file"
    if salt.utils.platform.is_windows():
        filename = "C:\\saltines\\test.file"
    with patch(
        "salt.modules.cp.os.path",
        MagicMock(isfile=Mock(return_value=True), wraps=cp.os.path),
    ), patch(
        "salt.modules.cp.os.path",
        MagicMock(getsize=MagicMock(return_value=10), wraps=cp.os.path),
    ), patch.multiple(
        "salt.modules.cp",
        _auth=MagicMock(**{"return_value.gen_token.return_value": "token"}),
        __opts__={"id": "abc", "file_buffer_size": 10},
    ), patch(
        "salt.utils.files.fopen", mock_open(read_data=b"content")
    ) as m_open, patch(
        "salt.channel.client.ReqChannel.factory", MagicMock()
    ) as req_channel_factory_mock:
        response = cp.push(filename)
        assert response, response
        num_opens = len(m_open.filehandles[filename])
        assert num_opens == 1, num_opens
        fh_ = m_open.filehandles[filename][0]
        assert fh_.read.call_count == 2, fh_.read.call_count
        req_channel_factory_mock().__enter__().send.assert_called_once_with(
            dict(
                loc=fh_.tell(),  # pylint: disable=resource-leakage
                cmd="_file_recv",
                tok="token",
                path=["saltines", "test.file"],
                size=10,
                data=b"",  # data is empty here because load['data'] is overwritten
                id="abc",
            )
        )
68ab9eeae6899b7ff14fb3489b012862d62653c6
16
test_cp.py
403
Fix more tests
54,034
0
514
241
63
215,571
88
salt
43
tests/unit/modules/test_cp.py
Python
36
{ "docstring": "\n Test if push works with good posix path.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/saltstack/salt.git
23
in1d
def in1d(ar1, ar2, assume_unique=False, invert=False, kind=None):
    # Ravel both arrays, behavior for the first array could be different
    ar1 = np.asarray(ar1).ravel()
    ar2 = np.asarray(ar2).ravel()

    # Ensure that iteration through object arrays yields size-1 arrays
    if ar2.dtype == object:
        ar2 = ar2.reshape(-1, 1)
    # Convert booleans to uint8 so we can use the fast integer algorithm
    if ar1.dtype == bool:
        ar1 = ar1.view(np.uint8)
    if ar2.dtype == bool:
        ar2 = ar2.view(np.uint8)

    # Check if we can use a fast integer algorithm:
    integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and
                      np.issubdtype(ar2.dtype, np.integer))

    if kind not in {None, 'sort', 'dictionary'}:
        raise ValueError(
            "Invalid kind: {0}. ".format(kind)
            + "Please use None, 'sort' or 'dictionary'.")

    if integer_arrays and kind in {None, 'dictionary'}:
        ar2_min = np.min(ar2)
        ar2_max = np.max(ar2)
        ar1_size = ar1.size
        ar2_size = ar2.size

        # Check for integer overflow
        with np.errstate(over='raise'):
            try:
                ar2_range = ar2_max - ar2_min

                # Optimal performance is for approximately
                # log10(size) > (log10(range) - 2.27) / 0.927.
                # However, here we set the requirement that
                # the intermediate array can only be 6x
                # the combined memory allocation of the original
                # arrays.
                # (see discussion on
                # https://github.com/numpy/numpy/pull/12065)
                below_memory_constraint = (
                    ar2_range <= 6 * (ar1_size + ar2_size)
                )
            except FloatingPointError:
                below_memory_constraint = False

        # Use the fast integer algorithm
        if below_memory_constraint or kind == 'dictionary':

            if invert:
                outgoing_array = np.ones_like(ar1, dtype=bool)
            else:
                outgoing_array = np.zeros_like(ar1, dtype=bool)

            # Make elements 1 where the integer exists in ar2
            if invert:
                isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)
                isin_helper_ar[ar2 - ar2_min] = 0
            else:
                isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)
                isin_helper_ar[ar2 - ar2_min] = 1

            # Mask out elements we know won't work
            basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)
            outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - ar2_min]

            return outgoing_array
    elif kind == 'dictionary':
        raise ValueError(
            "The 'dictionary' method is only "
            "supported for boolean or integer arrays. "
            "Please select 'sort' or None for kind."
        )

    # Check if one of the arrays may contain arbitrary objects
    contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject

    # This code is run when
    # a) the first condition is true, making the code significantly faster
    # b) the second condition is true (i.e. `ar1` or `ar2` may contain
    #    arbitrary objects), since then sorting is not guaranteed to work
    if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
        if invert:
            mask = np.ones(len(ar1), dtype=bool)
            for a in ar2:
                mask &= (ar1 != a)
        else:
            mask = np.zeros(len(ar1), dtype=bool)
            for a in ar2:
                mask |= (ar1 == a)
        return mask

    # Otherwise use sorting
    if not assume_unique:
        ar1, rev_idx = np.unique(ar1, return_inverse=True)
        ar2 = np.unique(ar2)

    ar = np.concatenate((ar1, ar2))
    # We need this to be a stable sort, so always use 'mergesort'
    # here. The values from the first array should always come before
    # the values from the second array.
    order = ar.argsort(kind='mergesort')
    sar = ar[order]
    if invert:
        bool_ar = (sar[1:] != sar[:-1])
    else:
        bool_ar = (sar[1:] == sar[:-1])
    flag = np.concatenate((bool_ar, [invert]))
    ret = np.empty(ar.shape, dtype=bool)
    ret[order] = flag

    if assume_unique:
        return ret[:len(ar1)]
    else:
        return ret[rev_idx]
cde60cee195660f2dafb2993e609d66559525fe8
17
arraysetops.py
955
MAINT: Switch parameter name to 'kind' over 'method'
38,719
0
1,384
585
258
160,766
496
numpy
57
numpy/lib/arraysetops.py
Python
77
{ "docstring": "\n Test whether each element of a 1-D array is also present in a second array.\n\n Returns a boolean array the same length as `ar1` that is True\n where an element of `ar1` is in `ar2` and False otherwise.\n\n We recommend using :func:`isin` instead of `in1d` for new code.\n\n Parameters\n ----------\n ar1 : (M,) array_like\n Input array.\n ar2 : array_like\n The values against which to test each value of `ar1`.\n assume_unique : bool, optional\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. Default is False.\n invert : bool, optional\n If True, the values in the returned array are inverted (that is,\n False where an element of `ar1` is in `ar2` and True otherwise).\n Default is False. ``np.in1d(a, b, invert=True)`` is equivalent\n to (but is faster than) ``np.invert(in1d(a, b))``.\n kind : {None, 'sort', 'dictionary'}, optional\n The algorithm to use. This will not affect the final result,\n but will affect the speed. Default will select automatically\n based on memory considerations.\n\n - If 'sort', will use a mergesort-based approach. This will have\n a memory usage of roughly 6 times the sum of the sizes of\n `ar1` and `ar2`, not accounting for size of dtypes.\n - If 'dictionary', will use a key-dictionary approach similar\n to a counting sort. This is only available for boolean and\n integer arrays. This will have a memory usage of the\n size of `ar1` plus the max-min value of `ar2`. This tends\n to be the faster method if the following formula is true:\n `log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927`,\n but may use greater memory.\n - If `None`, will automatically choose 'dictionary' if\n the required memory allocation is less than or equal to\n 6 times the sum of the sizes of `ar1` and `ar2`,\n otherwise will use 'sort'. This is done to not use\n a large amount of memory by default, even though\n 'dictionary' may be faster in most cases.\n\n .. versionadded:: 1.8.0\n\n Returns\n -------\n in1d : (M,) ndarray, bool\n The values `ar1[in1d]` are in `ar2`.\n\n See Also\n --------\n isin : Version of this function that preserves the\n shape of ar1.\n numpy.lib.arraysetops : Module with a number of other functions for\n performing set operations on arrays.\n\n Notes\n -----\n `in1d` can be considered as an element-wise function version of the\n python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly\n equivalent to ``np.array([item in b for item in a])``.\n However, this idea fails if `ar2` is a set, or similar (non-sequence)\n container: As ``ar2`` is converted to an array, in those cases\n ``asarray(ar2)`` is an object array rather than the expected array of\n contained values.\n\n .. versionadded:: 1.4.0\n\n Examples\n --------\n >>> test = np.array([0, 1, 2, 5, 0])\n >>> states = [0, 2]\n >>> mask = np.in1d(test, states)\n >>> mask\n array([ True, False, True, False, True])\n >>> test[mask]\n array([0, 2, 0])\n >>> mask = np.in1d(test, states, invert=True)\n >>> mask\n array([False, True, False, True, False])\n >>> test[mask]\n array([1, 5])\n ", "language": "en", "n_whitespaces": 921, "n_words": 485, "vocab_size": 256 }
https://github.com/numpy/numpy.git
1
center_of_mass
def center_of_mass(mask, esp=1e-6): h, w = mask.shape grid_h = torch.arange(h, device=mask.device)[:, None] grid_w = torch.arange(w, device=mask.device) normalizer = mask.sum().float().clamp(min=esp) center_h = (mask * grid_h).sum() / normalizer center_w = (mask * grid_w).sum() / normalizer return center_h, center_w
fa77be290460e84ce7da975831cb7e687a419177
12
misc.py
156
Refactor package
70,747
0
60
100
25
245,290
36
mmdetection
18
mmdet/models/utils/misc.py
Python
8
{ "docstring": "Calculate the centroid coordinates of the mask.\n\n Args:\n mask (Tensor): The mask to be calculated, shape (h, w).\n esp (float): Avoid dividing by zero. Default: 1e-6.\n\n Returns:\n tuple[Tensor]: the coordinates of the center point of the mask.\n\n - center_h (Tensor): the center point of the height.\n - center_w (Tensor): the center point of the width.\n ", "language": "en", "n_whitespaces": 107, "n_words": 55, "vocab_size": 33 }
https://github.com/open-mmlab/mmdetection.git
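A minimal usage sketch of the center_of_mass helper above; the mask values are made up, and the math is reproduced inline with plain torch so the snippet stays self-contained instead of assuming an import path from mmdet:

    import torch

    mask = torch.zeros(5, 5)
    mask[1:4, 2:5] = 1.0                      # a 3x3 block of ones
    h, w = mask.shape
    grid_h = torch.arange(h)[:, None]         # column vector of row indices
    grid_w = torch.arange(w)                  # row vector of column indices
    normalizer = mask.sum().clamp(min=1e-6)   # same guard against division by zero
    center_h = (mask * grid_h).sum() / normalizer
    center_w = (mask * grid_w).sum() / normalizer
    print(center_h.item(), center_w.item())   # 2.0 3.0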
23
streams
def streams(self, stream_types=None, sorting_excludes=None): try: ostreams = self._get_streams() if isinstance(ostreams, dict): ostreams = ostreams.items() # Flatten the iterator to a list so we can reuse it. if ostreams: ostreams = list(ostreams) except NoStreamsError: return {} except (OSError, ValueError) as err: raise PluginError(err) if not ostreams: return {} if stream_types is None: stream_types = self.default_stream_types(ostreams) # Add streams depending on stream type and priorities sorted_streams = sorted(iterate_streams(ostreams), key=partial(stream_type_priority, stream_types)) streams = {} for name, stream in sorted_streams: stream_type = type(stream).shortname() # Use * as wildcard to match other stream types if "*" not in stream_types and stream_type not in stream_types: continue # drop _alt from any stream names if name.endswith("_alt"): name = name[:-len("_alt")] existing = streams.get(name) if existing: existing_stream_type = type(existing).shortname() if existing_stream_type != stream_type: name = "{0}_{1}".format(name, stream_type) if name in streams: name = "{0}_alt".format(name) num_alts = len(list(filter(lambda n: n.startswith(name), streams.keys()))) # We shouldn't need more than 2 alt streams if num_alts >= 2: continue elif num_alts > 0: name = "{0}{1}".format(name, num_alts + 1) # Validate stream name and discard the stream if it's bad. match = re.match("([A-z0-9_+]+)", name) if match: name = match.group(1) else: self.logger.debug(f"The stream '{name}' has been ignored since it is badly named.") continue # Force lowercase name and replace space with underscore. streams[name.lower()] = stream # Create the best/worst synonyms
b72f23fd699de9730e9009ac319b84da68f15a73
21
plugin.py
506
docs: update API page, add type annotations
45,676
0
907
482
135
187,036
215
streamlink
44
src/streamlink/plugin/plugin.py
Python
68
{ "docstring": "\n Attempts to extract available streams.\n\n Returns a :class:`dict` containing the streams, where the key is\n the name of the stream (most commonly the quality name), with the value\n being a :class:`Stream` instance.\n\n The result can contain the synonyms **best** and **worst** which\n point to the streams which are likely to be of highest and\n lowest quality respectively.\n\n If multiple streams with the same name are found, the order of\n streams specified in *stream_types* will determine which stream\n gets to keep the name while the rest will be renamed to\n \"<name>_<stream type>\".\n\n The synonyms can be fine-tuned with the *sorting_excludes*\n parameter, which can be one of these types:\n\n - A list of filter expressions in the format\n ``[operator]<value>``. For example the filter \">480p\" will\n exclude streams ranked higher than \"480p\" from the list\n used in the synonyms ranking. Valid operators are ``>``, ``>=``, ``<``\n and ``<=``. If no operator is specified then equality will be tested.\n\n - A function that is passed to :meth:`filter` with a list of\n stream names as input.\n\n\n :param stream_types: A list of stream types to return\n :param sorting_excludes: Specify which streams to exclude from the best/worst synonyms\n :returns: A :class:`dict` of stream names and :class:`streamlink.stream.Stream` instances\n ", "language": "en", "n_whitespaces": 407, "n_words": 200, "vocab_size": 112 }
https://github.com/streamlink/streamlink.git
18
insert_predictor_answer
def insert_predictor_answer(self, insert): model_interface = self.session.model_interface data_store = self.session.data_store select_data_query = insert.get('select_data_query') if isinstance(select_data_query, str) is False or len(select_data_query) == 0: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg="'select_data_query' should not be empty" ).send() return models = model_interface.get_models() if insert['name'] in [x['name'] for x in models]: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=f"predictor with name '{insert['name']}'' already exists" ).send() return kwargs = {} if isinstance(insert.get('training_options'), str) \ and len(insert['training_options']) > 0: try: kwargs = json.loads(insert['training_options']) except Exception: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg='training_options should be in valid JSON string' ).send() return integration = self.session.integration if isinstance(integration, str) is False or len(integration) == 0: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg='select_data_query can be used only in query from database' ).send() return insert['select_data_query'] = insert['select_data_query'].replace(r"\'", "'") ds_name = data_store.get_vacant_name(insert['name']) ds = data_store.save_datasource(ds_name, integration, {'query': insert['select_data_query']}) insert['predict'] = [x.strip() for x in insert['predict'].split(',')] ds_data = data_store.get_datasource(ds_name) if ds_data is None: raise Exception(f"DataSource '{ds_name}' does not exists") ds_columns = [x['name'] for x in ds_data['columns']] for col in insert['predict']: if col not in ds_columns: data_store.delete_datasource(ds_name) raise Exception(f"Column '{col}' not exists") try: insert['predict'] = self._check_predict_columns(insert['predict'], ds_columns) except Exception: data_store.delete_datasource(ds_name) raise model_interface.learn( insert['name'], ds, insert['predict'], ds_data['id'], kwargs=kwargs, delete_ds_on_fail=True ) self.packet(OkPacket).send()
551205a18ac2ac19626f4e4ffb2ed88fcad705b9
16
mysql_proxy.py
713
fix
25,051
0
833
445
109
113,876
181
mindsdb
42
mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
Python
63
{ "docstring": " Start learn new predictor.\n Parameters:\n - insert - dict with keys as columns of mindsb.predictors table.\n ", "language": "en", "n_whitespaces": 47, "n_words": 16, "vocab_size": 15 }
https://github.com/mindsdb/mindsdb.git
3
_remove_long_seq
def _remove_long_seq(maxlen, seq, label): new_seq, new_label = [], [] for x, y in zip(seq, label): if len(x) < maxlen: new_seq.append(x) new_label.append(y) return new_seq, new_label @keras_export('keras.preprocessing.sequence.TimeseriesGenerator')
f1aa8b7d2a0c89591c5c42eca5b6f013114a7bbd
@keras_export('keras.preprocessing.sequence.TimeseriesGenerator')
11
sequence.py
99
Copy sequence utils from keras_preprocessing directly into core keras PiperOrigin-RevId: 424915569
79,741
1
41
55
22
268,873
25
keras
12
keras/preprocessing/sequence.py
Python
7
{ "docstring": "Removes sequences that exceed the maximum length.\n\n Args:\n maxlen: Int, maximum length of the output sequences.\n seq: List of lists, where each sublist is a sequence.\n label: List where each element is an integer.\n\n Returns:\n new_seq, new_label: shortened lists for `seq` and `label`.\n ", "language": "en", "n_whitespaces": 66, "n_words": 43, "vocab_size": 36 }
https://github.com/keras-team/keras.git
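A minimal sketch of what _remove_long_seq above does; the sequences and labels are invented, and the filtering is written out in plain Python so it runs without Keras:

    seqs = [[1, 2], [3, 4, 5, 6], [7]]
    labels = [0, 1, 2]
    maxlen = 3
    kept = [(x, y) for x, y in zip(seqs, labels) if len(x) < maxlen]
    new_seq = [x for x, _ in kept]
    new_label = [y for _, y in kept]
    print(new_seq, new_label)   # [[1, 2], [7]] [0, 2]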
7
remove_xml_tags
def remove_xml_tags(buf): filtered = bytearray() in_tag = False prev = 0 buf = memoryview(buf).cast("c") for curr, buf_char in enumerate(buf): # Check if we're coming out of or entering an XML tag if buf_char == b">": prev = curr + 1 in_tag = False elif buf_char == b"<": if curr > prev and not in_tag: # Keep everything after last non-extended-ASCII, # non-alphabetic character filtered.extend(buf[prev:curr]) # Output a space to delimit stretch we kept filtered.extend(b" ") in_tag = True # If we're not in a tag... if not in_tag: # Keep everything after last non-extended-ASCII, non-alphabetic # character filtered.extend(buf[prev:]) return filtered
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
16
charsetprober.py
180
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,089
0
384
103
62
21,893
100
pipenv
12
pipenv/patched/pip/_vendor/chardet/charsetprober.py
Python
17
{ "docstring": "\n Returns a copy of ``buf`` that retains only the sequences of English\n alphabet and high byte characters that are not between <> characters.\n This filter can be applied to all scripts which contain both English\n characters and extended ASCII characters, but is currently only used by\n ``Latin1Prober``.\n ", "language": "en", "n_whitespaces": 90, "n_words": 47, "vocab_size": 41 }
https://github.com/pypa/pipenv.git
1
test_rich_text_is_safe
def test_rich_text_is_safe(self): stream_block = blocks.StreamBlock( [ ( "paragraph", blocks.RichTextBlock(template="tests/jinja2/rich_text.html"), ) ] ) stream_value = stream_block.to_python( [ { "type": "paragraph", "value": '<p>Merry <a linktype="page" id="4">Christmas</a>!</p>', }, ] ) result = render_to_string( "tests/jinja2/stream.html", { "value": stream_value, }, ) self.assertIn( '<p>Merry <a href="/events/christmas/">Christmas</a>!</p>', result )
d10f15e55806c6944827d801cd9c2d53f5da4186
14
test_jinja2.py
125
Reformat with black
16,234
0
344
70
27
74,208
42
wagtail
12
wagtail/core/tests/test_jinja2.py
Python
26
{ "docstring": "\n Ensure that RichText values are marked safe\n so that they don't get double-escaped when inserted into a parent template (#2542)\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 19 }
https://github.com/wagtail/wagtail.git
6
getWindowsDLLVersion
def getWindowsDLLVersion(filename): # Get size needed for buffer (0 if no info) import ctypes.wintypes if type(filename) is unicode: GetFileVersionInfoSizeW = ctypes.windll.version.GetFileVersionInfoSizeW GetFileVersionInfoSizeW.argtypes = [ ctypes.wintypes.LPCWSTR, ctypes.wintypes.LPDWORD, ] GetFileVersionInfoSizeW.restype = ctypes.wintypes.HANDLE size = GetFileVersionInfoSizeW(filename, None) else: size = ctypes.windll.version.GetFileVersionInfoSizeA(filename, None) if not size: return (0, 0, 0, 0) # Create buffer res = ctypes.create_string_buffer(size) # Load file information into buffer res if type(filename) is unicode: # Python3 needs our help here. GetFileVersionInfo = ctypes.windll.version.GetFileVersionInfoW GetFileVersionInfo.argtypes = [ ctypes.wintypes.LPCWSTR, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.wintypes.LPVOID, ] GetFileVersionInfo.restype = ctypes.wintypes.BOOL else: # Python2 just works. GetFileVersionInfo = ctypes.windll.version.GetFileVersionInfoA success = GetFileVersionInfo(filename, 0, size, res) # This cannot really fail anymore. assert success # Look for codepages VerQueryValueA = ctypes.windll.version.VerQueryValueA VerQueryValueA.argtypes = [ ctypes.wintypes.LPCVOID, ctypes.wintypes.LPCSTR, ctypes.wintypes.LPVOID, ctypes.POINTER(ctypes.c_uint32), ] VerQueryValueA.restype = ctypes.wintypes.BOOL file_info = ctypes.POINTER(VsFixedFileInfoStructure)() uLen = ctypes.c_uint32(ctypes.sizeof(file_info)) b = VerQueryValueA(res, br"\\", ctypes.byref(file_info), ctypes.byref(uLen)) if not b: return (0, 0, 0, 0) if file_info.contents.dwSignature != 0xFEEF04BD: return (0, 0, 0, 0) ms = file_info.contents.dwFileVersionMS ls = file_info.contents.dwFileVersionLS return (ms >> 16) & 0xFFFF, ms & 0xFFFF, (ls >> 16) & 0xFFFF, ls & 0xFFFF _readelf_usage = "The 'readelf' is used to analyse dependencies on ELF using systems and required to be found."
982929807fdc5838554cf302a2013a28e7707514
13
SharedLibraries.py
537
macOS: Avoid references to original install paths as library ids
42,769
0
480
351
118
178,629
194
Nuitka
43
nuitka/utils/SharedLibraries.py
Python
46
{ "docstring": "Return DLL version information from a file.\n\n If not present, it will be (0, 0, 0, 0), otherwise it will be\n a tuple of 4 numbers.\n ", "language": "en", "n_whitespaces": 35, "n_words": 26, "vocab_size": 21 }
https://github.com/Nuitka/Nuitka.git
1
test_rollback_cursor_consistency
def test_rollback_cursor_consistency(self): con = sqlite.connect(":memory:") cur = con.cursor() cur.execute("create table test(x)") cur.execute("insert into test(x) values (5)") cur.execute("select 1 union select 2 union select 3") con.rollback() self.assertEqual(cur.fetchall(), [(1,), (2,), (3,)])
9d6a239a34a66e16188d76c23a3a770515ca44ca
9
test_transactions.py
123
bpo-44092: Don't reset statements/cursors before rollback (GH-26026) In SQLite versions pre 3.7.11, pending statements would block a rollback. This is no longer the case, so remove the workaround.
41,562
0
85
71
26
175,168
29
cpython
11
Lib/test/test_sqlite3/test_transactions.py
Python
8
{ "docstring": "Check that cursors behave correctly after rollback.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/python/cpython.git
1
is_package
def is_package(cls, fullname): return False load_module = classmethod(_load_module_shim)
8198943edd73a363c266633e1aa5b2a9e9c9f526
6
_bootstrap.py
28
add python 3.10.4 for windows
55,099
0
25
10
8
218,049
8
XX-Net
6
python3.10.4/Lib/importlib/_bootstrap.py
Python
2
{ "docstring": "Return False as built-in modules are never packages.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/XX-net/XX-Net.git
2
get_ind_under_point
def get_ind_under_point(self, event): xy = self.pathpatch.get_path().vertices xyt = self.pathpatch.get_transform().transform(xy) # to display coords xt, yt = xyt[:, 0], xyt[:, 1] d = np.sqrt((xt - event.x)**2 + (yt - event.y)**2) ind = d.argmin() return ind if d[ind] < self.epsilon else None
1068a6faa19767724437461bcfb88c6852ec435c
13
path_editor.py
152
Remove unnecessary np.{,as}array / astype calls. Quite often numpy will call asarray for us, saving us the need to call asarray explicitly. When we do call asarray (or array) ourselves, a dtype can directly be passed in, rather than immediately calling astype immediately after. Passing the dtype makes it unnecessary for asarray to infer the dtype of the passed-in container, and can also save an extra array allocation if asarray first has to allocate an array of a type and astype immediately has to allocate an array of another type.
23,885
0
90
96
33
110,014
40
matplotlib
20
examples/event_handling/path_editor.py
Python
7
{ "docstring": "\n Return the index of the point closest to the event position or *None*\n if no point is within ``self.epsilon`` to the event position.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 17 }
https://github.com/matplotlib/matplotlib.git
3
round_filters
def round_filters(filters, global_params): multiplier = global_params.width_coefficient if not multiplier: return filters divisor = global_params.depth_divisor filters *= multiplier new_filters = int(filters + divisor / 2) // divisor * divisor if new_filters < 0.9 * filters: new_filters += divisor return int(new_filters)
6e607a0fa1cefbf0388dac86c84debf4781cec48
12
rec_efficientb3_pren.py
92
[Feature] Add PREN Scene Text Recognition Model(Accepted in CVPR2021) (#5563) * [Feature] add PREN scene text recognition model * [Patch] Optimize yml File * [Patch] Save Label/Pred Preprocess Time Cost * [BugFix] Modify Shape Conversion to Fit for Inference Model Exportion * [Patch] ? * [Patch] ? * What's going on...
4,587
0
117
56
26
23,387
39
PaddleOCR
9
ppocr/modeling/backbones/rec_efficientb3_pren.py
Python
10
{ "docstring": "Calculate and round number of filters based on depth multiplier.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/PaddlePaddle/PaddleOCR.git
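A worked sketch of the rounding rule in round_filters above; the width coefficient and divisor are made-up values chosen only to show the arithmetic:

    filters, multiplier, divisor = 32, 1.2, 8
    scaled = filters * multiplier                                  # 38.4
    new_filters = int(scaled + divisor / 2) // divisor * divisor   # 42 // 8 * 8 = 40
    if new_filters < 0.9 * scaled:                                 # never round down by more than 10%
        new_filters += divisor
    print(new_filters)                                             # 40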
6
assert_lca_dicts_same
def assert_lca_dicts_same(self, d1, d2, G=None): if G is None: G = self.DG root_distance = self.root_distance else: roots = [n for n, deg in G.in_degree if deg == 0] assert len(roots) == 1 root_distance = nx.shortest_path_length(G, source=roots[0]) for a, b in ((min(pair), max(pair)) for pair in chain(d1, d2)): assert ( root_distance[get_pair(d1, a, b)] == root_distance[get_pair(d2, a, b)] )
b2f91c34a23058dd70b41784af0d87890216026a
13
test_lowest_common_ancestors.py
183
Naive lowest common ancestor implementation (#5736) * Add naive lca methods * Naive algorithm implementation for LCA * Modify naive lca functions * Correct parameters of nx.ancestors * Update lowest_common_ancestors.py * Parametrize tests * Apply suggestions from code review Co-authored-by: Dan Schult <dschult@colgate.edu> * Yield instead of append * Tests for naive lca * Correct test cases for naive lca algorithms * Apply suggestions from code review Co-authored-by: Mridul Seth <mail@mriduls.com> * Fix function name -when calling * Make requested changes * Inlining _get_a_lowest_common_ancestor Co-authored-by: dtuncturk <dilaramemis@sabanciuniv.edu> Co-authored-by: Dan Schult <dschult@colgate.edu> Co-authored-by: Mridul Seth <mail@mriduls.com>
42,227
0
177
124
40
177,015
57
networkx
22
networkx/algorithms/tests/test_lowest_common_ancestors.py
Python
12
{ "docstring": "Checks if d1 and d2 contain the same pairs and\n have a node at the same distance from root for each.\n If G is None use self.DG.", "language": "en", "n_whitespaces": 40, "n_words": 27, "vocab_size": 24 }
https://github.com/networkx/networkx.git
6
_prepare_socket_file
def _prepare_socket_file(self, socket_path, default_prefix): result = socket_path is_mac = sys.platform.startswith("darwin") if sys.platform == "win32": if socket_path is None: result = f"tcp://{self._localhost}" f":{self._get_unused_port()}" else: if socket_path is None: result = self._make_inc_temp( prefix=default_prefix, directory_name=self._sockets_dir ) else: try_to_create_directory(os.path.dirname(socket_path)) # Check socket path length to make sure it's short enough maxlen = (104 if is_mac else 108) - 1 # sockaddr_un->sun_path if len(result.split("://", 1)[-1].encode("utf-8")) > maxlen: raise OSError( "AF_UNIX path length cannot exceed " "{} bytes: {!r}".format(maxlen, result) ) return result
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
17
node.py
234
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,375
0
333
127
56
130,803
77
ray
25
python/ray/node.py
Python
20
{ "docstring": "Prepare the socket file for raylet and plasma.\n\n This method helps to prepare a socket file.\n 1. Make the directory if the directory does not exist.\n 2. If the socket file exists, do nothing (this just means we aren't the\n first worker on the node).\n\n Args:\n socket_path (string): the socket file to prepare.\n ", "language": "en", "n_whitespaces": 109, "n_words": 53, "vocab_size": 40 }
https://github.com/ray-project/ray.git
1
test_user_message_already_unlinked
def test_user_message_already_unlinked(self): IdentityProvider.objects.create(type="slack", external_id="TXXXXXXX1", config={}) responses.add(responses.POST, "https://slack.com/api/chat.postMessage", json={"ok": True}) resp = self.post_webhook(event_data=json.loads(MESSAGE_IM_EVENT_UNLINK)) assert resp.status_code == 200, resp.content request = responses.calls[0].request assert request.headers["Authorization"] == "Bearer xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx" data = json.loads(request.body) assert "You do not have a linked identity to unlink" in get_response_text(data)
b7d894b56953153cf33008ae7d33e1e41b175eb7
11
test_message_im.py
174
ref(tests): Split up large files (#31828)
19,302
0
103
104
35
96,313
40
sentry
25
tests/sentry/integrations/slack/endpoints/events/test_message_im.py
Python
9
{ "docstring": "\n Test that when a user without an Identity types in \"unlink\" to the DM we\n reply with the correct response.\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 19 }
https://github.com/getsentry/sentry.git
1
test_subscribe_to_stream_post_policy_moderators_stream
def test_subscribe_to_stream_post_policy_moderators_stream(self) -> None: member = self.example_user("AARON") stream = self.make_stream("stream1") # Make sure that we are testing this with full member which is just below the moderator # in the role hierarchy. self.assertFalse(member.is_provisional_member) do_change_stream_post_policy( stream, Stream.STREAM_POST_POLICY_MODERATORS, acting_user=member ) result = self.common_subscribe_to_streams(member, ["stream1"]) self.assert_json_success(result) json = result.json() self.assertEqual(json["subscribed"], {member.email: ["stream1"]}) self.assertEqual(json["already_subscribed"], {})
c30458e1740c7878e436037f61431884e54b349d
11
test_subs.py
172
streams: Add notifications for posting policy changes. An explanatory note on the changes in zulip.yaml and curl_param_value_generators is warranted here. In our automated tests for our curl examples, the test for the API endpoint that changes the posting permissions of a stream comes before our existing curl test for adding message reactions. Since there is an extra notification message due to the change in posting permissions, the message IDs used in tests that come after need to be incremented by 1. This is a part of #20289.
17,579
0
153
100
45
83,023
51
zulip
18
zerver/tests/test_subs.py
Python
15
{ "docstring": "\n Members can subscribe to streams where only admins and moderators can post\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
https://github.com/zulip/zulip.git
1
test_overwrite_storage_path
def test_overwrite_storage_path(self): call_command("document_retagger", "--storage_path", "--overwrite") d_first, d_second, d_unrelated, d_auto = self.get_updated_docs() self.assertEqual(d_first.storage_path, self.sp2) self.assertEqual(d_auto.storage_path, self.sp1) self.assertIsNone(d_second.storage_path) self.assertEqual(d_unrelated.storage_path, self.sp2)
c8e838e3a0828e82efac1fd93ebb9aba6a000ff8
8
test_management_retagger.py
116
Adds the storage paths to the re-tagger command
117,022
0
67
71
17
319,934
18
paperless-ngx
13
src/documents/tests/test_management_retagger.py
Python
7
{ "docstring": "\n GIVEN:\n - 2 storage paths with documents which match them\n - 1 document which matches but has a storage path\n WHEN:\n - document retagger is called with overwrite\n THEN:\n - Matching document's storage paths updated\n - Non-matching documents have no storage path\n - Existing storage patch overwritten\n ", "language": "en", "n_whitespaces": 142, "n_words": 47, "vocab_size": 32 }
https://github.com/paperless-ngx/paperless-ngx.git
1
hashkey
def hashkey(cls, *args, **kwargs): return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)
21972c91dd2b52cd206bf71ea038ab0e1f478b32
10
settings.py
52
add lock to cachetools usage * We observed daphne giving tracebacks when accessing logging settings. Originally, configure tower in tower settings was no a suspect because daphne is not multi-process. We've had issues with configure tower in tower settings and multi-process before. We later learned that Daphne is multi-threaded. Configure tower in tower was back to being a suspect. We constructed a minimal reproducer to show that multiple threads accessing settings can cause the same traceback that we saw in daphne. See https://gist.github.com/chrismeyersfsu/7aa4bdcf76e435efd617cb078c64d413 for that recreator. These fixes stop the recreation.
17,168
0
22
28
7
81,176
8
awx
7
awx/conf/settings.py
Python
2
{ "docstring": "\n Usage of @cachetools.cached has changed to @cachetools.cachedmethod\n The previous cachetools decorator called the hash function and passed in (self, key).\n The new cachtools decorator calls the hash function with just (key).\n Ideally, we would continue to pass self, however, the cachetools decorator interface\n does not allow us to.\n\n This hashkey function is to maintain that the key generated looks like\n ('<SettingsWrapper>', key). The thought is that maybe it is important to namespace\n our cache to the SettingsWrapper scope in case some other usage of this cache exists.\n I can not think of how any other system could and would use our private cache, but\n for safety sake we are ensuring the key schema does not change.\n ", "language": "en", "n_whitespaces": 194, "n_words": 116, "vocab_size": 82 }
https://github.com/ansible/awx.git
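A small sketch of the key shape produced by the hashkey override above; it only assumes the cachetools package is installed, and the setting name is invented:

    from cachetools.keys import hashkey

    key = hashkey("<SettingsWrapper>", "AWX_SOME_SETTING")
    print(key)              # ('<SettingsWrapper>', 'AWX_SOME_SETTING')
    cache = {key: "value"}  # the returned tuple is hashable, so it works as a cache key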
2
link_entity
def link_entity(props): id_ = props.get("id") link_props = {} if id_ is not None: link_props["linktype"] = "page" link_props["id"] = id_ else: link_props["href"] = check_url(props.get("url")) return DOM.create_element("a", link_props, props["children"])
d10f15e55806c6944827d801cd9c2d53f5da4186
14
contentstate.py
121
Reformat with black
15,641
0
66
66
21
71,201
27
wagtail
8
wagtail/admin/rich_text/converters/contentstate.py
Python
9
{ "docstring": "\n <a linktype=\"page\" id=\"1\">internal page link</a>\n ", "language": "en", "n_whitespaces": 12, "n_words": 5, "vocab_size": 5 }
https://github.com/wagtail/wagtail.git
14
_suggest_semantic_version
def _suggest_semantic_version(s): result = s.strip().lower() for pat, repl in _REPLACEMENTS: result = pat.sub(repl, result) if not result: result = '0.0.0' # Now look for numeric prefix, and separate it out from # the rest. #import pdb; pdb.set_trace() m = _NUMERIC_PREFIX.match(result) if not m: prefix = '0.0.0' suffix = result else: prefix = m.groups()[0].split('.') prefix = [int(i) for i in prefix] while len(prefix) < 3: prefix.append(0) if len(prefix) == 3: suffix = result[m.end():] else: suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():] prefix = prefix[:3] prefix = '.'.join([str(i) for i in prefix]) suffix = suffix.strip() if suffix: #import pdb; pdb.set_trace() # massage the suffix. for pat, repl in _SUFFIX_REPLACEMENTS: suffix = pat.sub(repl, suffix) if not suffix: result = prefix else: sep = '-' if 'dev' in suffix else '+' result = prefix + sep + suffix if not is_semver(result): result = None return result
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
19
version.py
411
upd; format
12,903
0
370
242
67
62,242
144
transferlearning
26
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/version.py
Python
33
{ "docstring": "\n Try to suggest a semantic form for a version for which\n _suggest_normalized_version couldn't come up with anything.\n ", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 15 }
https://github.com/jindongwang/transferlearning.git
1
test_previewing_subprocess_deployment
def test_previewing_subprocess_deployment(): result = invoke_and_assert( [ "deployment", "preview", "./tests/deployment_test_files/single_deployment.py", ], expected_output_contains="prefect.engine", ) assert result.stdout.endswith("\n") preview = result.stdout.strip() # spot-check some variables and the command-line assert "\nPREFECT_TEST_MODE=True \\" in preview assert "\nPREFECT_LOGGING_LEVEL=DEBUG \\" in preview assert preview.endswith(" -m prefect.engine 00000000000000000000000000000000")
5afded9fe6724d9e336f59792ee1d60656a2d94d
10
test_deployment_preview.py
110
Add a CLI command to preview how a FlowRun will appear in any FlowRunner's execution environment (PrefectHQ/orion#1971) Co-authored-by: Terrence Dorsey <terrence@prefect.io> Co-authored-by: Michael Adkins <madkinszane@gmail.com>
11,495
0
120
56
31
56,284
39
prefect
8
tests/cli/test_deployment_preview.py
Python
14
{ "docstring": "`prefect deployment preview my-flow-file.py` should render the\n shell command that will be run for the subprocess", "language": "en", "n_whitespaces": 18, "n_words": 16, "vocab_size": 15 }
https://github.com/PrefectHQ/prefect.git
4
completion_item_yank
def completion_item_yank(self, sel=False): text = self._cmd.selectedText() if not text: index = self.currentIndex() if not index.isValid(): raise cmdutils.CommandError("No item selected!") text = self._model().data(index) if not utils.supports_selection(): sel = False utils.set_clipboard(text, selection=sel)
a20bb67a878b2e68abf8268c1b0a27f018d01352
12
completionwidget.py
133
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. - Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. 
==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. ==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) 
==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). - Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. ==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. 
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. - Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
117,342
0
124
78
22
320,775
30
qutebrowser
17
qutebrowser/completion/completionwidget.py
Python
10
{ "docstring": "Yank the current completion item into the clipboard.\n\n Args:\n sel: Use the primary selection instead of the clipboard.\n ", "language": "en", "n_whitespaces": 43, "n_words": 18, "vocab_size": 14 }
https://github.com/qutebrowser/qutebrowser.git
1
get_index_and_columns
def _get_index_and_columns(df): return len(df.index), len(df.columns) @ray.remote(num_returns=4)
e7cb2e82f8b9c7a68f82abdd3b6011d661230b7e
@ray.remote(num_returns=4)
9
partition.py
50
REFACTOR-#4251: define public interfaces in `modin.core.execution.ray` module (#3868) Signed-off-by: Anatoly Myachev <anatoly.myachev@intel.com>
35,381
1
11
20
6
153,345
6
modin
8
modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition.py
Python
2
{ "docstring": "\n Get the number of rows and columns of a pandas DataFrame.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A pandas DataFrame which dimensions are needed.\n\n Returns\n -------\n int\n The number of rows.\n int\n The number of columns.\n ", "language": "en", "n_whitespaces": 84, "n_words": 35, "vocab_size": 27 }
https://github.com/modin-project/modin.git
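A minimal sketch of the row/column counting done by _get_index_and_columns above; it drops the @ray.remote wrapper and uses a tiny made-up frame so it runs with pandas alone:

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    print(len(df.index), len(df.columns))   # 3 2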
2
test_extra_tags
def test_extra_tags(self): for extra_tags in ['', None, 'some tags']: with self.subTest(extra_tags=extra_tags): self.assertEqual( self.encode_decode('message', extra_tags=extra_tags).extra_tags, extra_tags, )
efb4478e484ae61c5fc23563d4e9df1f8f49df49
15
test_cookie.py
80
Fixed #33458 -- Fixed encoding of messages with empty string as extra_tags.
50,236
0
109
47
16
203,144
16
django
6
tests/messages_tests/test_cookie.py
Python
7
{ "docstring": "\n A message's extra_tags attribute is correctly preserved when retrieved\n from the message storage.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
https://github.com/django/django.git
2
compile_helper
def compile_helper(): import os import subprocess path = os.path.abspath(os.path.dirname(__file__)) ret = subprocess.run(['make', '-C', path]) if ret.returncode != 0: print("Making C++ dataset helpers module failed, exiting.") import sys sys.exit(1)
1d930169e118c325c03343b6acd7d9c05eab1f85
11
dataset_utils.py
105
[Pre-training] Optional for nsp task, support multi-datasets in ERNIE-1.0 pre-training (#1621) * add no nsp training. * support for multi-dataset * fix test sample nums.
118,235
0
67
59
25
322,771
28
PaddleNLP
13
examples/language_model/data_tools/dataset_utils.py
Python
9
{ "docstring": "Compile helper function ar runtime. Make sure this\n is invoked on a single process.", "language": "en", "n_whitespaces": 16, "n_words": 14, "vocab_size": 14 }
https://github.com/PaddlePaddle/PaddleNLP.git
5
variable
def variable(value, dtype=None, name=None, constraint=None): if dtype is None: dtype = floatx() if hasattr(value, "tocoo"): sparse_coo = value.tocoo() indices = np.concatenate( ( np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1), ), 1, ) v = tf.SparseTensor( indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape, ) v._keras_shape = sparse_coo.shape return v v = tf.Variable( value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint ) if isinstance(value, np.ndarray): v._keras_shape = value.shape elif hasattr(value, "shape"): v._keras_shape = int_shape(value) track_variable(v) return v
84afc5193d38057e2e2badf9c889ea87d80d8fbf
14
backend.py
265
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,150
0
264
173
44
269,521
64
keras
29
keras/backend.py
Python
28
{ "docstring": "Instantiates a variable and returns it.\n\n Args:\n value: Numpy array, initial value of the tensor.\n dtype: Tensor type.\n name: Optional name string for the tensor.\n constraint: Optional projection function to be\n applied to the variable after an optimizer update.\n\n Returns:\n A variable instance (with Keras metadata included).\n\n Examples:\n\n >>> val = np.array([[1, 2], [3, 4]])\n >>> kvar = tf.keras.backend.variable(value=val, dtype='float64',\n ... name='example_var')\n >>> tf.keras.backend.dtype(kvar)\n 'float64'\n >>> print(kvar)\n <tf.Variable 'example_var:...' shape=(2, 2) dtype=float64, numpy=\n array([[1., 2.],\n [3., 4.]])>\n\n ", "language": "en", "n_whitespaces": 206, "n_words": 77, "vocab_size": 66 }
https://github.com/keras-team/keras.git
1
test_conversation_chain_errors_bad_variable
def test_conversation_chain_errors_bad_variable() -> None: llm = FakeLLM() prompt = PromptTemplate(input_variables=["foo"], template="{foo}") memory = ConversationBufferMemory(dynamic_key="foo") with pytest.raises(ValueError): ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key="foo") @pytest.mark.parametrize( "memory", [ ConversationBufferMemory(dynamic_key="baz"), ConversationSummaryMemory(llm=FakeLLM(), dynamic_key="baz"), ], )
a408ed3ea39dfa47e8b522a9e153b259f25df54e
@pytest.mark.parametrize( "memory", [ ConversationBufferMemory(dynamic_key="baz"), ConversationSummaryMemory(llm=FakeLLM(), dynamic_key="baz"), ], )
12
test_conversation.py
161
Samantha/add conversation chain (#166) Add MemoryChain and ConversationChain as chains that take a docstore in addition to the prompt, and use the docstore to stuff context into the prompt. This can be used to have an ongoing conversation with a chatbot. Probably needs a bit of refactoring for code quality Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
46,692
1
71
60
26
191,569
28
langchain
18
tests/unit_tests/chains/test_conversation.py
Python
7
{ "docstring": "Test that conversation chain works in basic setting.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/hwchase17/langchain.git
8
register
async def register(cls): for field in cls.__fields__.values(): if Block.is_block_class(field.type_): await field.type_.register() if get_origin(field.type_) is Union: for type in get_args(field.type_): if Block.is_block_class(type): await type.register()
1c74cd08aaa8eb7759490fc156abb18f916b0764
16
core.py
116
Renames method from install to register
11,452
0
131
178
18
56,177
23
prefect
12
src/prefect/blocks/core.py
Python
27
{ "docstring": "\n Makes block available for configuration with current Orion server.\n Recursively registers all nested blocks. Registration is idempotent.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
https://github.com/PrefectHQ/prefect.git
14
get_input
def get_input(self, field_name, **kwargs): if self.credential_type.kind != 'external' and field_name in self.dynamic_input_fields: return self._get_dynamic_input(field_name) if field_name in self.credential_type.secret_fields: try: return decrypt_field(self, field_name) except AttributeError: for field in self.credential_type.inputs.get('fields', []): if field['id'] == field_name and 'default' in field: return field['default'] if 'default' in kwargs: return kwargs['default'] raise AttributeError(field_name) if field_name in self.inputs: return self.inputs[field_name] if 'default' in kwargs: return kwargs['default'] for field in self.credential_type.inputs.get('fields', []): if field['id'] == field_name and 'default' in field: return field['default'] raise AttributeError(field_name)
871175f97fb177ad3c6d6f150e2b685dc11893ce
16
__init__.py
279
Sending field_name in AttributeError
17,314
0
327
166
35
82,077
76
awx
14
awx/main/models/credential/__init__.py
Python
21
{ "docstring": "\n Get an injectable and decrypted value for an input field.\n\n Retrieves the value for a given credential input field name. Return\n values for secret input fields are decrypted. If the credential doesn't\n have an input value defined for the given field name, an AttributeError\n is raised unless a default value is provided.\n\n :param field_name(str): The name of the input field.\n :param default(optional[str]): A default return value to use.\n ", "language": "en", "n_whitespaces": 132, "n_words": 68, "vocab_size": 43 }
https://github.com/ansible/awx.git
1
synchronized_update_sequences
def synchronized_update_sequences(self) -> tuple[str, str]: return ( self._synchronized_update_start_sequence(), self._synchronized_update_end_sequence(), )
d14659c1a3760eade2dd3479b66eb8b2e7711db0
8
_terminal_features.py
45
[terminal buffering] Add support for the "mode 2026" That task is definitely way more complicated that it seemed to be 😅
44,251
0
53
28
10
183,562
10
textual
6
src/textual/_terminal_features.py
Python
14
{ "docstring": "\n Returns the ANSI sequence that we should send to the terminal to tell it that\n it should buffer the content we're about to send, as well as the ANIS sequence to end the buffering.\n If the terminal doesn't seem to support synchronised updates both strings will be empty.\n\n Returns:\n tuple[str, str]: the start and end ANSI sequences, respectively. They will both be empty strings\n if the terminal emulator doesn't seem to support the \"synchronised updates\" mode.\n ", "language": "en", "n_whitespaces": 138, "n_words": 76, "vocab_size": 47 }
https://github.com/Textualize/textual.git
8
do_for
def do_for(parser, token): bits = token.split_contents() if len(bits) < 4: raise TemplateSyntaxError( "'for' statements should have at least four words: %s" % token.contents ) is_reversed = bits[-1] == 'reversed' in_index = -3 if is_reversed else -2 if bits[in_index] != 'in': raise TemplateSyntaxError("'for' statements should use the format" " 'for x in y': %s" % token.contents) invalid_chars = frozenset((' ', '"', "'", FILTER_SEPARATOR)) loopvars = re.split(r' *, *', ' '.join(bits[1:in_index])) for var in loopvars: if not var or not invalid_chars.isdisjoint(var): raise TemplateSyntaxError( "'for' tag received an invalid argument: %s" % token.contents ) sequence = parser.compile_filter(bits[in_index + 1]) nodelist_loop = parser.parse(('empty', 'endfor',)) token = parser.next_token() if token.contents == 'empty': nodelist_empty = parser.parse(('endfor',)) parser.delete_first_token() else: nodelist_empty = None return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
c5cd8783825b5f6384417dac5f3889b4210b7d08
13
defaulttags.py
343
Refs #33476 -- Refactored problematic code before reformatting by Black. In these cases Black produces unexpected results, e.g. def make_random_password( self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789', ): or cursor.execute(""" SELECT ... """, [table name], )
50,268
0
296
203
89
203,240
121
django
27
django/template/defaulttags.py
Python
27
{ "docstring": "\n Loop over each item in an array.\n\n For example, to display a list of athletes given ``athlete_list``::\n\n <ul>\n {% for athlete in athlete_list %}\n <li>{{ athlete.name }}</li>\n {% endfor %}\n </ul>\n\n You can loop over a list in reverse by using\n ``{% for obj in list reversed %}``.\n\n You can also unpack multiple values from a two-dimensional array::\n\n {% for key,value in dict.items %}\n {{ key }}: {{ value }}\n {% endfor %}\n\n The ``for`` tag can take an optional ``{% empty %}`` clause that will\n be displayed if the given array is empty or could not be found::\n\n <ul>\n {% for athlete in athlete_list %}\n <li>{{ athlete.name }}</li>\n {% empty %}\n <li>Sorry, no athletes in this list.</li>\n {% endfor %}\n <ul>\n\n The above is equivalent to -- but shorter, cleaner, and possibly faster\n than -- the following::\n\n <ul>\n {% if athlete_list %}\n {% for athlete in athlete_list %}\n <li>{{ athlete.name }}</li>\n {% endfor %}\n {% else %}\n <li>Sorry, no athletes in this list.</li>\n {% endif %}\n </ul>\n\n The for loop sets a number of variables available within the loop:\n\n ========================== ================================================\n Variable Description\n ========================== ================================================\n ``forloop.counter`` The current iteration of the loop (1-indexed)\n ``forloop.counter0`` The current iteration of the loop (0-indexed)\n ``forloop.revcounter`` The number of iterations from the end of the\n loop (1-indexed)\n ``forloop.revcounter0`` The number of iterations from the end of the\n loop (0-indexed)\n ``forloop.first`` True if this is the first time through the loop\n ``forloop.last`` True if this is the last time through the loop\n ``forloop.parentloop`` For nested loops, this is the loop \"above\" the\n current one\n ========================== ================================================\n ", "language": "en", "n_whitespaces": 764, "n_words": 262, "vocab_size": 121 }
https://github.com/django/django.git
2
set_global_relative_scale_factor
def set_global_relative_scale_factor(self, scale_factor, reference_quantity): from sympy.physics.units import UnitSystem scale_factor = sympify(scale_factor) if isinstance(scale_factor, Prefix): self._is_prefixed = True # replace all prefixes by their ratio to canonical units: scale_factor = scale_factor.replace( lambda x: isinstance(x, Prefix), lambda x: x.scale_factor ) scale_factor = sympify(scale_factor) UnitSystem._quantity_scale_factors_global[self] = (scale_factor, reference_quantity) UnitSystem._quantity_dimensional_equivalence_map_global[self] = reference_quantity
40a89803dbe877edc8ab6672819715f959273e60
11
quantities.py
133
feat(physics.units): add `is_prefixed` property to `Quantity`
48,646
0
151
86
38
197,624
48
sympy
16
sympy/physics/units/quantities.py
Python
12
{ "docstring": "\n Setting a scale factor that is valid across all unit system.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
https://github.com/sympy/sympy.git
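A short usage sketch of set_global_relative_scale_factor above; the quantity name is invented, and the example simply registers a length quantity defined as 1000 meters:

    from sympy.physics.units import Quantity, meter

    road_section = Quantity("road_section")
    road_section.set_global_relative_scale_factor(1000, meter)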
14
validate_expense_against_budget
def validate_expense_against_budget(args): args = frappe._dict(args) if args.get("company") and not args.fiscal_year: args.fiscal_year = get_fiscal_year(args.get("posting_date"), company=args.get("company"))[0] frappe.flags.exception_approver_role = frappe.get_cached_value( "Company", args.get("company"), "exception_budget_approver_role" ) if not args.account: args.account = args.get("expense_account") if not (args.get("account") and args.get("cost_center")) and args.item_code: args.cost_center, args.account = get_item_details(args) if not args.account: return for budget_against in ["project", "cost_center"] + get_accounting_dimensions(): if ( args.get(budget_against) and args.account and frappe.db.get_value("Account", {"name": args.account, "root_type": "Expense"}) ): doctype = frappe.unscrub(budget_against) if frappe.get_cached_value("DocType", doctype, "is_tree"): lft, rgt = frappe.db.get_value(doctype, args.get(budget_against), ["lft", "rgt"]) condition = % ( doctype, lft, rgt, budget_against, ) # nosec args.is_tree = True else: condition = "and b.%s=%s" % (budget_against, frappe.db.escape(args.get(budget_against))) args.is_tree = False args.budget_against_field = budget_against args.budget_against_doctype = doctype budget_records = frappe.db.sql( .format( condition=condition, budget_against_field=budget_against ), (args.fiscal_year, args.account), as_dict=True, ) # nosec if budget_records: validate_budget_records(args, budget_records)
494bd9ef78313436f0424b918f200dab8fc7c20b
19
budget.py
538
style: format code with black
13,727
0
83
324
82
64,811
123
erpnext
33
erpnext/accounts/doctype/budget/budget.py
Python
59
{ "docstring": "and exists(select name from `tab%s`\n\t\t\t\t\twhere lft<=%s and rgt>=%s and name=b.%s)\n\t\t\t\tselect\n\t\t\t\t\tb.{budget_against_field} as budget_against, ba.budget_amount, b.monthly_distribution,\n\t\t\t\t\tifnull(b.applicable_on_material_request, 0) as for_material_request,\n\t\t\t\t\tifnull(applicable_on_purchase_order, 0) as for_purchase_order,\n\t\t\t\t\tifnull(applicable_on_booking_actual_expenses,0) as for_actual_expenses,\n\t\t\t\t\tb.action_if_annual_budget_exceeded, b.action_if_accumulated_monthly_budget_exceeded,\n\t\t\t\t\tb.action_if_annual_budget_exceeded_on_mr, b.action_if_accumulated_monthly_budget_exceeded_on_mr,\n\t\t\t\t\tb.action_if_annual_budget_exceeded_on_po, b.action_if_accumulated_monthly_budget_exceeded_on_po\n\t\t\t\tfrom\n\t\t\t\t\t`tabBudget` b, `tabBudget Account` ba\n\t\t\t\twhere\n\t\t\t\t\tb.name=ba.parent and b.fiscal_year=%s\n\t\t\t\t\tand ba.account=%s and b.docstatus=1\n\t\t\t\t\t{condition}\n\t\t\t", "language": "en", "n_whitespaces": 33, "n_words": 49, "vocab_size": 38 }
https://github.com/frappe/erpnext.git
5
load_macros
def load_macros(self, version):
        vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
        self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
        self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
        net = r"Software\Microsoft\.NETFramework"
        self.set_macro("FrameworkDir", net, "installroot")
        try:
            if version > 7.0:
                self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
            else:
                self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
        except KeyError as exc: #
            raise DistutilsPlatformError(
            )

        p = r"Software\Microsoft\NET Framework Setup\Product"
        for base in HKEYS:
            try:
                h = RegOpenKeyEx(base, p)
            except RegError:
                continue
            key = RegEnumKey(h, 0)
            d = read_values(base, r"%s\%s" % (p, key))
            self.macros["$(FrameworkVersion)"] = d["version"]
8198943edd73a363c266633e1aa5b2a9e9c9f526
13
msvccompiler.py
255
add python 3.10.4 for windows
56,835
0
296
151
58
222,984
75
XX-Net
20
python3.10.4/Lib/distutils/msvccompiler.py
Python
26
{ "docstring": "Python was built with Visual Studio 2003;\nextensions must be built with a compiler than can generate compatible binaries.\nVisual Studio 2003 was not found on this system. If you have Cygwin installed,\nyou can try compiling with MingW32, by passing \"-c mingw32\" to setup.py.", "language": "en", "n_whitespaces": 41, "n_words": 45, "vocab_size": 37 }
https://github.com/XX-net/XX-Net.git
2
samestat
def samestat(s1, s2):
    return (s1.st_ino == s2.st_ino and
            s1.st_dev == s2.st_dev)


# Are two filenames really pointing to the same file?
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
genericpath.py
43
add python 3.10.4 for windows
54,829
0
37
26
20
217,516
21
XX-Net
5
python3.10.4/Lib/genericpath.py
Python
3
{ "docstring": "Test whether two stat buffers reference the same file", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/XX-net/XX-Net.git
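
A small editor-added illustration of the helper above through its public wrapper `os.path.samestat`; the file paths are placeholders, not taken from the record.

import os

s1 = os.stat("a.txt")            # placeholder path
s2 = os.stat("link_to_a.txt")    # placeholder path, e.g. a hard link to a.txt
print(os.path.samestat(s1, s2))  # True when both stat results point at the same file
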
11
batch
def batch(_func=None, max_batch_size=10, batch_wait_timeout_s=0.0):
    # `_func` will be None in the case when the decorator is parametrized.
    # See the comment at the end of this function for a detailed explanation.
    if _func is not None:
        if not callable(_func):
            raise TypeError(
                "@serve.batch can only be used to decorate functions or methods."
            )

        if not iscoroutinefunction(_func):
            raise TypeError("Functions decorated with @serve.batch must be 'async def'")

    if not isinstance(max_batch_size, int):
        if isinstance(max_batch_size, float) and max_batch_size.is_integer():
            max_batch_size = int(max_batch_size)
        else:
            raise TypeError("max_batch_size must be integer >= 1")

    if max_batch_size < 1:
        raise ValueError("max_batch_size must be an integer >= 1")

    if not isinstance(batch_wait_timeout_s, (float, int)):
        raise TypeError("batch_wait_timeout_s must be a float >= 0")

    if batch_wait_timeout_s < 0:
        raise ValueError("batch_wait_timeout_s must be a float >= 0")
e5a8b1dd55817305ac85237cd99d8b6ed23df294
13
batching.py
206
[Serve] Add API Annotations And Move to _private (#27058)
28,055
0
264
136
78
126,078
121
ray
12
python/ray/serve/batching.py
Python
21
{ "docstring": "Converts a function to asynchronously handle batches.\n\n The function can be a standalone function or a class method. In both\n cases, the function must be `async def` and take a list of objects as\n its sole argument and return a list of the same length as a result.\n\n When invoked, the caller passes a single object. These will be batched\n and executed asynchronously once there is a batch of `max_batch_size`\n or `batch_wait_timeout_s` has elapsed, whichever occurs first.\n\n Example:\n >>> from ray import serve\n >>> @serve.batch(max_batch_size=50, batch_wait_timeout_s=0.5) # doctest: +SKIP\n ... async def handle_batch(batch: List[str]): # doctest: +SKIP\n ... return [s.lower() for s in batch] # doctest: +SKIP\n\n >>> async def handle_single(s: str): # doctest: +SKIP\n ... # Returns s.lower().\n ... return await handle_batch(s) # doctest: +SKIP\n\n Arguments:\n max_batch_size: the maximum batch size that will be executed in\n one call to the underlying function.\n batch_wait_timeout_s: the maximum duration to wait for\n `max_batch_size` elements before running the underlying function.\n ", "language": "en", "n_whitespaces": 253, "n_words": 157, "vocab_size": 97 }
https://github.com/ray-project/ray.git
3
convert_example
def convert_example(example, tokenizer, max_seq_length=512, is_test=False):
    text_a = example['text_a']
    text_b = example.get('text_b', None)

    text_a = _tokenize_special_chars(_normalize(text_a))
    if text_b is not None:
        text_b = _tokenize_special_chars(_normalize(text_b))

    encoded_inputs = tokenizer(
        text=text_a,
        text_pair=text_b,
        max_seq_len=max_seq_length,
        return_position_ids=True)
    input_ids = encoded_inputs['input_ids']
    token_type_ids = encoded_inputs['token_type_ids']
    position_ids = encoded_inputs['position_ids']

    if is_test:
        return input_ids, token_type_ids, position_ids
    label = np.array([example['label']], dtype='int64')
    return input_ids, token_type_ids, position_ids, label
15f0aa8f4515ae6cf2ee3ef71f90d533bc9e61b2
12
utils.py
205
[ehealth] add sequence classification example
118,112
0
132
128
37
322,284
54
PaddleNLP
22
examples/biomedical/cblue/sequence_classification/utils.py
Python
18
{ "docstring": "\n Builds model inputs from a sequence or a pair of sequences for sequence\n classification tasks by concatenating and adding special tokens. And\n creates a mask from the two sequences for sequence-pair classification\n tasks.\n\n The convention in Electra/EHealth is:\n\n - single sequence:\n input_ids: ``[CLS] X [SEP]``\n token_type_ids: `` 0 0 0``\n position_ids: `` 0 1 2``\n\n - a senquence pair:\n input_ids: ``[CLS] X [SEP] Y [SEP]``\n token_type_ids: `` 0 0 0 1 1``\n position_ids: `` 0 1 2 3 4``\n\n Args:\n example (obj:`dict`):\n A dictionary of input data, containing text and label if it has.\n tokenizer (obj:`PretrainedTokenizer`):\n A tokenizer inherits from :class:`paddlenlp.transformers.PretrainedTokenizer`.\n Users can refer to the superclass for more information.\n max_seq_length (obj:`int`):\n The maximum total input sequence length after tokenization.\n Sequences longer will be truncated, and the shorter will be padded.\n is_test (obj:`bool`, default to `False`):\n Whether the example contains label or not.\n\n Returns:\n input_ids (obj:`list[int]`):\n The list of token ids.\n token_type_ids (obj:`list[int]`):\n List of sequence pair mask.\n position_ids (obj:`list[int]`):\n List of position ids.\n label(obj:`numpy.array`, data type of int64, optional):\n The input label if not is_test.\n ", "language": "en", "n_whitespaces": 457, "n_words": 176, "vocab_size": 116 }
https://github.com/PaddlePaddle/PaddleNLP.git
4
accepted_pairs
def accepted_pairs(self) -> List[Dict[str, Any]]:
        final = []
        for pair, info in self._cached_pairs.items():
            if (info.expectancy > float(self.edge_config.get('minimum_expectancy', 0.2))
                    and info.winrate > float(self.edge_config.get('minimum_winrate', 0.60))):
                final.append({
                    'Pair': pair,
                    'Winrate': info.winrate,
                    'Expectancy': info.expectancy,
                    'Stoploss': info.stoploss,
                })
        return final
6024fa482e1b09bae8e85b3afc9fc58e483c1512
15
edge_positioning.py
168
Use brackets to break IF lines
34,375
0
199
107
32
149,135
35
freqtrade
18
freqtrade/edge/edge_positioning.py
Python
15
{ "docstring": "\n return a list of accepted pairs along with their winrate, expectancy and stoploss\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
https://github.com/freqtrade/freqtrade.git
1
get_remote_url
def get_remote_url(cls, location):
        # type: (str) -> str
        raise NotImplementedError
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
6
versioncontrol.py
19
upd; format
12,560
0
31
10
10
61,416
10
transferlearning
4
.venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py
Python
2
{ "docstring": "\n Return the url used at location\n\n Raises RemoteNotFoundError if the repository does not have a remote\n url configured.\n ", "language": "en", "n_whitespaces": 47, "n_words": 18, "vocab_size": 16 }
https://github.com/jindongwang/transferlearning.git
12
answer_drop_tables
def answer_drop_tables(self, statement):
        if statement.if_exists is False:
            for table in statement.tables:
                if len(table.parts) > 1:
                    db_name = table.parts[0]
                else:
                    db_name = self.session.database
                if db_name not in ['files', 'mindsdb']:
                    raise SqlApiException(f"Cannot delete a table from database '{db_name}'")
                table_name = table.parts[-1]
                dn = self.session.datahub[db_name]
                if dn.has_table(table_name) is False:
                    raise SqlApiException(f"Cannot delete a table from database '{db_name}': table does not exists")

        for table in statement.tables:
            if len(table.parts) > 1:
                db_name = table.parts[0]
            else:
                db_name = self.session.database
            if db_name not in ['files', 'mindsdb']:
                raise SqlApiException(f"Cannot delete a table from database '{db_name}'")

            table_name = table.parts[-1]
            dn = self.session.datahub[db_name]
            if dn.has_table(table_name):
                if db_name == 'mindsdb':
                    self.session.datahub['mindsdb'].delete_predictor(table_name)
                elif db_name == 'files':
                    self.session.data_store.delete_file(table_name)

        return SQLAnswer(ANSWER_TYPE.OK)
01b47406a29d17781356badb20f49f2fdc24d00e
16
mysql_proxy.py
366
'drop table' query
25,199
0
491
217
46
114,473
107
mindsdb
22
mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
Python
28
{ "docstring": " answer on 'drop table [if exists] {name}'\n Args:\n statement: ast\n ", "language": "en", "n_whitespaces": 44, "n_words": 10, "vocab_size": 10 }
https://github.com/mindsdb/mindsdb.git
2
validate
def validate(self, num_steps=None, profile=False, reduce_results=True, info=None):
        worker_stats = self.worker_group.validate(
            num_steps=num_steps, profile=profile, info=info
        )

        if reduce_results:
            return self._process_stats(worker_stats)
        else:
            return worker_stats
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
9
torch_trainer.py
85
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,985
0
88
56
18
133,353
20
ray
9
python/ray/util/sgd/torch/torch_trainer.py
Python
8
{ "docstring": "Evaluates the model on the validation data set.\n\n Args:\n num_steps (int): Number of batches to compute update steps on\n per worker. This corresponds also to the number of times\n ``TrainingOperator.validate_batch`` is called per worker.\n profile (bool): Returns time stats for the evaluation procedure.\n reduce_results (bool): Whether to average all metrics across\n all workers into one dict. If a metric is a non-numerical\n value (or nested dictionaries), one value will be randomly\n selected among the workers. If False, returns a list of dicts.\n info (dict): Optional dictionary passed to the training\n operator for `validate` and `validate_batch`.\n\n Returns:\n A dictionary of metrics for validation.\n You can provide custom metrics by passing in a custom\n ``training_operator_cls``.\n ", "language": "en", "n_whitespaces": 309, "n_words": 113, "vocab_size": 84 }
https://github.com/ray-project/ray.git
2
_Net_set_input_arrays
def _Net_set_input_arrays(self, data, labels):
    if labels.ndim == 1:
        labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis, np.newaxis])
    return self._set_input_arrays(data, labels)
cc4d0564756ca067516f71718a3d135996525909
12
pycaffe.py
74
Balanced joint maximum mean discrepancy for deep transfer learning
12,064
0
77
49
16
60,284
17
transferlearning
9
code/deep/BJMMD/caffe/python/caffe/pycaffe.py
Python
5
{ "docstring": "\n Set input arrays of the in-memory MemoryDataLayer.\n (Note: this is only for networks declared with the memory data layer.)\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 18 }
https://github.com/jindongwang/transferlearning.git
1
test_multi_trial_reuse
def test_multi_trial_reuse(ray_start_4_cpus_extra):
    os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "2"

    register_trainable("foo2", MyResettableClass)

    # We sleep here for one second so that the third actor
    # does not finish training before the fourth can be scheduled.
    # This helps ensure that both remote runners are re-used and
    # not just one.
    [trial1, trial2, trial3, trial4] = tune.run(
        "foo2",
        config={
            "message": tune.grid_search(["First", "Second", "Third", "Fourth"]),
            "id": -1,
            "sleep": 2,
        },
        reuse_actors=True,
        resources_per_trial={"cpu": 2},
    ).trials

    assert trial3.last_result["num_resets"] == 1
    assert trial4.last_result["num_resets"] == 1
1510fb2cd631b2776092fb45ee4082e5e65f16f8
16
test_actor_reuse.py
175
[air/tune] Internal resource management 2 - Ray Tune to use new Ray AIR resource manager (#30016) Includes/depends on #30777 TLDR: This PR refactors Ray Tune's resource management to use a central AIR resource management package instead of the tightly coupled PlacementGroupManager. Ray Tune's resource management currently uses a tightly coupled placement group manager. This leads to a number of shortcomings: - The tight coupling on the manager side (e.g. PG manager keeps track of trials) prevents re-usability - The tight coupling on the trial executor side prevents using different resource management strategies (e.g. shared or budget-based) - It's hard to test independently. Tests for the resource management require a simulated tune setup. To improve stability, extensibility, and maintainability, this PR moves the resource management logic into a central `ray.air.execution.resources` subpackage. The resource management has a simple API that works with `ResourceRequest`s and `AllocatedResources` to manage requested and assigned resources, respectively. The actual resource management can then be anything - per default it is a placement group based manager, but this PR also introduces a PoC budget-based manager that can be plugged in. The PR does not substantially change existing tests, so we can be certain that the new resource model is a fully compatible replacement for the old placement group manager. Signed-off-by: Kai Fricke <kai@anyscale.com>
31,311
0
176
100
65
138,087
75
ray
18
python/ray/tune/tests/test_actor_reuse.py
Python
15
{ "docstring": "Test that actors from multiple trials running in parallel will be reused.\n\n - 2 trials can run at the same time\n - Trial 3 will be scheduled after trial 1 succeeded, so will reuse actor\n - Trial 4 will be scheduled after trial 2 succeeded, so will reuse actor\n ", "language": "en", "n_whitespaces": 61, "n_words": 49, "vocab_size": 31 }
https://github.com/ray-project/ray.git
1
test_get_dynamic_sampling_default_biases
def test_get_dynamic_sampling_default_biases(self):
        with Feature(
            {
                self.new_ds_flag: True,
            }
        ):
            response = self.get_success_response(
                self.organization.slug, self.project.slug, method="get"
            )
            assert response.data["dynamicSamplingBiases"] == DEFAULT_BIASES
6fc6106b6a57149a5bae3c0f4677349cfbae1155
12
test_project_details.py
84
fix(dyn-sampling): Backend code clean up (#42001) We are consolidating server-side-sampling and dynamic-sampling flags into only dynamic-sampling. The flag is being controlled by plan
18,532
0
126
50
20
89,350
20
sentry
12
tests/sentry/api/endpoints/test_project_details.py
Python
10
{ "docstring": "\n Tests the case when organization on AM2 plan, but haven't manipulated the bias toggles\n yet, so they get the default biases.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 19 }
https://github.com/getsentry/sentry.git
6
remove_widget
def remove_widget(self, widget):
        if widget not in self.children:
            return
        self.children.remove(widget)
        if widget.canvas in self.canvas.children:
            self.canvas.remove(widget.canvas)
        elif widget.canvas in self.canvas.after.children:
            self.canvas.after.remove(widget.canvas)
        elif widget.canvas in self.canvas.before.children:
            self.canvas.before.remove(widget.canvas)
        for type_id in widget.motion_filter:
            self.unregister_for_motion_event(type_id, widget)
        widget.funbind('motion_filter', self._update_motion_filter)
        widget.parent = None
        widget.dec_disabled(self._disabled_count)
1830123ba3edf7290b7c6cb1c6f406ccf1d0e5d4
12
widget.py
213
Feature: EventManagerBase (#7658) * Added EventManagerBase class and event_managers attribute to WindowBase class. * Added on_motion event to Widget class. * Updated post_dispatch_input in EventLoopBase to skip non-touch events. * Using type ids in MouseMotionEventProvider. * Added on_motion method to Widget subclasses. * Updated Widget.on_motion method to dispatch to filtered widgets if 'pos' is not in me.profile. * Changed motion_filter property in Widget to store key to list values. * Updated Widget.on_motion to not dispatch event to children if widget is disabled. * Widget: Using flags to control dispatching in on_motion method. * Widget: Don't dispatch on_motion to children if only self is registered. * Widget: Removed collision on disabled check from on_motion method. * Widget: Added docstrings for motion_filter and related methods. * EventManager: Moved motion event flags to eventmanager/__init__.py module. * ScreenManager: Overrode the on_motion method. * WindowBase: Using attributes event_managers and event_managers_dict. * WindowBase: Added doc for register_event_manager and unregister_event_manager methods. * Widget: Improved default dispatch to stop after the last registered widgets. * EventManagerBase: Added initial docs class and module. * Widget: Added experimental warnings to motion_filter property and to on_motion and (un)register_for_motion_event methods. * WindowBase: Added docs for event_managers and event_managers_dict attributes. * MotionEvent: Added type_id and flags to push_attrs list. * EventManagerBase: Added versionadded tag on all flags. * EventManagerBase: Use dispatch modes instead of flags.
46,984
0
162
134
29
194,460
37
kivy
16
kivy/uix/widget.py
Python
15
{ "docstring": "Remove a widget from the children of this widget.\n\n :Parameters:\n `widget`: :class:`Widget`\n Widget to remove from our children list.\n\n .. code-block:: python\n\n >>> from kivy.uix.button import Button\n >>> root = Widget()\n >>> button = Button()\n >>> root.add_widget(button)\n >>> root.remove_widget(button)\n ", "language": "en", "n_whitespaces": 117, "n_words": 39, "vocab_size": 31 }
https://github.com/kivy/kivy.git
3
mixin_base_ppr_parser
def mixin_base_ppr_parser(parser):
    mixin_essential_parser(parser)

    gp = add_arg_group(parser, title='Base Deployment')

    gp.add_argument(
        '--extra-search-paths',
        type=str,
        default=[],
        nargs='*',
        help='Extra search paths to be used when loading modules and finding YAML config files.'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )

    gp.add_argument(
        '--timeout-ctrl',
        type=int,
        default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
        help='The timeout in milliseconds of the control request, -1 for waiting forever',
    )

    parser.add_argument(
        '--k8s-namespace',
        type=str,
        help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )

    gp.add_argument(
        '--polling',
        type=str,
        default=PollingType.ANY.name,
        help=,
    )
a3b71c7208b3cd48aa7bc978c3343a074947e3d9
13
base.py
202
fix(parsers): clearify flow args (#4701)
2,215
0
253
123
64
12,207
80
jina
21
jina/parsers/orchestrate/base.py
Python
41
{ "docstring": "Mixing in arguments required by pod/deployment/runtime module into the given parser.\n :param parser: the parser instance to which we add arguments\n \n The polling strategy of the Deployment and its endpoints (when `shards>1`).\n Can be defined for all endpoints of a Deployment or by endpoint.\n Define per Deployment:\n - ANY: only one (whoever is idle) Pod polls the message\n - ALL: all Pods poll the message (like a broadcast)\n Define per Endpoint:\n JSON dict, {endpoint: PollingType}\n {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}\n \n ", "language": "en", "n_whitespaces": 119, "n_words": 81, "vocab_size": 66 }
https://github.com/jina-ai/jina.git
7
calc
def calc(term):
    # This part is for reading and converting arithmetic terms.
    term = term.replace(" ", "")
    term = term.replace("^", "**")
    term = term.replace("=", "")
    term = term.replace("?", "")
    term = term.replace("%", "/100.00")
    term = term.replace("rad", "radians")
    term = term.replace("mod", "%")
    term = term.replace("aval", "abs")

    functions = [
        "sin",
        "cos",
        "tan",
        "pow",
        "cosh",
        "sinh",
        "tanh",
        "sqrt",
        "pi",
        "radians",
        "e",
    ]

    # This part is for reading and converting function expressions.
    term = term.lower()
    for func in functions:
        if func in term:
            withmath = "math." + func
            term = term.replace(func, withmath)

    try:
        # here goes the actual evaluating.
        term = eval(term)

    # here goes to the error cases.
    except ZeroDivisionError:
        print("Can't divide by 0. Please try again.")

    except NameError:
        print("Invalid input. Please try again")

    except AttributeError:
        print("Please check usage method and try again.")

    except TypeError:
        print("please enter inputs of correct datatype ")

    return term
f0af0c43340763724f139fa68aa1e5a9ffe458b4
12
calculator.py
345
refactor: clean code Signed-off-by: slowy07 <slowy.arfy@gmail.com>
4,373
0
359
182
93
22,594
143
Python
13
calculator.py
Python
38
{ "docstring": "\n input: term of type str\n output: returns the result of the computed term.\n purpose: This function is the actual calculator and the heart of the application\n ", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 20 }
https://github.com/geekcomputers/Python.git
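
An editor-added, illustrative call to the `calc` function in the record above, assuming the surrounding module has `import math` in scope; the input string is made up.

result = calc("sin(pi/2) + 2^3")
# "^" becomes "**", "sin"/"pi" are rewritten to math.sin/math.pi, then eval() runs:
# math.sin(math.pi / 2) + 2 ** 3 == 9.0
print(result)
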
1
test_song_from_dict
def test_song_from_dict():
    song = Song.from_dict(
        {
            "name": "Ropes",
            "artists": ["Dirty Palm", "Chandler Jewels"],
            "album_name": "Ropes",
            "album_artist": "Dirty Palm",
            "genres": ["gaming edm", "melbourne bounce international"],
            "disc_number": 1,
            "duration": 188,
            "year": 2021,
            "date": "2021-10-28",
            "track_number": 1,
            "tracks_count": 1,
            "isrc": "GB2LD2110301",
            "song_id": "1t2qKa8K72IBC8yQlhD9bU",
            "cover_url": "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332",
            "explicit": False,
            "download_url": None,
            "artist": "Dirty Palm",
            "disc_count": 1,
            "copyright": "",
            "publisher": "",
            "url": "https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU",
        }
    )

    assert song.name == "Ropes"
    assert song.artists == ["Dirty Palm", "Chandler Jewels"]
    assert song.album_name == "Ropes"
    assert song.album_artist == "Dirty Palm"
    assert song.genres == ["gaming edm", "melbourne bounce international"]
    assert song.disc_number == 1
    assert song.duration == 188
    assert song.year == 2021
    assert song.date == "2021-10-28"
    assert song.track_number == 1
    assert song.tracks_count == 1
    assert song.isrc == "GB2LD2110301"
    assert song.song_id == "1t2qKa8K72IBC8yQlhD9bU"
    assert (
        song.cover_url
        == "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"
    )
    assert song.explicit == False
fa2ad657482aca9dc628e6d7062b8badf2706bb6
12
test_song.py
384
v4 init
5,349
0
445
206
81
30,148
129
spotify-downloader
19
tests/types/test_song.py
Python
44
{ "docstring": "\n Tests if Song.from_dict() works correctly.\n ", "language": "en", "n_whitespaces": 12, "n_words": 5, "vocab_size": 5 }
https://github.com/spotDL/spotify-downloader.git
3
_get_states_count_upstream_ti
def _get_states_count_upstream_ti(ti, finished_tis):
        counter = Counter(task.state for task in finished_tis if task.task_id in ti.task.upstream_task_ids)
        return (
            counter.get(State.SUCCESS, 0),
            counter.get(State.SKIPPED, 0),
            counter.get(State.FAILED, 0),
            counter.get(State.UPSTREAM_FAILED, 0),
            sum(counter.values()),
        )
99f86ccfa59df6aa1aa33afce5e0b66dd5df9a3d
12
trigger_rule_dep.py
127
Some refactoring work on scheduling code (#21414)
8,298
0
109
86
22
44,559
26
airflow
17
airflow/ti_deps/deps/trigger_rule_dep.py
Python
9
{ "docstring": "\n This function returns the states of the upstream tis for a specific ti in order to determine\n whether this ti can run in this iteration\n\n :param ti: the ti that we want to calculate deps for\n :param finished_tis: all the finished tasks of the dag_run\n ", "language": "en", "n_whitespaces": 81, "n_words": 45, "vocab_size": 33 }
https://github.com/apache/airflow.git
2
fit
def fit(self, X, y=None):
        # Validating the scalar parameters.
        check_scalar(
            self.threshold,
            "threshold",
            target_type=numbers.Real,
            min_val=0.0,
            include_boundaries="neither",
        )
        check_scalar(
            self.branching_factor,
            "branching_factor",
            target_type=numbers.Integral,
            min_val=1,
            include_boundaries="neither",
        )
        if isinstance(self.n_clusters, numbers.Number):
            check_scalar(
                self.n_clusters,
                "n_clusters",
                target_type=numbers.Integral,
                min_val=1,
            )

        # TODO: Remove deprecated flags in 1.2
        self._deprecated_fit, self._deprecated_partial_fit = True, False
        return self._fit(X, partial=False)
ee5a1b69d1dfa99635a10f0a5b54ec263cedf866
11
_birch.py
171
DOC, MNT Typos found by codespell (#22906)
75,706
0
309
113
39
259,315
47
scikit-learn
20
sklearn/cluster/_birch.py
Python
24
{ "docstring": "\n Build a CF Tree for the input data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input data.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self\n Fitted estimator.\n ", "language": "en", "n_whitespaces": 135, "n_words": 38, "vocab_size": 35 }
https://github.com/scikit-learn/scikit-learn.git
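
A minimal, self-contained sketch (editor-added) of exercising the `fit` method above through scikit-learn's public `Birch` estimator; the toy data is invented for illustration.

import numpy as np
from sklearn.cluster import Birch

X = np.array([[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]])
brc = Birch(n_clusters=2)  # threshold and branching_factor keep their defaults
brc.fit(X)                 # builds the CF Tree after the parameter checks above
print(brc.predict(X))
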
3
load
def load(self):
        for key in sorted(self._alignments.data):
            this_frame_faces = []
            for item in self._alignments.data[key]["faces"]:
                face = DetectedFace()
                face.from_alignment(item, with_thumb=True)
                face.load_aligned(None)
                _ = face.aligned.average_distance  # cache the distances
                this_frame_faces.append(face)
            self._frame_faces.append(this_frame_faces)
        self._sorted_frame_names = sorted(self._alignments.data)
23d92c1f0d83ce1cdcc51480cfe37af074a981b3
12
detected_faces.py
149
Bugfixes - Sort - Fix rare help-text parsing bug - Manual - Fix issue where frame count is incorrect when een > 1 used on extract
19,790
0
161
91
26
100,289
31
faceswap
19
tools/manual/detected_faces.py
Python
11
{ "docstring": " Load the faces from the alignments file, convert to\n :class:`~lib.align.DetectedFace`. objects and add to :attr:`_frame_faces`. ", "language": "en", "n_whitespaces": 23, "n_words": 15, "vocab_size": 13 }
https://github.com/deepfakes/faceswap.git
1
get_assessment_criteria
def get_assessment_criteria(course):
    return frappe.get_all(
        "Course Assessment Criteria",
        fields=["assessment_criteria", "weightage"],
        filters={"parent": course},
        order_by="idx",
    )


@frappe.whitelist()
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist()
11
api.py
72
style: format code with black
14,035
1
6
34
14
65,846
14
erpnext
8
erpnext/education/api.py
Python
7
{ "docstring": "Returns Assessmemt Criteria and their Weightage from Course Master.\n\n\t:param Course: Course\n\t", "language": "en", "n_whitespaces": 10, "n_words": 12, "vocab_size": 11 }
https://github.com/frappe/erpnext.git
7
new_compiler
def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0):
    if plat is None:
        plat = os.name
    try:
        if compiler is None:
            compiler = get_default_compiler(plat)

        (module_name, class_name, long_description) = compiler_class[compiler]
    except KeyError:
        msg = "don't know how to compile C/C++ code on platform '%s'" % plat
        if compiler is not None:
            msg = msg + " with '%s' compiler" % compiler
        raise DistutilsPlatformError(msg)

    try:
        module_name = "distutils." + module_name
        __import__ (module_name)
        module = sys.modules[module_name]
        klass = vars(module)[class_name]
    except ImportError:
        raise DistutilsModuleError(
              "can't compile C/C++ code: unable to load module '%s'" % \
              module_name)
    except KeyError:
        raise DistutilsModuleError(
              "can't compile C/C++ code: unable to find class '%s' "
              "in module '%s'" % (class_name, module_name))

    # XXX The None is necessary to preserve backwards compatibility
    # with classes that expect verbose to be the first positional
    # argument.
    return klass(None, dry_run, force)
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
ccompiler.py
242
add python 3.10.4 for windows
56,657
0
329
148
83
222,589
136
XX-Net
24
python3.10.4/Lib/distutils/ccompiler.py
Python
26
{ "docstring": "Generate an instance of some CCompiler subclass for the supplied\n platform/compiler combination. 'plat' defaults to 'os.name'\n (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler\n for that platform. Currently only 'posix' and 'nt' are supported, and\n the default compilers are \"traditional Unix interface\" (UnixCCompiler\n class) and Visual C++ (MSVCCompiler class). Note that it's perfectly\n possible to ask for a Unix compiler object under Windows, and a\n Microsoft compiler object under Unix -- if you supply a value for\n 'compiler', 'plat' is ignored.\n ", "language": "en", "n_whitespaces": 113, "n_words": 83, "vocab_size": 59 }
https://github.com/XX-net/XX-Net.git
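
A hedged usage sketch (editor-added) of the factory above; the source file name is a placeholder and the concrete compiler class chosen depends on the platform.

from distutils.ccompiler import new_compiler

cc = new_compiler()                   # default compiler class for this platform
objects = cc.compile(["hello.c"])     # "hello.c" is a hypothetical source file
cc.link_shared_lib(objects, "hello")  # link the resulting objects into a shared library
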
1
register
def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
2e11a043374a6229ec129a4765ee4ba7517832b9
7
feature_extraction_auto.py
27
Register feature extractor (#15634) * Rework AutoFeatureExtractor.from_pretrained internal * Custom feature extractor * Add more tests * Add support for custom feature extractor code * Clean up * Add register API to AutoFeatureExtractor
6,418
0
19
16
5
35,173
5
transformers
4
src/transformers/models/auto/feature_extraction_auto.py
Python
2
{ "docstring": "\n Register a new feature extractor for this class.\n\n Args:\n config_class ([`PretrainedConfig`]):\n The configuration corresponding to the model to register.\n feature_extractor_class ([`FeatureExtractorMixin`]): The feature extractor to register.\n ", "language": "en", "n_whitespaces": 85, "n_words": 26, "vocab_size": 20 }
https://github.com/huggingface/transformers.git
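
An editor-added sketch of how the registration hook above can be used; the config and extractor classes here are stand-ins that subclass existing transformers classes, not code from the record.

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig, Wav2Vec2FeatureExtractor

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type

class MyFeatureExtractor(Wav2Vec2FeatureExtractor):
    pass                     # stand-in custom feature extractor

AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
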
8
execute
def execute():
    if not frappe.get_all("Pricing Rule", limit=1):
        return

    frappe.reload_doc("accounts", "doctype", "pricing_rule_detail")
    doctypes = {
        "Supplier Quotation": "buying",
        "Purchase Order": "buying",
        "Purchase Invoice": "accounts",
        "Purchase Receipt": "stock",
        "Quotation": "selling",
        "Sales Order": "selling",
        "Sales Invoice": "accounts",
        "Delivery Note": "stock",
    }

    for doctype, module in doctypes.items():
        frappe.reload_doc(module, "doctype", frappe.scrub(doctype))

        child_doc = frappe.scrub(doctype) + "_item"
        frappe.reload_doc(module, "doctype", child_doc, force=True)

        child_doctype = doctype + " Item"

        frappe.db.sql(
            .format(
                child_doctype=child_doctype
            )
        )

        data = frappe.db.sql(
            .format(
                child_doc=child_doctype
            ),
            as_dict=1,
        )

        values = []
        for d in data:
            values.append(
                (
                    d.pricing_rule,
                    d.name,
                    d.parent,
                    "pricing_rules",
                    d.parenttype,
                    d.creation,
                    d.modified,
                    d.docstatus,
                    d.modified_by,
                    d.owner,
                    frappe.generate_hash("", 10),
                )
            )

        if values:
            frappe.db.sql(
                .format(
                    values=", ".join(["%s"] * len(values))
                ),
                tuple(values),
            )

    frappe.reload_doc("accounts", "doctype", "pricing_rule")

    for doctype, apply_on in {
        "Pricing Rule Item Code": "Item Code",
        "Pricing Rule Item Group": "Item Group",
        "Pricing Rule Brand": "Brand",
    }.items():
        frappe.reload_doc("accounts", "doctype", frappe.scrub(doctype))

        field = frappe.scrub(apply_on)
        data = frappe.get_all(
            "Pricing Rule",
            fields=[field, "name", "creation", "modified", "owner", "modified_by"],
            filters={"apply_on": apply_on},
        )

        values = []
        for d in data:
            values.append(
                (
                    d.get(field),
                    d.name,
                    parentfield.get(field),
                    "Pricing Rule",
                    d.creation,
                    d.modified,
                    d.owner,
                    d.modified_by,
                    frappe.generate_hash("", 10),
                )
            )

        if values:
            frappe.db.sql(
                .format(
                    doctype=doctype,
                    field=field,
                    values=", ".join(["%s"] * len(values))
                ),
                tuple(values),
            )
494bd9ef78313436f0424b918f200dab8fc7c20b
19
update_pricing_rule_fields.py
756
style: format code with black
14,303
0
99
450
106
66,703
188
erpnext
40
erpnext/patches/v12_0/update_pricing_rule_fields.py
Python
100
{ "docstring": " UPDATE `tab{child_doctype}` SET pricing_rules = pricing_rule\n\t\t\tWHERE docstatus < 2 and pricing_rule is not null and pricing_rule != ''\n\t\t SELECT pricing_rule, name, parent,\n\t\t\t\tparenttype, creation, modified, docstatus, modified_by, owner, name\n\t\t\tFROM `tab{child_doc}` where docstatus < 2 and pricing_rule is not null\n\t\t\tand pricing_rule != '' INSERT INTO\n\t\t\t\t`tabPricing Rule Detail` (`pricing_rule`, `child_docname`, `parent`, `parentfield`, `parenttype`,\n\t\t\t\t`creation`, `modified`, `docstatus`, `modified_by`, `owner`, `name`)\n\t\t\tVALUES {values} INSERT INTO\n\t\t\t\t`tab{doctype}` ({field}, parent, parentfield, parenttype, creation, modified,\n\t\t\t\t\towner, modified_by, name)\n\t\t\tVALUES {values} ", "language": "en", "n_whitespaces": 69, "n_words": 77, "vocab_size": 52 }
https://github.com/frappe/erpnext.git
2
test_state_aggregate_option_behavior
def test_state_aggregate_option_behavior(master_opts):
    minion_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    possible = [None, True, False, ["pkg"]]
    expected_result = [
        True,
        False,
        ["pkg"],
        True,
        True,
        ["pkg"],
        False,
        True,
        ["pkg"],
        ["pkg"],
        True,
        ["pkg"],
    ]

    for idx, combo in enumerate(itertools.permutations(possible, 2)):
        master_opts["state_aggregate"], minion_opts["state_aggregate"] = combo
        state_obj = salt.state.BaseHighState
        state_obj.client = MockBaseHighStateClient(master_opts)
        return_result = state_obj(minion_opts)._BaseHighState__gen_opts(minion_opts)
        assert expected_result[idx] == return_result["state_aggregate"]
8168b25fe5906883a07de5bfdfefabc6d1f57784
12
test_state_options.py
209
fixes saltstack/salt#61478 state_aggregate minion option not respected
54,314
0
187
132
32
215,998
50
salt
21
tests/pytests/unit/state/test_state_options.py
Python
23
{ "docstring": "\n Ensure state_aggregate can be overridden on the minion\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
https://github.com/saltstack/salt.git
2
register
def register(cls, map_function, reduce_function=None, **kwargs):
        if reduce_function is None:
            reduce_function = map_function
        return cls.call(map_function, reduce_function, **kwargs)
58bbcc37477866d19c8b092a0e1974a4f0baa586
8
tree_reduce.py
54
REFACTOR-#2656: Update modin to fit algebra (code only) (#3717) Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Co-authored-by: Vasily Litvinov <vasilij.n.litvinov@intel.com> Co-authored-by: Alexey Prutskov <alexey.prutskov@intel.com> Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Signed-off-by: Rehan Durrani <rehan@ponder.io>
35,228
0
48
35
15
153,044
16
modin
6
modin/core/dataframe/algebra/tree_reduce.py
Python
4
{ "docstring": "\n Build TreeReduce function.\n\n Parameters\n ----------\n map_function : callable(pandas.DataFrame) -> [pandas.DataFrame, pandas.Series]\n Source map function.\n reduce_function : callable(pandas.DataFrame) -> pandas.Series, optional\n Source reduce function. If not specified `map_function` will be used.\n **kwargs : dict\n Additional parameters to pass to the builder function.\n\n Returns\n -------\n callable\n Function that takes query compiler and executes passed functions\n with TreeReduce algorithm.\n ", "language": "en", "n_whitespaces": 182, "n_words": 56, "vocab_size": 46 }
https://github.com/modin-project/modin.git
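
A rough, editor-added sketch of what the builder above produces, assuming it is used inside Modin's query-compiler layer; the map/reduce lambdas and the later call are illustrative only.

# builds a callable that runs per-partition counts and then sums the partial results
tree_count = TreeReduce.register(
    lambda df: df.count(),  # map applied to each partition
    lambda df: df.sum(),    # reduce combining the partial counts
)
# the returned callable is later applied to a query compiler, e.g.
# result = tree_count(query_compiler)
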
3
trustworthiness
def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
    r
    n_samples = X.shape[0]
    if n_neighbors >= n_samples / 2:
        raise ValueError(
            f"n_neighbors ({n_neighbors}) should be less than n_samples / 2"
            f" ({n_samples / 2})"
        )
    dist_X = pairwise_distances(X, metric=metric)
    if metric == "precomputed":
        dist_X = dist_X.copy()
    # we set the diagonal to np.inf to exclude the points themselves from
    # their own neighborhood
    np.fill_diagonal(dist_X, np.inf)
    ind_X = np.argsort(dist_X, axis=1)
    # `ind_X[i]` is the index of sorted distances between i and other samples
    ind_X_embedded = (
        NearestNeighbors(n_neighbors=n_neighbors)
        .fit(X_embedded)
        .kneighbors(return_distance=False)
    )

    # We build an inverted index of neighbors in the input space: For sample i,
    # we define `inverted_index[i]` as the inverted index of sorted distances:
    # inverted_index[i][ind_X[i]] = np.arange(1, n_sample + 1)
    inverted_index = np.zeros((n_samples, n_samples), dtype=int)
    ordered_indices = np.arange(n_samples + 1)
    inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]
    ranks = (
        inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors
    )
    t = np.sum(ranks[ranks > 0])
    t = 1.0 - t * (
        2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
    )
    return t
ade90145c9c660a1a7baf2315185995899b0f356
16
_t_sne.py
352
FIX Raise error when n_neighbors >= n_samples / 2 in manifold.trustworthiness (#23033) Co-authored-by: Shao Yang Hong <hongsy2006@gmail.com> Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
75,842
0
322
228
115
259,640
173
scikit-learn
32
sklearn/manifold/_t_sne.py
Python
84
{ "docstring": "Expresses to what extent the local structure is retained.\n\n The trustworthiness is within [0, 1]. It is defined as\n\n .. math::\n\n T(k) = 1 - \\frac{2}{nk (2n - 3k - 1)} \\sum^n_{i=1}\n \\sum_{j \\in \\mathcal{N}_{i}^{k}} \\max(0, (r(i, j) - k))\n\n where for each sample i, :math:`\\mathcal{N}_{i}^{k}` are its k nearest\n neighbors in the output space, and every sample j is its :math:`r(i, j)`-th\n nearest neighbor in the input space. In other words, any unexpected nearest\n neighbors in the output space are penalised in proportion to their rank in\n the input space.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)\n If the metric is 'precomputed' X must be a square distance\n matrix. Otherwise it contains a sample per row.\n\n X_embedded : ndarray of shape (n_samples, n_components)\n Embedding of the training data in low-dimensional space.\n\n n_neighbors : int, default=5\n The number of neighbors that will be considered. Should be fewer than\n `n_samples / 2` to ensure the trustworthiness to lies within [0, 1], as\n mentioned in [1]_. An error will be raised otherwise.\n\n metric : str or callable, default='euclidean'\n Which metric to use for computing pairwise distances between samples\n from the original input space. If metric is 'precomputed', X must be a\n matrix of pairwise distances or squared distances. Otherwise, for a list\n of available metrics, see the documentation of argument metric in\n `sklearn.pairwise.pairwise_distances` and metrics listed in\n `sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the\n \"cosine\" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n trustworthiness : float\n Trustworthiness of the low-dimensional embedding.\n\n References\n ----------\n .. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood\n Preservation in Nonlinear Projection Methods: An Experimental Study.\n In Proceedings of the International Conference on Artificial Neural Networks\n (ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.\n\n .. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving\n Local Structure. Proceedings of the Twelth International Conference on\n Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.\n ", "language": "en", "n_whitespaces": 550, "n_words": 314, "vocab_size": 202 }
https://github.com/scikit-learn/scikit-learn.git
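
A self-contained sketch (editor-added) of scoring an embedding with the function above; the dataset and parameters are chosen only for illustration.

from sklearn.datasets import load_iris
from sklearn.manifold import TSNE, trustworthiness

X = load_iris().data
X_emb = TSNE(n_components=2, random_state=0).fit_transform(X)
print(trustworthiness(X, X_emb, n_neighbors=5))  # value in [0, 1]; higher is better
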